From 033de60cad123bfceacb261cd55802cba40d657f Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 7 Nov 2025 15:33:51 +0200 Subject: [PATCH 01/42] 390 convert StepDecay to Numpower --- docs/neural-network/optimizers/step-decay.md | 4 +- .../Optimizers/StepDecay/StepDecay.php | 115 ++++++++++++++++++ .../Optimizers/StepDecay/StepDecayTest.php | 100 +++++++++++++++ 3 files changed, 217 insertions(+), 2 deletions(-) create mode 100644 src/NeuralNet/Optimizers/StepDecay/StepDecay.php create mode 100644 tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php diff --git a/docs/neural-network/optimizers/step-decay.md b/docs/neural-network/optimizers/step-decay.md index 1a21f0804..0ec9395cc 100644 --- a/docs/neural-network/optimizers/step-decay.md +++ b/docs/neural-network/optimizers/step-decay.md @@ -12,7 +12,7 @@ A learning rate decay optimizer that reduces the global learning rate by a facto ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\StepDecay; +use Rubix\ML\NeuralNet\Optimizers\StepDecay\StepDecay; $optimizer = new StepDecay(0.1, 50, 1e-3); -``` \ No newline at end of file +``` diff --git a/src/NeuralNet/Optimizers/StepDecay/StepDecay.php b/src/NeuralNet/Optimizers/StepDecay/StepDecay.php new file mode 100644 index 000000000..246adc6c7 --- /dev/null +++ b/src/NeuralNet/Optimizers/StepDecay/StepDecay.php @@ -0,0 +1,115 @@ + + */ +class StepDecay implements Optimizer +{ + /** + * The learning rate that controls the global step size. + * + * @var float + */ + protected float $rate; + + /** + * The size of every floor in steps. i.e. the number of steps to take before applying another factor of decay. + * + * @var int + */ + protected int $losses; + + /** + * The factor to decrease the learning rate by over a period of k steps. + * + * @var float + */ + protected float $decay; + + /** + * The number of steps taken so far. + * + * @var int + */ + protected int $steps = 0; + + /** + * @param float $rate + * @param int $losses + * @param float $decay + * @throws InvalidArgumentException + */ + public function __construct(float $rate = 0.01, int $losses = 100, float $decay = 1e-3) + { + if ($rate <= 0.0) { + throw new InvalidArgumentException( + "Learning rate must be greater than 0, $rate given." + ); + } + + if ($losses < 1) { + throw new InvalidArgumentException( + "The number of steps per floor must be greater than 0, $losses given." + ); + } + + if ($decay < 0.0) { + throw new InvalidArgumentException( + "Decay rate must be positive, $decay given." + ); + } + + $this->rate = $rate; + $this->losses = $losses; + $this->decay = $decay; + } + + /** + * Take a step of gradient descent for a given parameter. + * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + $floor = floor($this->steps / $this->losses); + + $rate = $this->rate * (1.0 / (1.0 + $floor * $this->decay)); + + ++$this->steps; + + return NumPower::multiply($gradient, $rate); + } + + /** + * Return the string representation of the object. 
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Step Decay (rate: {$this->rate}, steps: {$this->losses}, decay: {$this->decay})"; + } +} diff --git a/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php b/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php new file mode 100644 index 000000000..f3535552b --- /dev/null +++ b/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php @@ -0,0 +1,100 @@ +optimizer = new StepDecay(rate: 0.001); + } + + #[Test] + #[TestDox('Throws exception when constructed with invalid learning rate')] + public function testConstructorWithInvalidRate() : void + { + $this->expectException(InvalidArgumentException::class); + + new StepDecay(rate: 0.0); + } + + #[Test] + #[TestDox('Throws exception when constructed with invalid losses')] + public function testConstructorWithInvalidLosses() : void + { + $this->expectException(InvalidArgumentException::class); + + new StepDecay(rate: 0.01, losses: 0); + } + + #[Test] + #[TestDox('Throws exception when constructed with invalid decay')] + public function testConstructorWithInvalidDecay() : void + { + $this->expectException(InvalidArgumentException::class); + + new StepDecay(rate: 0.01, losses: 100, decay: -0.1); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Step Decay (rate: 0.001, steps: 100, decay: 0.001)', (string) $this->optimizer); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[DataProvider('stepProvider')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} + From a02c4a08dc490ff158174a4890e8facb91140d23 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 7 Nov 2025 17:36:38 +0200 Subject: [PATCH 02/42] 390 convert RMSProp to Numpower --- docs/neural-network/optimizers/rms-prop.md | 4 +- src/NeuralNet/Optimizers/Base/Adaptive.php | 25 +++ src/NeuralNet/Optimizers/RMSProp/RMSProp.php | 158 ++++++++++++++++++ .../Optimizers/RMSProp/RMSPropTest.php | 127 ++++++++++++++ 4 files changed, 312 insertions(+), 2 deletions(-) create mode 100644 src/NeuralNet/Optimizers/Base/Adaptive.php create mode 100644 src/NeuralNet/Optimizers/RMSProp/RMSProp.php create mode 100644 tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php diff --git a/docs/neural-network/optimizers/rms-prop.md b/docs/neural-network/optimizers/rms-prop.md index fdca6fd05..ae6b847bc 100644 --- a/docs/neural-network/optimizers/rms-prop.md +++ b/docs/neural-network/optimizers/rms-prop.md @@ -11,10 +11,10 @@ An adaptive gradient technique that divides the current gradient over a rolling ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\RMSProp; +use Rubix\ML\NeuralNet\Optimizers\RMSProp\RMSProp; $optimizer = new RMSProp(0.01, 0.1); ``` ## References -[^1]: T. Tieleman et al. (2012). Lecture 6e rmsprop: Divide the gradient by a running average of its recent magnitude. \ No newline at end of file +[^1]: T. Tieleman et al. (2012). Lecture 6e rmsprop: Divide the gradient by a running average of its recent magnitude. 
diff --git a/src/NeuralNet/Optimizers/Base/Adaptive.php b/src/NeuralNet/Optimizers/Base/Adaptive.php new file mode 100644 index 000000000..35ee5323b --- /dev/null +++ b/src/NeuralNet/Optimizers/Base/Adaptive.php @@ -0,0 +1,25 @@ + + */ +interface Adaptive extends Optimizer +{ + /** + * Warm the parameter cache. + * + * @param Parameter $param + */ + public function warm(Parameter $param) : void; +} diff --git a/src/NeuralNet/Optimizers/RMSProp/RMSProp.php b/src/NeuralNet/Optimizers/RMSProp/RMSProp.php new file mode 100644 index 000000000..531f3ad80 --- /dev/null +++ b/src/NeuralNet/Optimizers/RMSProp/RMSProp.php @@ -0,0 +1,158 @@ + + */ +class RMSProp implements Optimizer, Adaptive +{ + /** + * The learning rate that controls the global step size. + * + * @var float + */ + protected float $rate; + + /** + * The rms decay rate. + * + * @var float + */ + protected float $decay; + + /** + * The opposite of the rms decay rate. + * + * @var float + */ + protected float $rho; + + /** + * The cache of running squared gradients. + * + * @var array + */ + protected array $cache = [ + // + ]; + + /** + * @param float $rate + * @param float $decay + * @throws InvalidArgumentException + */ + public function __construct(float $rate = 0.001, float $decay = 0.1) + { + if ($rate <= 0.0) { + throw new InvalidArgumentException( + "Learning rate must be greater than 0, $rate given." + ); + } + + if ($decay <= 0.0 or $decay >= 1.0) { + throw new InvalidArgumentException( + "Decay must be between 0 and 1, $decay given." + ); + } + + $this->rate = $rate; + $this->decay = $decay; + $this->rho = 1.0 - $decay; + } + + /** + * Warm the parameter cache. + * + * @internal + * + * @param Parameter $param + * @throws RuntimeException + */ + public function warm(Parameter $param) : void + { + $class = get_class($param->param()); + + if (!$class) { + throw new RuntimeException('Could not locate parameter class.'); + } + + $this->cache[$param->id()] = NumPower::zeros($param->param()->shape()); + } + + /** + * Take a step of gradient descent for a given parameter. + * + * RMSProp update (element-wise): + * v_t = ρ · v_{t-1} + (1 − ρ) · g_t^2 + * Δθ_t = η · g_t / max(sqrt(v_t), ε) + * + * where: + * - g_t is the current gradient, + * - v_t is the running average of squared gradients, + * - ρ = 1 − decay, η is the learning rate, + * - ε is a small constant to avoid division by zero (implemented by clipping √v_t to [ε, +∞)). + * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + $norm = $this->cache[$param->id()]; + + $norm = NumPower::add( + NumPower::multiply($norm, $this->rho), + NumPower::multiply(NumPower::square($gradient), $this->decay) + ); + + $this->cache[$param->id()] = $norm; + + $denominator = NumPower::sqrt($norm); + $denominator = NumPower::clip($denominator, EPSILON, PHP_FLOAT_MAX); + + return NumPower::divide( + NumPower::multiply($gradient, $this->rate), + $denominator + ); + } + + /** + * Return the string representation of the object. 
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "RMS Prop (rate: {$this->rate}, decay: {$this->decay})"; + } +} diff --git a/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php b/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php new file mode 100644 index 000000000..456bd54c0 --- /dev/null +++ b/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php @@ -0,0 +1,127 @@ +optimizer = new RMSProp(rate: 0.001, decay: 0.1); + } + + #[Test] + #[TestDox('Throws exception when constructed with zero rate')] + public function testConstructorWithZeroRate() : void + { + $this->expectException(InvalidArgumentException::class); + + new RMSProp(rate: 0.0); + } + + #[Test] + #[TestDox('Throws exception when constructed with negative rate')] + public function testConstructorWithNegativeRate() : void + { + $this->expectException(InvalidArgumentException::class); + + new RMSProp(rate: -0.001); + } + + #[Test] + #[TestDox('Throws exception when constructed with zero decay')] + public function testConstructorWithZeroDecay() : void + { + $this->expectException(InvalidArgumentException::class); + + new RMSProp(rate: 0.001, decay: 0.0); + } + + #[Test] + #[TestDox('Throws exception when constructed with decay equal to 1')] + public function testConstructorWithDecayEqualToOne() : void + { + $this->expectException(InvalidArgumentException::class); + + new RMSProp(rate: 0.001, decay: 1.0); + } + + #[Test] + #[TestDox('Throws exception when constructed with decay greater than 1')] + public function testConstructorWithDecayGreaterThanOne() : void + { + $this->expectException(InvalidArgumentException::class); + + new RMSProp(rate: 0.001, decay: 1.5); + } + + #[Test] + #[TestDox('Throws exception when constructed with negative decay')] + public function testConstructorWithNegativeDecay() : void + { + $this->expectException(InvalidArgumentException::class); + + new RMSProp(rate: 0.001, decay: -0.1); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('RMS Prop (rate: 0.001, decay: 0.1)', (string) $this->optimizer); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[DataProvider('stepProvider')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $this->optimizer->warm($param); + + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} From cccfa79140a0769604b5d92e36dd300a89405dff Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 7 Nov 2025 17:45:33 +0200 Subject: [PATCH 03/42] 390 added math explanation for step() methods --- src/NeuralNet/Optimizers/StepDecay/StepDecay.php | 12 ++++++++++++ src/NeuralNet/Optimizers/Stochastic/Stochastic.php | 7 +++++++ 2 files changed, 19 insertions(+) diff --git a/src/NeuralNet/Optimizers/StepDecay/StepDecay.php b/src/NeuralNet/Optimizers/StepDecay/StepDecay.php index 246adc6c7..abfeb6f7e 100644 --- a/src/NeuralNet/Optimizers/StepDecay/StepDecay.php +++ b/src/NeuralNet/Optimizers/StepDecay/StepDecay.php @@ -84,6 +84,18 @@ public function __construct(float $rate = 0.01, int $losses = 100, float $decay /** * Take a step of gradient descent for a given parameter. 
* + * Step Decay update (element-wise): + * floor = ⌊t / k⌋ + * η_t = η₀ / (1 + floor · λ) + * Δθ_t = η_t · g_t + * + * where: + * - t is the current step number, + * - k is the number of steps per floor, + * - η₀ is the initial learning rate, + * - λ is the decay factor, + * - g_t is the current gradient. + * * @internal * * @param Parameter $param diff --git a/src/NeuralNet/Optimizers/Stochastic/Stochastic.php b/src/NeuralNet/Optimizers/Stochastic/Stochastic.php index ffd9daf30..004489a78 100644 --- a/src/NeuralNet/Optimizers/Stochastic/Stochastic.php +++ b/src/NeuralNet/Optimizers/Stochastic/Stochastic.php @@ -44,6 +44,13 @@ public function __construct(float $rate = 0.01) /** * Take a step of gradient descent for a given parameter. * + * SGD update (element-wise): + * Δθ_t = η · g_t + * + * where: + * - g_t is the current gradient, + * - η is the learning rate. + * * @internal * * @param Parameter $param From f1c55e67537832e5e49a299a639fd3cfe163d5d2 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 7 Nov 2025 23:02:43 +0200 Subject: [PATCH 04/42] 390 convert Momentum to Numpower --- docs/neural-network/optimizers/momentum.md | 2 +- .../Optimizers/Momentum/Momentum.php | 164 ++++++++++++++++++ src/NeuralNet/Optimizers/RMSProp/RMSProp.php | 2 +- .../Optimizers/Momentum/MomentumTest.php | 154 ++++++++++++++++ .../Optimizers/RMSProp/RMSPropTest.php | 27 +++ 5 files changed, 347 insertions(+), 2 deletions(-) create mode 100644 src/NeuralNet/Optimizers/Momentum/Momentum.php create mode 100644 tests/NeuralNet/Optimizers/Momentum/MomentumTest.php diff --git a/docs/neural-network/optimizers/momentum.md b/docs/neural-network/optimizers/momentum.md index 7556ca008..017cf0efa 100644 --- a/docs/neural-network/optimizers/momentum.md +++ b/docs/neural-network/optimizers/momentum.md @@ -12,7 +12,7 @@ Momentum accelerates each update step by accumulating velocity from past updates ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\Momentum; +use Rubix\ML\NeuralNet\Optimizers\Momentum\Momentum; $optimizer = new Momentum(0.01, 0.1, true); ``` diff --git a/src/NeuralNet/Optimizers/Momentum/Momentum.php b/src/NeuralNet/Optimizers/Momentum/Momentum.php new file mode 100644 index 000000000..05e62fa0b --- /dev/null +++ b/src/NeuralNet/Optimizers/Momentum/Momentum.php @@ -0,0 +1,164 @@ + + */ +class Momentum implements Optimizer, Adaptive +{ + /** + * The learning rate that controls the global step size. + * + * @var float + */ + protected float $rate; + + /** + * The rate at which the momentum force decays. + * + * @var float + */ + protected float $decay; + + /** + * Should we employ Nesterov's lookahead (NAG) when updating the parameters? + * + * @var bool + */ + protected bool $lookahead; + + /** + * The parameter cache of velocity NDArrays. + * + * @var NDArray[] + */ + protected array $cache = [ + // + ]; + + /** + * @param float $rate + * @param float $decay + * @param bool $lookahead + * @throws InvalidArgumentException + */ + public function __construct(float $rate = 0.001, float $decay = 0.1, bool $lookahead = false) + { + if ($rate <= 0.0) { + throw new InvalidArgumentException( + "Learning rate must be greater than 0, $rate given." + ); + } + + if ($decay <= 0.0 or $decay >= 1.0) { + throw new InvalidArgumentException( + "Decay must be between 0 and 1, $decay given." + ); + } + + $this->rate = $rate; + $this->decay = $decay; + $this->lookahead = $lookahead; + } + + /** + * Warm the cache. 
+ * + * @internal + * + * @param Parameter $param + * @throws RuntimeException + */ + public function warm(Parameter $param) : void + { + $class = get_class($param->param()); + + if (!$class) { + throw new RuntimeException('Could not locate parameter class.'); + } + + $this->cache[$param->id()] = NumPower::zeros($param->param()->shape()); + } + + /** + * Take a step of gradient descent for a given parameter. + * + * Mathematical formulation (per-parameter element): + * - Velocity update: v_t = β · v_{t-1} + η · g_t + * where β = 1 − decay and η = rate, and g_t is the current gradient. + * - Returned step (the amount added to the parameter by the trainer): Δθ_t = v_t + * + * Nesterov lookahead (when lookahead = true): + * - We apply the same velocity update a second time to approximate NAG: + * v_t ← β · v_t + η · g_t + * + * Notes: + * - This method updates and caches the velocity tensor per Parameter id. + * - The actual parameter update is performed by the training loop using the returned velocity. + * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + $velocity = $this->cache[$param->id()]; + + // velocity = gradient * rate + velocity * (1 - decay) + $velocity = NumPower::add( + NumPower::multiply($gradient, $this->rate), + NumPower::multiply($velocity, 1.0 - $this->decay) + ); + + $this->cache[$param->id()] = $velocity; + + if ($this->lookahead) { + // Apply lookahead: velocity = gradient * rate + velocity * (1 - decay) + $velocity = NumPower::add( + NumPower::multiply($gradient, $this->rate), + NumPower::multiply($velocity, 1.0 - $this->decay) + ); + } + + return $velocity; + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Momentum (rate: {$this->rate}, decay: {$this->decay}," + . ' lookahead: ' . Params::toString($this->lookahead) . ')'; + } +} diff --git a/src/NeuralNet/Optimizers/RMSProp/RMSProp.php b/src/NeuralNet/Optimizers/RMSProp/RMSProp.php index 531f3ad80..7c08aebb2 100644 --- a/src/NeuralNet/Optimizers/RMSProp/RMSProp.php +++ b/src/NeuralNet/Optimizers/RMSProp/RMSProp.php @@ -56,7 +56,7 @@ class RMSProp implements Optimizer, Adaptive /** * The cache of running squared gradients. 
* - * @var array + * @var NDArray[] */ protected array $cache = [ // diff --git a/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php b/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php new file mode 100644 index 000000000..1b2a90378 --- /dev/null +++ b/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php @@ -0,0 +1,154 @@ +optimizer = new Momentum(rate: 0.001, decay: 0.1, lookahead: false); + } + + #[Test] + #[TestDox('Throws exception when constructed with zero rate')] + public function testConstructorWithZeroRate() : void + { + $this->expectException(InvalidArgumentException::class); + + new Momentum(rate: 0.0); + } + + #[Test] + #[TestDox('Throws exception when constructed with negative rate')] + public function testConstructorWithNegativeRate() : void + { + $this->expectException(InvalidArgumentException::class); + + new Momentum(rate: -0.001); + } + + #[Test] + #[TestDox('Throws exception when constructed with zero decay')] + public function testConstructorWithZeroDecay() : void + { + $this->expectException(InvalidArgumentException::class); + + new Momentum(rate: 0.001, decay: 0.0); + } + + #[Test] + #[TestDox('Throws exception when constructed with decay equal to 1')] + public function testConstructorWithDecayEqualToOne() : void + { + $this->expectException(InvalidArgumentException::class); + + new Momentum(rate: 0.001, decay: 1.0); + } + + #[Test] + #[TestDox('Throws exception when constructed with decay greater than 1')] + public function testConstructorWithDecayGreaterThanOne() : void + { + $this->expectException(InvalidArgumentException::class); + + new Momentum(rate: 0.001, decay: 1.5); + } + + #[Test] + #[TestDox('Throws exception when constructed with negative decay')] + public function testConstructorWithNegativeDecay() : void + { + $this->expectException(InvalidArgumentException::class); + + new Momentum(rate: 0.001, decay: -0.1); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Momentum (rate: 0.001, decay: 0.1, lookahead: false)', (string) $this->optimizer); + } + + #[Test] + #[TestDox('Warm initializes a zeroed velocity cache with the parameter\'s shape')] + public function testWarmInitializesZeroedCache() : void + { + $param = new Parameter(NumPower::array([ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ])); + + // Warm the optimizer for this parameter + $this->optimizer->warm($param); + + // Use reflection to read the protected cache + $ref = new \ReflectionClass($this->optimizer); + $prop = $ref->getProperty('cache'); + $prop->setAccessible(true); + $cache = $prop->getValue($this->optimizer); + + self::assertArrayHasKey($param->id(), $cache); + + $velocity = $cache[$param->id()]; + + // Verify the velocity is an all-zeros tensor of the correct shape + $zeros = NumPower::zeros($param->param()->shape()); + self::assertEqualsWithDelta($zeros->toArray(), $velocity->toArray(), 0.0); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[DataProvider('stepProvider')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $this->optimizer->warm($param); + + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} diff --git a/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php b/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php index 456bd54c0..09d43ac24 100644 --- a/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php +++ 
b/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php
@@ -110,6 +110,33 @@ public function testToString() : void
         self::assertEquals('RMS Prop (rate: 0.001, decay: 0.1)', (string) $this->optimizer);
     }
 
+    #[Test]
+    #[TestDox('Warm initializes a zeroed velocity cache with the parameter\'s shape')]
+    public function testWarmInitializesZeroedCache() : void
+    {
+        $param = new Parameter(NumPower::array([
+            [1.0, 2.0, 3.0],
+            [4.0, 5.0, 6.0],
+        ]));
+
+        // Warm the optimizer for this parameter
+        $this->optimizer->warm($param);
+
+        // Use reflection to read the protected cache
+        $ref = new \ReflectionClass($this->optimizer);
+        $prop = $ref->getProperty('cache');
+        $prop->setAccessible(true);
+        $cache = $prop->getValue($this->optimizer);
+
+        self::assertArrayHasKey($param->id(), $cache);
+
+        $velocity = $cache[$param->id()];
+
+        // Verify the velocity is an all-zeros tensor of the correct shape
+        $zeros = NumPower::zeros($param->param()->shape());
+        self::assertEqualsWithDelta($zeros->toArray(), $velocity->toArray(), 0.0);
+    }
+
     /**
      * @param Parameter $param
      * @param NDArray $gradient

From 919ce3629cf005ec20fb9f8a4070e2dc2d1835be Mon Sep 17 00:00:00 2001
From: Samuel Akopyan
Date: Sat, 8 Nov 2025 18:54:42 +0200
Subject: [PATCH 05/42] 390 convert Cyclical to NumPower

---
 docs/neural-network/optimizers/cyclical.md |  26 ++-
 .../Optimizers/Cyclical/Cyclical.php       | 166 ++++++++++++++++++
 .../Optimizers/Cyclical/CyclicalTest.php   | 143 +++++++++++++++
 3 files changed, 332 insertions(+), 3 deletions(-)
 create mode 100644 src/NeuralNet/Optimizers/Cyclical/Cyclical.php
 create mode 100644 tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php

diff --git a/docs/neural-network/optimizers/cyclical.md b/docs/neural-network/optimizers/cyclical.md
index 9773004da..f6d3940db 100644
--- a/docs/neural-network/optimizers/cyclical.md
+++ b/docs/neural-network/optimizers/cyclical.md
@@ -1,8 +1,28 @@
-[source]
+[source]
 
 # Cyclical
 The Cyclical optimizer uses a global learning rate that cycles between the lower and upper bound over a designated period while also decaying the upper bound by a factor at each step. Cyclical learning rates have been shown to help escape bad local minima and saddle points of the gradient.
 
+## Mathematical formulation
+Per step (element-wise), the cyclical learning rate and update are computed as:
+
+$$
+\begin{aligned}
+\text{cycle} &= \left\lfloor 1 + \frac{t}{2\,\text{steps}} \right\rfloor \\
+x &= \left| \frac{t}{\text{steps}} - 2\,\text{cycle} + 1 \right| \\
+\text{scale} &= \text{decay}^{\,t} \\
+\eta_t &= \text{lower} + (\text{upper} - \text{lower})\,\max\bigl(0,\,1 - x\bigr)\,\text{scale} \\
+\Delta\theta_t &= \eta_t\,g_t
+\end{aligned}
+$$
+
+where:
+- `t` is the current step counter,
+- `steps` is the number of steps in every half cycle,
+- `lower` and `upper` are the learning rate bounds,
+- `decay` is the multiplicative decay applied each step,
+- `g_t` is the current gradient.
+
 ## Parameters
 | # | Name | Default | Type | Description |
 |---|---|---|---|---|
@@ -13,10 +33,10 @@ The Cyclical optimizer uses a global learning rate that cycles between the lower
 
 ## Example
 ```php
-use Rubix\ML\NeuralNet\Optimizers\Cyclical;
+use Rubix\ML\NeuralNet\Optimizers\Cyclical\Cyclical;
 
 $optimizer = new Cyclical(0.001, 0.005, 1000);
 ```
 
 ## References
-[^1]: L. N. Smith. (2017). Cyclical Learning Rates for Training Neural Networks. 
\ No newline at end of file
+[^1]: L. N. Smith. (2017). Cyclical Learning Rates for Training Neural Networks.
diff --git a/src/NeuralNet/Optimizers/Cyclical/Cyclical.php b/src/NeuralNet/Optimizers/Cyclical/Cyclical.php
new file mode 100644
index 000000000..ac22d9d52
--- /dev/null
+++ b/src/NeuralNet/Optimizers/Cyclical/Cyclical.php
@@ -0,0 +1,166 @@
+
+ */
+class Cyclical implements Optimizer
+{
+    /**
+     * The lower bound on the learning rate.
+     *
+     * @var float
+     */
+    protected float $lower;
+
+    /**
+     * The upper bound on the learning rate.
+     *
+     * @var float
+     */
+    protected float $upper;
+
+    /**
+     * The range of the learning rate.
+     *
+     * @var float
+     */
+    protected float $range;
+
+    /**
+     * The number of steps in every cycle.
+     *
+     * @var int
+     */
+    protected int $losses;
+
+    /**
+     * The exponential scaling factor applied to each step as decay.
+     *
+     * @var float
+     */
+    protected float $decay;
+
+    /**
+     * The number of steps taken so far.
+     *
+     * @var int
+     */
+    protected int $t = 0;
+
+    /**
+     * @param float $lower
+     * @param float $upper
+     * @param int $losses
+     * @param float $decay
+     * @throws InvalidArgumentException
+     */
+    public function __construct(
+        float $lower = 0.001,
+        float $upper = 0.006,
+        int $losses = 2000,
+        float $decay = 0.99994
+    ) {
+        if ($lower <= 0.0) {
+            throw new InvalidArgumentException(
+                "Lower bound must be greater than 0, $lower given."
+            );
+        }
+
+        if ($lower > $upper) {
+            throw new InvalidArgumentException(
+                'Lower bound cannot be greater than the upper bound.'
+            );
+        }
+
+        if ($losses < 1) {
+            throw new InvalidArgumentException(
+                "The number of steps per cycle must be greater than 0, $losses given."
+            );
+        }
+
+        if ($decay <= 0.0 or $decay >= 1.0) {
+            throw new InvalidArgumentException(
+                "Decay must be between 0 and 1, $decay given."
+            );
+        }
+
+        $this->lower = $lower;
+        $this->upper = $upper;
+        $this->range = $upper - $lower;
+        $this->losses = $losses;
+        $this->decay = $decay;
+    }
+
+    /**
+     * Take a step of gradient descent for a given parameter.
+     *
+     * Cyclical learning rate schedule (per-step, element-wise update):
+     * - Cycle index: cycle = floor(1 + t / (2 · losses))
+     * - Triangular position: x = | t / losses − 2 · cycle + 1 |
+     * - Exponential decay: scale = decay^t
+     * - Learning rate at t: η_t = lower + (upper − lower) · max(0, 1 − x) · scale
+     * - Returned step: Δθ_t = η_t · g_t
+     *
+     * where:
+     * - t is the current step counter (incremented after computing η_t),
+     * - losses is the number of steps per cycle,
+     * - lower and upper are the learning rate bounds,
+     * - decay is the multiplicative decay applied each step,
+     * - g_t is the current gradient.
+     *
+     * @internal
+     *
+     * @param Parameter $param
+     * @param NDArray $gradient
+     * @return NDArray
+     */
+    public function step(Parameter $param, NDArray $gradient) : NDArray
+    {
+        $cycle = floor(1 + $this->t / (2 * $this->losses));
+
+        $x = abs($this->t / $this->losses - 2 * $cycle + 1);
+
+        $scale = $this->decay ** $this->t;
+
+        $rate = $this->lower + $this->range * max(0, 1 - $x) * $scale;
+
+        ++$this->t;
+
+        return NumPower::multiply($gradient, $rate);
+    }
+
+    /**
+     * Return the string representation of the object.
+     *
+     * @internal
+     *
+     * @return string
+     */
+    public function __toString() : string
+    {
+        return "Cyclical (lower: {$this->lower}, upper: {$this->upper},"
+            . 
" steps: {$this->losses}, decay: {$this->decay})"; + } +} diff --git a/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php b/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php new file mode 100644 index 000000000..7d1691fe8 --- /dev/null +++ b/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php @@ -0,0 +1,143 @@ +optimizer = new Cyclical(lower: 0.001, upper: 0.006, losses: 2000); + } + + #[Test] + #[TestDox('Throws exception when constructed with zero lower bound')] + public function testConstructorWithZeroLower() : void + { + $this->expectException(InvalidArgumentException::class); + new Cyclical(lower: 0.0, upper: 0.006, losses: 2000); + } + + #[Test] + #[TestDox('Throws exception when constructed with negative lower bound')] + public function testConstructorWithNegativeLower() : void + { + $this->expectException(InvalidArgumentException::class); + new Cyclical(lower: -0.001, upper: 0.006, losses: 2000); + } + + #[Test] + #[TestDox('Throws exception when lower bound is greater than upper bound')] + public function testConstructorWithLowerGreaterThanUpper() : void + { + $this->expectException(InvalidArgumentException::class); + new Cyclical(lower: 0.01, upper: 0.006, losses: 2000); + } + + #[Test] + #[TestDox('Throws exception when constructed with zero steps per cycle')] + public function testConstructorWithZeroSteps() : void + { + $this->expectException(InvalidArgumentException::class); + new Cyclical(lower: 0.001, upper: 0.006, losses: 0); + } + + #[Test] + #[TestDox('Throws exception when constructed with negative steps per cycle')] + public function testConstructorWithNegativeSteps() : void + { + $this->expectException(InvalidArgumentException::class); + new Cyclical(lower: 0.001, upper: 0.006, losses: -5); + } + + #[Test] + #[TestDox('Throws exception when constructed with zero decay')] + public function testConstructorWithZeroDecay() : void + { + $this->expectException(InvalidArgumentException::class); + new Cyclical(lower: 0.001, upper: 0.006, losses: 2000, decay: 0.0); + } + + #[Test] + #[TestDox('Throws exception when constructed with decay equal to 1')] + public function testConstructorWithDecayEqualToOne() : void + { + $this->expectException(InvalidArgumentException::class); + new Cyclical(lower: 0.001, upper: 0.006, losses: 2000, decay: 1.0); + } + + #[Test] + #[TestDox('Throws exception when constructed with decay greater than 1')] + public function testConstructorWithDecayGreaterThanOne() : void + { + $this->expectException(InvalidArgumentException::class); + new Cyclical(lower: 0.001, upper: 0.006, losses: 2000, decay: 1.5); + } + + #[Test] + #[TestDox('Throws exception when constructed with negative decay')] + public function testConstructorWithNegativeDecay() : void + { + $this->expectException(InvalidArgumentException::class); + new Cyclical(lower: 0.001, upper: 0.006, losses: 2000, decay: -0.1); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Cyclical (lower: 0.001, upper: 0.006, steps: 2000, decay: 0.99994)', (string) $this->optimizer); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[DataProvider('stepProvider')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} From d806494ef7db5ca520d1c121bda49f1d63724fb4 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan 
Date: Sat, 8 Nov 2025 18:59:59 +0200 Subject: [PATCH 06/42] 390 added math formulas to momentum.md --- docs/neural-network/optimizers/momentum.md | 27 +++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/docs/neural-network/optimizers/momentum.md b/docs/neural-network/optimizers/momentum.md index 017cf0efa..f949a4115 100644 --- a/docs/neural-network/optimizers/momentum.md +++ b/docs/neural-network/optimizers/momentum.md @@ -1,8 +1,33 @@ -[source] +[source] # Momentum Momentum accelerates each update step by accumulating velocity from past updates and adding a factor of the previous velocity to the current step. Momentum can help speed up training and escape bad local minima when compared with [Stochastic](stochastic.md) Gradient Descent. +## Mathematical formulation +Per step (element-wise), Momentum updates the velocity and applies it as the parameter step: + +$$ +\begin{aligned} +\beta &= 1 - \text{decay}, \quad \eta = \text{rate} \\ +\text{Velocity update:}\quad v_t &= \beta\,v_{t-1} + \eta\,g_t \\ +\text{Returned step:}\quad \Delta\theta_t &= v_t +\end{aligned} +$$ + +Nesterov lookahead (when `lookahead = true`) is approximated by applying the velocity update a second time: + +$$ +\begin{aligned} +v_t &\leftarrow \beta\,v_t + \eta\,g_t +\end{aligned} +$$ + +where: +- `g_t` is the current gradient, +- `v_t` is the velocity (accumulated update), +- `\beta` is the momentum coefficient (`1 − decay`), +- `\eta` is the learning rate (`rate`). + ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| From 3fa08ec89e9132a43c5ab4e34319404ce8f140d9 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sat, 8 Nov 2025 19:10:18 +0200 Subject: [PATCH 07/42] 390 added math formulas to rms-prop.md --- docs/neural-network/optimizers/cyclical.md | 10 +++++----- docs/neural-network/optimizers/momentum.md | 8 ++++---- docs/neural-network/optimizers/rms-prop.md | 22 ++++++++++++++++++++-- 3 files changed, 29 insertions(+), 11 deletions(-) diff --git a/docs/neural-network/optimizers/cyclical.md b/docs/neural-network/optimizers/cyclical.md index f6d3940db..eed8b2779 100644 --- a/docs/neural-network/optimizers/cyclical.md +++ b/docs/neural-network/optimizers/cyclical.md @@ -17,11 +17,11 @@ x &= \left| \frac{t}{\text{steps}} - 2\,\text{cycle} + 1 \right| \\ $$ where: -- `t` is the current step counter, -- `steps` is the number of steps in every half cycle, -- `lower` and `upper` are the learning rate bounds, -- `decay` is the multiplicative decay applied each step, -- `g_t` is the current gradient. +- $t$ is the current step counter, +- $steps$ is the number of steps in every half cycle, +- $lower$ and $upper$ are the learning rate bounds, +- $decay$ is the multiplicative decay applied each step, +- $g_t$ is the current gradient. ## Parameters | # | Name | Default | Type | Description | diff --git a/docs/neural-network/optimizers/momentum.md b/docs/neural-network/optimizers/momentum.md index f949a4115..e9c787a2f 100644 --- a/docs/neural-network/optimizers/momentum.md +++ b/docs/neural-network/optimizers/momentum.md @@ -23,10 +23,10 @@ v_t &\leftarrow \beta\,v_t + \eta\,g_t $$ where: -- `g_t` is the current gradient, -- `v_t` is the velocity (accumulated update), -- `\beta` is the momentum coefficient (`1 − decay`), -- `\eta` is the learning rate (`rate`). +- $g_t$ is the current gradient, +- $v_t$ is the velocity (accumulated update), +- $\beta$ is the momentum coefficient ($1 − decay$), +- $\eta$ is the learning rate ($rate$). 
## Parameters | # | Name | Default | Type | Description | diff --git a/docs/neural-network/optimizers/rms-prop.md b/docs/neural-network/optimizers/rms-prop.md index ae6b847bc..c531a863e 100644 --- a/docs/neural-network/optimizers/rms-prop.md +++ b/docs/neural-network/optimizers/rms-prop.md @@ -1,7 +1,25 @@ -[source] +[source] # RMS Prop -An adaptive gradient technique that divides the current gradient over a rolling window of the magnitudes of recent gradients. Unlike [AdaGrad](adagrad.md), RMS Prop does not suffer from an infinitely decaying step size. +An adaptive gradient technique that divides the current gradient over a rolling window of magnitudes of recent gradients. Unlike [AdaGrad](adagrad.md), RMS Prop does not suffer from an infinitely decaying step size. + +## Mathematical formulation +Per step (element-wise), RMSProp maintains a running average of squared gradients and scales the step by the root-mean-square: + +$$ +\begin{aligned} +\rho &= 1 - \text{decay}, \quad \eta = \text{rate} \\ +\text{Running average:}\quad v_t &= \rho\,v_{t-1} + (1 - \rho)\,g_t^{\,2} \\ +\text{Returned step:}\quad \Delta\theta_t &= \frac{\eta\,g_t}{\max\bigl(\sqrt{v_t},\,\varepsilon\bigr)} +\end{aligned} +$$ + +where: +- $g_t$ - is the current gradient, +- $v_t$ - is the running average of squared gradients, +- $\rho$ - is the averaging coefficient ($1 − decay$), +- $\eta$ - is the learning rate ($rate$), +- $\varepsilon$ - is a small constant to avoid division by zero (implemented by clipping $\sqrt{v_t}$ to $[ε, +∞)$). ## Parameters | # | Name | Default | Type | Description | From 537b586d807fae161b3c6f7760d3a495e78cce52 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sat, 8 Nov 2025 19:19:08 +0200 Subject: [PATCH 08/42] 390 added math formulas to stochastic.md --- docs/neural-network/optimizers/step-decay.md | 20 ++++++++++++++++++- docs/neural-network/optimizers/stochastic.md | 14 +++++++++++++ .../Optimizers/Stochastic/Stochastic.php | 4 +++- 3 files changed, 36 insertions(+), 2 deletions(-) diff --git a/docs/neural-network/optimizers/step-decay.md b/docs/neural-network/optimizers/step-decay.md index 0ec9395cc..f5da99c8b 100644 --- a/docs/neural-network/optimizers/step-decay.md +++ b/docs/neural-network/optimizers/step-decay.md @@ -1,8 +1,26 @@ -[source] +[source] # Step Decay A learning rate decay optimizer that reduces the global learning rate by a factor whenever it reaches a new *floor*. The number of steps needed to reach a new floor is defined by the *steps* hyper-parameter. +## Mathematical formulation +Per step (element-wise), the Step Decay learning rate and update are: + +$$ +\begin{aligned} +\text{floor} &= \left\lfloor \frac{t}{k} \right\rfloor \\ +\eta_t &= \frac{\eta_0}{1 + \text{floor}\cdot \lambda} \\ +\Delta\theta_t &= \eta_t\,g_t +\end{aligned} +$$ + +where: +- $t$ is the current step number, +- $k$ is the number of steps per floor, +- $\eta_0$ is the initial learning rate ($rate$), +- $\lambda$ is the decay factor ($decay$), +- $g_t$ is the current gradient. + ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| diff --git a/docs/neural-network/optimizers/stochastic.md b/docs/neural-network/optimizers/stochastic.md index 4422e0ddc..bb0096b87 100644 --- a/docs/neural-network/optimizers/stochastic.md +++ b/docs/neural-network/optimizers/stochastic.md @@ -3,6 +3,20 @@ # Stochastic A constant learning rate optimizer based on vanilla Stochastic Gradient Descent (SGD). 
+## Mathematical formulation +Per step (element-wise), the SGD update scales the gradient by a constant learning rate: + +$$ +\begin{aligned} +\eta &= \text{rate} \\ +\Delta\theta_t &= \eta\,g_t +\end{aligned} +$$ + +where: +- $g_t$ is the current gradient, +- $\eta$ is the learning rate ($rate$). + ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| diff --git a/src/NeuralNet/Optimizers/Stochastic/Stochastic.php b/src/NeuralNet/Optimizers/Stochastic/Stochastic.php index 004489a78..b2cd6ebac 100644 --- a/src/NeuralNet/Optimizers/Stochastic/Stochastic.php +++ b/src/NeuralNet/Optimizers/Stochastic/Stochastic.php @@ -35,7 +35,9 @@ class Stochastic implements Optimizer public function __construct(float $rate = 0.01) { if ($rate <= 0.0) { - throw new InvalidArgumentException("Learning rate must be greater than 0, $rate given."); + throw new InvalidArgumentException( + "Learning rate must be greater than 0, $rate given." + ); } $this->rate = $rate; From 331fb3639329c9c021e1a783ea20d8df59815f43 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Tue, 11 Nov 2025 16:59:27 +0200 Subject: [PATCH 09/42] 390 convert Adam to NumPower --- docs/neural-network/optimizers/adam.md | 27 ++- src/NeuralNet/Optimizers/Adam/Adam.php | 181 +++++++++++++++++++ tests/NeuralNet/Optimizers/Adam/AdamTest.php | 101 +++++++++++ 3 files changed, 306 insertions(+), 3 deletions(-) create mode 100644 src/NeuralNet/Optimizers/Adam/Adam.php create mode 100644 tests/NeuralNet/Optimizers/Adam/AdamTest.php diff --git a/docs/neural-network/optimizers/adam.md b/docs/neural-network/optimizers/adam.md index 3b9898649..0470a9d4a 100644 --- a/docs/neural-network/optimizers/adam.md +++ b/docs/neural-network/optimizers/adam.md @@ -1,8 +1,29 @@ -[source] +[source] # Adam Short for *Adaptive Moment Estimation*, the Adam Optimizer combines both Momentum and RMS properties. In addition to storing an exponentially decaying average of past squared gradients like [RMSprop](rms-prop.md), Adam also keeps an exponentially decaying average of past gradients, similar to [Momentum](momentum.md). Whereas Momentum can be seen as a ball running down a slope, Adam behaves like a heavy ball with friction. +## Mathematical formulation +Per step (element-wise), Adam maintains exponentially decaying moving averages of the gradient and its element-wise square and uses them to scale the update: + +$$ +\begin{aligned} +\mathbf{v}_t &= (1 - \beta_1)\,\mathbf{v}_{t-1} + \beta_1\,\mathbf{g}_t \\ +\mathbf{n}_t &= (1 - \beta_2)\,\mathbf{n}_{t-1} + \beta_2\,\mathbf{g}_t^{2} \\ +\Delta{\theta}_t &= \alpha\, \frac{\mathbf{v}_t}{\sqrt{\mathbf{n}_t} + \varepsilon} +\end{aligned} +$$ + +where: +- $t$ is the current step, +- $\alpha$ is the learning rate (`rate`), +- $\beta_1$ is the momentum decay (`momentumDecay`), +- $\beta_2$ is the norm decay (`normDecay`), +- $\mathbf{g}_t$ is the current gradient, and $\mathbf{g}_t^{2}$ denotes element-wise square, +- $\varepsilon$ is a small constant for numerical stability (in the implementation, the denominator is clipped from below by `EPSILON`). + +Note: This formulation follows the implementation in Rubix ML and does not include bias-correction terms. 
+ ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| @@ -12,10 +33,10 @@ Short for *Adaptive Moment Estimation*, the Adam Optimizer combines both Momentu ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\Adam; +use Rubix\ML\NeuralNet\Optimizers\Adam\Adam; $optimizer = new Adam(0.0001, 0.1, 0.001); ``` ## References -[^1]: D. P. Kingma et al. (2014). Adam: A Method for Stochastic Optimization. \ No newline at end of file +[^1]: D. P. Kingma et al. (2014). Adam: A Method for Stochastic Optimization. diff --git a/src/NeuralNet/Optimizers/Adam/Adam.php b/src/NeuralNet/Optimizers/Adam/Adam.php new file mode 100644 index 000000000..fad8ac1bf --- /dev/null +++ b/src/NeuralNet/Optimizers/Adam/Adam.php @@ -0,0 +1,181 @@ + + */ +class Adam implements Optimizer, Adaptive +{ + /** + * The learning rate that controls the global step size. + * + * @var float + */ + protected float $rate; + + /** + * The momentum decay rate. + * + * @var float + */ + protected float $momentumDecay; + + /** + * The decay rate of the previous norms. + * + * @var float + */ + protected float $normDecay; + + /** + * The parameter cache of running velocity and squared gradients. + * + * @var array{0: NDArray, 1: NDArray}[] + */ + protected array $cache = [ + // id => [velocity, norm] + ]; + + /** + * @param float $rate + * @param float $momentumDecay + * @param float $normDecay + * @throws InvalidArgumentException + */ + public function __construct(float $rate = 0.001, float $momentumDecay = 0.1, float $normDecay = 0.001) + { + if ($rate <= 0.0) { + throw new InvalidArgumentException( + "Learning rate must be greater than 0, $rate given." + ); + } + + if ($momentumDecay <= 0.0 or $momentumDecay >= 1.0) { + throw new InvalidArgumentException( + "Momentum decay must be between 0 and 1, $momentumDecay given." + ); + } + + if ($normDecay <= 0.0 or $normDecay >= 1.0) { + throw new InvalidArgumentException( + "Norm decay must be between 0 and 1, $normDecay given." + ); + } + + $this->rate = $rate; + $this->momentumDecay = $momentumDecay; + $this->normDecay = $normDecay; + } + + /** + * Warm the parameter cache. + * + * @internal + * + * @param Parameter $param + * @throws RuntimeException + */ + public function warm(Parameter $param) : void + { + $class = get_class($param->param()); + + if (!$class) { + throw new RuntimeException('Could not locate parameter class.'); + } + + $zeros = NumPower::zeros($param->param()->shape()); + + $this->cache[$param->id()] = [clone $zeros, $zeros]; + } + + /** + * Take a step of gradient descent for a given parameter. + * + * Adam update (element-wise): + * v_t = v_{t-1} + β1 · (g_t − v_{t-1}) // exponential moving average of gradients + * n_t = n_{t-1} + β2 · (g_t^2 − n_{t-1}) // exponential moving average of squared gradients + * Δθ_t = η · v_t / max(√n_t, ε) + * + * where: + * - g_t is the current gradient, + * - v_t is the running average of gradients ("velocity"), β1 = momentumDecay, + * - n_t is the running average of squared gradients ("norm"), β2 = normDecay, + * - η is the learning rate (rate), ε is a small constant to avoid division by zero (implemented by clipping √n_t to [ε, +∞)). 
+ * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + [$velocity, $norm] = $this->cache[$param->id()]; + + $vHat = NumPower::multiply( + NumPower::subtract($gradient, $velocity), + $this->momentumDecay + ); + + $velocity = NumPower::add($velocity, $vHat); + + $nHat = NumPower::multiply( + NumPower::subtract(NumPower::square($gradient), $norm), + $this->normDecay + ); + + $norm = NumPower::add($norm, $nHat); + + $this->cache[$param->id()] = [$velocity, $norm]; + + $denominator = NumPower::sqrt($norm); + $denominator = NumPower::clip($denominator, EPSILON, PHP_FLOAT_MAX); + + return NumPower::divide( + NumPower::multiply($velocity, $this->rate), + $denominator + ); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Adam (rate: {$this->rate}, momentum decay: {$this->momentumDecay}," + . " norm decay: {$this->normDecay})"; + } +} diff --git a/tests/NeuralNet/Optimizers/Adam/AdamTest.php b/tests/NeuralNet/Optimizers/Adam/AdamTest.php new file mode 100644 index 000000000..dbd6cedd6 --- /dev/null +++ b/tests/NeuralNet/Optimizers/Adam/AdamTest.php @@ -0,0 +1,101 @@ += 1) + yield [0.001, 0.0, 0.001]; + yield [0.001, -0.1, 0.001]; + yield [0.001, 1.0, 0.001]; + yield [0.001, 1.1, 0.001]; + + // Invalid normDecay (<= 0 or >= 1) + yield [0.001, 0.1, 0.0]; + yield [0.001, 0.1, -0.1]; + yield [0.001, 0.1, 1.0]; + yield [0.001, 0.1, 1.1]; + } + + public static function stepProvider() : Generator + { + yield [ + new Parameter(NumPower::array([ + [0.1, 0.6, -0.4], + [0.5, 0.6, -0.4], + [0.1, 0.1, -0.7], + ])), + NumPower::array([ + [0.01, 0.05, -0.02], + [-0.01, 0.02, 0.03], + [0.04, -0.01, -0.5], + ]), + [ + [0.0031622, 0.0031622, -0.0031622], + [-0.0031622, 0.0031622, 0.0031622], + [0.0031622, -0.0031622, -0.0031622], + ], + ]; + } + + protected function setUp() : void + { + $this->optimizer = new Adam( + rate: 0.001, + momentumDecay: 0.1, + normDecay: 0.001 + ); + } + + public function testToString() : void + { + $expected = 'Adam (rate: 0.001, momentum decay: 0.1, norm decay: 0.001)'; + self::assertSame($expected, (string) $this->optimizer); + } + + #[DataProvider('invalidConstructorProvider')] + public function testInvalidConstructorParams(float $rate, float $momentumDecay, float $normDecay) : void + { + $this->expectException(InvalidArgumentException::class); + new Adam(rate: $rate, momentumDecay: $momentumDecay, normDecay: $normDecay); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[DataProvider('stepProvider')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $this->optimizer->warm($param); + + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} From 47ad66599d0a102df19a1dd8eda120a80d8e18c4 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Tue, 11 Nov 2025 17:02:17 +0200 Subject: [PATCH 10/42] 390 refactoring CyclicalTest - added dataprovider for constructor tests --- .../Optimizers/Cyclical/CyclicalTest.php | 100 ++++++------------ 1 file changed, 31 insertions(+), 69 deletions(-) diff --git a/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php b/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php index 7d1691fe8..5bab9a6c1 100644 --- 
a/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php +++ b/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php @@ -23,6 +23,19 @@ class CyclicalTest extends TestCase { protected Cyclical $optimizer; + public static function invalidConstructorProvider() : Generator + { + yield 'zero lower' => [0.0, 0.006, 2000, null]; + yield 'negative lower' => [-0.001, 0.006, 2000, null]; + yield 'lower > upper' => [0.01, 0.006, 2000, null]; + yield 'zero steps' => [0.001, 0.006, 0, null]; + yield 'negative steps' => [0.001, 0.006, -5, null]; + yield 'zero decay' => [0.001, 0.006, 2000, 0.0]; + yield 'decay == 1' => [0.001, 0.006, 2000, 1.0]; + yield 'decay > 1' => [0.001, 0.006, 2000, 1.5]; + yield 'negative decay' => [0.001, 0.006, 2000, -0.1]; + } + public static function stepProvider() : Generator { yield [ @@ -50,82 +63,31 @@ protected function setUp() : void } #[Test] - #[TestDox('Throws exception when constructed with zero lower bound')] - public function testConstructorWithZeroLower() : void - { - $this->expectException(InvalidArgumentException::class); - new Cyclical(lower: 0.0, upper: 0.006, losses: 2000); - } - - #[Test] - #[TestDox('Throws exception when constructed with negative lower bound')] - public function testConstructorWithNegativeLower() : void - { - $this->expectException(InvalidArgumentException::class); - new Cyclical(lower: -0.001, upper: 0.006, losses: 2000); - } - - #[Test] - #[TestDox('Throws exception when lower bound is greater than upper bound')] - public function testConstructorWithLowerGreaterThanUpper() : void - { - $this->expectException(InvalidArgumentException::class); - new Cyclical(lower: 0.01, upper: 0.006, losses: 2000); - } - - #[Test] - #[TestDox('Throws exception when constructed with zero steps per cycle')] - public function testConstructorWithZeroSteps() : void - { - $this->expectException(InvalidArgumentException::class); - new Cyclical(lower: 0.001, upper: 0.006, losses: 0); - } - - #[Test] - #[TestDox('Throws exception when constructed with negative steps per cycle')] - public function testConstructorWithNegativeSteps() : void - { - $this->expectException(InvalidArgumentException::class); - new Cyclical(lower: 0.001, upper: 0.006, losses: -5); - } - - #[Test] - #[TestDox('Throws exception when constructed with zero decay')] - public function testConstructorWithZeroDecay() : void - { - $this->expectException(InvalidArgumentException::class); - new Cyclical(lower: 0.001, upper: 0.006, losses: 2000, decay: 0.0); - } - - #[Test] - #[TestDox('Throws exception when constructed with decay equal to 1')] - public function testConstructorWithDecayEqualToOne() : void - { - $this->expectException(InvalidArgumentException::class); - new Cyclical(lower: 0.001, upper: 0.006, losses: 2000, decay: 1.0); - } - - #[Test] - #[TestDox('Throws exception when constructed with decay greater than 1')] - public function testConstructorWithDecayGreaterThanOne() : void + #[TestDox('Can be cast to a string')] + public function testToString() : void { - $this->expectException(InvalidArgumentException::class); - new Cyclical(lower: 0.001, upper: 0.006, losses: 2000, decay: 1.5); + self::assertEquals('Cyclical (lower: 0.001, upper: 0.006, steps: 2000, decay: 0.99994)', (string) $this->optimizer); } + /** + * @param float $lower + * @param float $upper + * @param int $losses + * @param float|null $decay + * @return void + */ #[Test] - #[TestDox('Throws exception when constructed with negative decay')] - public function testConstructorWithNegativeDecay() : void + 
#[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testConstructorInvalidArgs(float $lower, float $upper, int $losses, ?float $decay) : void { $this->expectException(InvalidArgumentException::class); - new Cyclical(lower: 0.001, upper: 0.006, losses: 2000, decay: -0.1); - } - #[Test] - #[TestDox('Can be cast to a string')] - public function testToString() : void - { - self::assertEquals('Cyclical (lower: 0.001, upper: 0.006, steps: 2000, decay: 0.99994)', (string) $this->optimizer); + if ($decay === null) { + new Cyclical(lower: $lower, upper: $upper, losses: $losses); + } else { + new Cyclical(lower: $lower, upper: $upper, losses: $losses, decay: $decay); + } } /** From 3575565b042faa80eae294374479cecf9315e652 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Tue, 11 Nov 2025 17:05:06 +0200 Subject: [PATCH 11/42] 390 refactoring CyclicalTest - added dataprovider for constructor tests --- tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php b/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php index 5bab9a6c1..aa7102f0f 100644 --- a/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php +++ b/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php @@ -95,7 +95,9 @@ public function testConstructorInvalidArgs(float $lower, float $upper, int $loss * @param NDArray $gradient * @param list> $expected */ + #[Test] #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] public function testStep(Parameter $param, NDArray $gradient, array $expected) : void { $step = $this->optimizer->step(param: $param, gradient: $gradient); From 8677c7670eaefb9add6b8d4d8c90bdce92239c4f Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Tue, 11 Nov 2025 17:05:15 +0200 Subject: [PATCH 12/42] 390 refactoring AdamTest - added dataprovider for constructor tests --- tests/NeuralNet/Optimizers/Adam/AdamTest.php | 34 ++++++++++++++------ 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/tests/NeuralNet/Optimizers/Adam/AdamTest.php b/tests/NeuralNet/Optimizers/Adam/AdamTest.php index dbd6cedd6..04444001d 100644 --- a/tests/NeuralNet/Optimizers/Adam/AdamTest.php +++ b/tests/NeuralNet/Optimizers/Adam/AdamTest.php @@ -9,6 +9,8 @@ use PHPUnit\Framework\Attributes\Group; use NDArray; use NumPower; +use PHPUnit\Framework\Attributes\Test; +use PHPUnit\Framework\Attributes\TestDox; use Rubix\ML\Exceptions\InvalidArgumentException; use Rubix\ML\NeuralNet\Parameters\Parameter; use Rubix\ML\NeuralNet\Optimizers\Adam\Adam; @@ -25,20 +27,20 @@ class AdamTest extends TestCase public static function invalidConstructorProvider() : Generator { // Invalid rates (<= 0) - yield [0.0, 0.1, 0.001]; - yield [-0.5, 0.1, 0.001]; + yield 'zero rate' => [0.0, 0.1, 0.001]; + yield 'negative rate' => [-0.5, 0.1, 0.001]; // Invalid momentumDecay (<= 0 or >= 1) - yield [0.001, 0.0, 0.001]; - yield [0.001, -0.1, 0.001]; - yield [0.001, 1.0, 0.001]; - yield [0.001, 1.1, 0.001]; + yield 'zero momentumDecay' => [0.001, 0.0, 0.001]; + yield 'negative momentumDecay' => [0.001, -0.1, 0.001]; + yield 'momentumDecay == 1' => [0.001, 1.0, 0.001]; + yield 'momentumDecay > 1' => [0.001, 1.1, 0.001]; // Invalid normDecay (<= 0 or >= 1) - yield [0.001, 0.1, 0.0]; - yield [0.001, 0.1, -0.1]; - yield [0.001, 0.1, 1.0]; - yield [0.001, 0.1, 1.1]; + yield 'zero normDecay' => [0.001, 0.1, 0.0]; + yield 'negative normDecay' => [0.001, 0.1, -0.1]; + yield 
'normDecay == 1' => [0.001, 0.1, 1.0]; + yield 'normDecay > 1' => [0.001, 0.1, 1.1]; } public static function stepProvider() : Generator @@ -71,13 +73,23 @@ protected function setUp() : void ); } + #[Test] + #[TestDox('Can be cast to a string')] public function testToString() : void { $expected = 'Adam (rate: 0.001, momentum decay: 0.1, norm decay: 0.001)'; self::assertSame($expected, (string) $this->optimizer); } + /** + * @param float $rate + * @param float $momentumDecay + * @param float $normDecay + * @return void + */ + #[Test] #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] public function testInvalidConstructorParams(float $rate, float $momentumDecay, float $normDecay) : void { $this->expectException(InvalidArgumentException::class); @@ -89,7 +101,9 @@ public function testInvalidConstructorParams(float $rate, float $momentumDecay, * @param NDArray $gradient * @param list> $expected */ + #[Test] #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] public function testStep(Parameter $param, NDArray $gradient, array $expected) : void { $this->optimizer->warm($param); From 269405beeb16678e1a676213bc848f437aaec180 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Tue, 11 Nov 2025 17:07:48 +0200 Subject: [PATCH 13/42] 390 refactoring MomentumTest - added dataprovider for constructor tests --- .../Optimizers/Momentum/MomentumTest.php | 74 ++++++------------- 1 file changed, 23 insertions(+), 51 deletions(-) diff --git a/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php b/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php index 1b2a90378..fb84d6d5c 100644 --- a/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php +++ b/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php @@ -23,6 +23,16 @@ class MomentumTest extends TestCase { protected Momentum $optimizer; + public static function invalidConstructorProvider() : Generator + { + yield 'zero rate' => [0.0, 0.1]; + yield 'negative rate' => [-0.001, 0.1]; + yield 'zero decay' => [0.001, 0.0]; + yield 'decay == 1' => [0.001, 1.0]; + yield 'decay > 1' => [0.001, 1.5]; + yield 'negative decay' => [0.001, -0.1]; + } + public static function stepProvider() : Generator { yield [ @@ -50,64 +60,25 @@ protected function setUp() : void } #[Test] - #[TestDox('Throws exception when constructed with zero rate')] - public function testConstructorWithZeroRate() : void - { - $this->expectException(InvalidArgumentException::class); - - new Momentum(rate: 0.0); - } - - #[Test] - #[TestDox('Throws exception when constructed with negative rate')] - public function testConstructorWithNegativeRate() : void - { - $this->expectException(InvalidArgumentException::class); - - new Momentum(rate: -0.001); - } - - #[Test] - #[TestDox('Throws exception when constructed with zero decay')] - public function testConstructorWithZeroDecay() : void - { - $this->expectException(InvalidArgumentException::class); - - new Momentum(rate: 0.001, decay: 0.0); - } - - #[Test] - #[TestDox('Throws exception when constructed with decay equal to 1')] - public function testConstructorWithDecayEqualToOne() : void - { - $this->expectException(InvalidArgumentException::class); - - new Momentum(rate: 0.001, decay: 1.0); - } - - #[Test] - #[TestDox('Throws exception when constructed with decay greater than 1')] - public function testConstructorWithDecayGreaterThanOne() : void + #[TestDox('Can be cast to a string')] + public function testToString() : void { - 
$this->expectException(InvalidArgumentException::class); - - new Momentum(rate: 0.001, decay: 1.5); + self::assertEquals('Momentum (rate: 0.001, decay: 0.1, lookahead: false)', (string) $this->optimizer); } + /** + * @param float $rate + * @param float $decay + * @return void + */ #[Test] - #[TestDox('Throws exception when constructed with negative decay')] - public function testConstructorWithNegativeDecay() : void + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float $rate, float $decay) : void { $this->expectException(InvalidArgumentException::class); - new Momentum(rate: 0.001, decay: -0.1); - } - - #[Test] - #[TestDox('Can be cast to a string')] - public function testToString() : void - { - self::assertEquals('Momentum (rate: 0.001, decay: 0.1, lookahead: false)', (string) $this->optimizer); + new Momentum(rate: $rate, decay: $decay); } #[Test] @@ -143,6 +114,7 @@ public function testWarmInitializesZeroedCache() : void * @param list> $expected */ #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] public function testStep(Parameter $param, NDArray $gradient, array $expected) : void { $this->optimizer->warm($param); From aca753eeaf5420b656ea8631061cb2e5f437cb56 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Tue, 11 Nov 2025 17:09:57 +0200 Subject: [PATCH 14/42] 390 refactoring RMSPropTest - added dataprovider for constructor tests --- .../Optimizers/Momentum/MomentumTest.php | 1 + .../Optimizers/RMSProp/RMSPropTest.php | 70 +++++-------------- 2 files changed, 20 insertions(+), 51 deletions(-) diff --git a/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php b/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php index fb84d6d5c..1b486efa5 100644 --- a/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php +++ b/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php @@ -113,6 +113,7 @@ public function testWarmInitializesZeroedCache() : void * @param NDArray $gradient * @param list> $expected */ + #[Test] #[DataProvider('stepProvider')] #[TestDox('Can compute the step')] public function testStep(Parameter $param, NDArray $gradient, array $expected) : void diff --git a/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php b/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php index 09d43ac24..f47e4f2b3 100644 --- a/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php +++ b/tests/NeuralNet/Optimizers/RMSProp/RMSPropTest.php @@ -23,6 +23,16 @@ class RMSPropTest extends TestCase { protected RMSProp $optimizer; + public static function invalidConstructorProvider() : Generator + { + yield 'zero rate' => [0.0, 0.1]; + yield 'negative rate' => [-0.001, 0.1]; + yield 'zero decay' => [0.001, 0.0]; + yield 'decay == 1' => [0.001, 1.0]; + yield 'decay > 1' => [0.001, 1.5]; + yield 'negative decay' => [0.001, -0.1]; + } + public static function stepProvider() : Generator { yield [ @@ -50,64 +60,20 @@ protected function setUp() : void } #[Test] - #[TestDox('Throws exception when constructed with zero rate')] - public function testConstructorWithZeroRate() : void - { - $this->expectException(InvalidArgumentException::class); - - new RMSProp(rate: 0.0); - } - - #[Test] - #[TestDox('Throws exception when constructed with negative rate')] - public function testConstructorWithNegativeRate() : void - { - $this->expectException(InvalidArgumentException::class); - - new RMSProp(rate: -0.001); - } - - #[Test] - #[TestDox('Throws exception when constructed with zero decay')] - 
public function testConstructorWithZeroDecay() : void - { - $this->expectException(InvalidArgumentException::class); - - new RMSProp(rate: 0.001, decay: 0.0); - } - - #[Test] - #[TestDox('Throws exception when constructed with decay equal to 1')] - public function testConstructorWithDecayEqualToOne() : void - { - $this->expectException(InvalidArgumentException::class); - - new RMSProp(rate: 0.001, decay: 1.0); - } - - #[Test] - #[TestDox('Throws exception when constructed with decay greater than 1')] - public function testConstructorWithDecayGreaterThanOne() : void + #[TestDox('Can be cast to a string')] + public function testToString() : void { - $this->expectException(InvalidArgumentException::class); - - new RMSProp(rate: 0.001, decay: 1.5); + self::assertEquals('RMS Prop (rate: 0.001, decay: 0.1)', (string) $this->optimizer); } #[Test] - #[TestDox('Throws exception when constructed with negative decay')] - public function testConstructorWithNegativeDecay() : void + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float $rate, float $decay) : void { $this->expectException(InvalidArgumentException::class); - new RMSProp(rate: 0.001, decay: -0.1); - } - - #[Test] - #[TestDox('Can be cast to a string')] - public function testToString() : void - { - self::assertEquals('RMS Prop (rate: 0.001, decay: 0.1)', (string) $this->optimizer); + new RMSProp(rate: $rate, decay: $decay); } #[Test] @@ -142,7 +108,9 @@ public function testWarmInitializesZeroedCache() : void * @param NDArray $gradient * @param list> $expected */ + #[Test] #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] public function testStep(Parameter $param, NDArray $gradient, array $expected) : void { $this->optimizer->warm($param); From e9c48315a21f9cb66160ab1f3a8b3059db1e19cc Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Tue, 11 Nov 2025 17:12:11 +0200 Subject: [PATCH 15/42] 390 refactoring StepDecayTest - added dataprovider for constructor tests --- .../Optimizers/StepDecay/StepDecayTest.php | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php b/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php index f3535552b..7d581e31b 100644 --- a/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php +++ b/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php @@ -24,6 +24,15 @@ class StepDecayTest extends TestCase { protected StepDecay $optimizer; + public static function invalidConstructorProvider() : Generator + { + yield 'zero rate' => [0.0, 100, 0.001]; + yield 'negative rate' => [-0.001, 100, 0.001]; + yield 'zero losses' => [0.01, 0, 0.001]; + yield 'negative losses' => [0.01, -5, 0.001]; + yield 'negative decay' => [0.01, 100, -0.1]; + } + public static function stepProvider() : Generator { yield [ @@ -51,37 +60,26 @@ protected function setUp() : void } #[Test] - #[TestDox('Throws exception when constructed with invalid learning rate')] - public function testConstructorWithInvalidRate() : void - { - $this->expectException(InvalidArgumentException::class); - - new StepDecay(rate: 0.0); - } - - #[Test] - #[TestDox('Throws exception when constructed with invalid losses')] - public function testConstructorWithInvalidLosses() : void + #[TestDox('Can be cast to a string')] + public function testToString() : void { - $this->expectException(InvalidArgumentException::class); - - new StepDecay(rate: 0.01, losses: 
0); + self::assertEquals('Step Decay (rate: 0.001, steps: 100, decay: 0.001)', (string) $this->optimizer); } + /** + * @param float $rate + * @param int $losses + * @param float $decay + * @return void + */ #[Test] - #[TestDox('Throws exception when constructed with invalid decay')] - public function testConstructorWithInvalidDecay() : void + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float $rate, int $losses, float $decay) : void { $this->expectException(InvalidArgumentException::class); - new StepDecay(rate: 0.01, losses: 100, decay: -0.1); - } - - #[Test] - #[TestDox('Can be cast to a string')] - public function testToString() : void - { - self::assertEquals('Step Decay (rate: 0.001, steps: 100, decay: 0.001)', (string) $this->optimizer); + new StepDecay(rate: $rate, losses: $losses, decay: $decay); } /** @@ -89,7 +87,9 @@ public function testToString() : void * @param NDArray $gradient * @param list> $expected */ + #[Test] #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] public function testStep(Parameter $param, NDArray $gradient, array $expected) : void { $step = $this->optimizer->step(param: $param, gradient: $gradient); From 8d3f76a4baf0d28894d66c6a651e73047839c16b Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Tue, 11 Nov 2025 17:13:39 +0200 Subject: [PATCH 16/42] 390 refactoring StochasticTest - added dataprovider for constructor tests --- .../Optimizers/Stochastic/StochasticTest.php | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php b/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php index 57a50335f..2e16462d1 100644 --- a/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php +++ b/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php @@ -23,6 +23,12 @@ class StochasticTest extends TestCase { protected Stochastic $optimizer; + public static function invalidConstructorProvider() : Generator + { + yield 'zero rate' => [0.0]; + yield 'negative rate' => [-0.001]; + } + public static function stepProvider() : Generator { yield [ @@ -50,19 +56,24 @@ protected function setUp() : void } #[Test] - #[TestDox('Throws exception when constructed with invalid learning rate')] - public function testConstructorWithInvalidRate() : void + #[TestDox('Can be cast to a string')] + public function testToString() : void { - $this->expectException(InvalidArgumentException::class); - - new Stochastic(0.0); + self::assertEquals('Stochastic (rate: 0.001)', (string) $this->optimizer); } + /** + * @param float $rate + * @return void + */ #[Test] - #[TestDox('Can be cast to a string')] - public function testToString() : void + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float $rate) : void { - self::assertEquals('Stochastic (rate: 0.001)', (string) $this->optimizer); + $this->expectException(InvalidArgumentException::class); + + new Stochastic($rate); } /** @@ -70,7 +81,9 @@ public function testToString() : void * @param NDArray $gradient * @param list> $expected */ + #[Test] #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] public function testStep(Parameter $param, NDArray $gradient, array $expected) : void { $step = $this->optimizer->step(param: $param, gradient: $gradient); From 
23397ef90a74606a16fe130b5331c2d38503a662 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Tue, 11 Nov 2025 23:44:16 +0200 Subject: [PATCH 17/42] 390 convert AdaMax to NumPower --- src/NeuralNet/Optimizers/AdaMax/AdaMax.php | 90 ++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 src/NeuralNet/Optimizers/AdaMax/AdaMax.php diff --git a/src/NeuralNet/Optimizers/AdaMax/AdaMax.php b/src/NeuralNet/Optimizers/AdaMax/AdaMax.php new file mode 100644 index 000000000..ae13d2249 --- /dev/null +++ b/src/NeuralNet/Optimizers/AdaMax/AdaMax.php @@ -0,0 +1,90 @@ + + */ +class AdaMax extends Adam +{ + /** + * @param float $rate + * @param float $momentumDecay + * @param float $normDecay + */ + public function __construct(float $rate = 0.001, float $momentumDecay = 0.1, float $normDecay = 0.001) + { + parent::__construct($rate, $momentumDecay, $normDecay); + } + + /** + * Take a step of gradient descent for a given parameter. + * + * AdaMax update (element-wise): + * v_t = v_{t-1} + β1 · (g_t − v_{t-1}) + * u_t = max(β2 · u_{t-1}, |g_t|) + * Δθ_t = η · v_t / max(u_t, ε) + * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + [$velocity, $norm] = $this->cache[$param->id()]; + + $vHat = NumPower::multiply( + NumPower::subtract($gradient, $velocity), + $this->momentumDecay + ); + + $velocity = NumPower::add($velocity, $vHat); + + // Infinity norm accumulator + $norm = NumPower::multiply($norm, 1.0 - $this->normDecay); + $absGrad = NumPower::abs($gradient); + $norm = NumPower::maximum($norm, $absGrad); + + $this->cache[$param->id()] = [$velocity, $norm]; + + $norm = NumPower::clip($norm, EPSILON, PHP_FLOAT_MAX); + + return NumPower::multiply( + NumPower::divide($velocity, $norm), + $this->rate + ); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "AdaMax (rate: {$this->rate}, momentum decay: {$this->momentumDecay}," + . " norm decay: {$this->normDecay})"; + } +} From 223a90e756366e1cade74bd069b31ecc51082edc Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 14 Nov 2025 16:50:06 +0200 Subject: [PATCH 18/42] 390 convert AdaMax to NumPower --- docs/neural-network/optimizers/adam.md | 2 - docs/neural-network/optimizers/adamax.md | 25 +++- .../Optimizers/AdaMax/AdaMaxTest.php | 111 ++++++++++++++++++ 3 files changed, 133 insertions(+), 5 deletions(-) create mode 100644 tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php diff --git a/docs/neural-network/optimizers/adam.md b/docs/neural-network/optimizers/adam.md index 0470a9d4a..b58c70fcb 100644 --- a/docs/neural-network/optimizers/adam.md +++ b/docs/neural-network/optimizers/adam.md @@ -22,8 +22,6 @@ where: - $\mathbf{g}_t$ is the current gradient, and $\mathbf{g}_t^{2}$ denotes element-wise square, - $\varepsilon$ is a small constant for numerical stability (in the implementation, the denominator is clipped from below by `EPSILON`). -Note: This formulation follows the implementation in Rubix ML and does not include bias-correction terms. 
- ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| diff --git a/docs/neural-network/optimizers/adamax.md b/docs/neural-network/optimizers/adamax.md index 6b1d9ea05..ff02f925a 100644 --- a/docs/neural-network/optimizers/adamax.md +++ b/docs/neural-network/optimizers/adamax.md @@ -1,8 +1,27 @@ -[source] +[source] # AdaMax A version of the [Adam](adam.md) optimizer that replaces the RMS property with the infinity norm of the past gradients. As such, AdaMax is generally more suitable for sparse parameter updates and noisy gradients. +## Mathematical formulation +Per step (element-wise), AdaMax maintains an exponentially decaying moving average of the gradient (velocity) and an infinity-norm accumulator of past gradients, and uses them to scale the update: + +$$ +\begin{aligned} +\mathbf{v}_t &= (1 - \beta_1)\,\mathbf{v}_{t-1} + \beta_1\,\mathbf{g}_t \\ +\mathbf{u}_t &= \max\big(\beta_2\,\mathbf{u}_{t-1},\ |\mathbf{g}_t|\big) \\ +\Delta{\theta}_t &= \alpha\, \frac{\mathbf{v}_t}{\max(\mathbf{u}_t, \varepsilon)} +\end{aligned} +$$ + +where: +- $t$ is the current step, +- $\alpha$ is the learning rate (`rate`), +- $\beta_1$ is the momentum decay (`momentumDecay`), +- $\beta_2$ is the norm decay (`normDecay`), +- $\mathbf{g}_t$ is the current gradient and $|\mathbf{g}_t|$ denotes element-wise absolute value, +- $\varepsilon$ is a small constant for numerical stability (in the implementation, the denominator is clipped from below by `EPSILON`). + ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| @@ -12,10 +31,10 @@ A version of the [Adam](adam.md) optimizer that replaces the RMS property with t ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\AdaMax; +use Rubix\ML\NeuralNet\Optimizers\AdaMax\AdaMax; $optimizer = new AdaMax(0.0001, 0.1, 0.001); ``` ## References -[^1]: D. P. Kingma et al. (2014). Adam: A Method for Stochastic Optimization. \ No newline at end of file +[^1]: D. P. Kingma et al. (2014). Adam: A Method for Stochastic Optimization. 
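As a quick sanity check of the update rule documented above, the scalar walk-through below reproduces one `AdaMax::step()` update in plain PHP. This is an illustrative sketch, not library code: the variable names are invented, `PHP_FLOAT_EPSILON` stands in for the library's `EPSILON` constant, and the values mirror the default hyper-parameters and one entry of the `AdaMaxTest` fixture that follows.

```php
<?php

// One AdaMax update on a single element, starting from a cold (zeroed) cache.
$rate = 0.001;          // η  (rate)
$momentumDecay = 0.1;   // β1 (momentumDecay)
$normDecay = 0.001;     // decay applied to the infinity-norm accumulator

$velocity = 0.0;        // v_{t-1}
$norm = 0.0;            // u_{t-1}
$gradient = 0.05;       // g_t

// v_t = v_{t-1} + β1 · (g_t − v_{t-1})
$velocity += $momentumDecay * ($gradient - $velocity);      // 0.005

// u_t = max((1 − normDecay) · u_{t-1}, |g_t|), as implemented in step()
$norm = max((1.0 - $normDecay) * $norm, abs($gradient));    // 0.05

// Δθ_t = η · v_t / max(u_t, ε)
$step = $rate * $velocity / max($norm, PHP_FLOAT_EPSILON);

echo $step, PHP_EOL; // 0.0001
```

On a cold start the ratio v_t / u_t reduces to ±β1, so the step magnitude is η · β1 = 0.0001, which is the value the `stepProvider` fixture in the new `AdaMaxTest` expects.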
diff --git a/tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php b/tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php new file mode 100644 index 000000000..d32bf0b3d --- /dev/null +++ b/tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php @@ -0,0 +1,111 @@ + [0.0, 0.1, 0.001]; + yield 'negative rate' => [-0.001, 0.1, 0.001]; + yield 'zero momentum decay' => [0.001, 0.0, 0.001]; + yield 'momentum decay == 1' => [0.001, 1.0, 0.001]; + yield 'momentum decay > 1' => [0.001, 1.5, 0.001]; + yield 'negative momentum decay' => [0.001, -0.1, 0.001]; + yield 'zero norm decay' => [0.001, 0.1, 0.0]; + yield 'norm decay == 1' => [0.001, 0.1, 1.0]; + yield 'norm decay > 1' => [0.001, 0.1, 1.5]; + yield 'negative norm decay' => [0.001, 0.1, -0.1]; + } + + public static function stepProvider() : Generator + { + yield [ + new Parameter(NumPower::array([ + [0.1, 0.6, -0.4], + [0.5, 0.6, -0.4], + [0.1, 0.1, -0.7], + ])), + NumPower::array([ + [0.01, 0.05, -0.02], + [-0.01, 0.02, 0.03], + [0.04, -0.01, -0.5], + ]), + [ + [0.0001, 0.0001, -0.0001], + [-0.0001, 0.0001, 0.0001], + [0.0001, -0.0001, -0.0001], + ], + ]; + } + + protected function setUp() : void + { + $this->optimizer = new AdaMax( + rate: 0.001, + momentumDecay: 0.1, + normDecay: 0.001 + ); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('AdaMax (rate: 0.001, momentum decay: 0.1, norm decay: 0.001)', (string) $this->optimizer); + } + + /** + * @param float $rate + * @param float $momentumDecay + * @param float $normDecay + * @return void + */ + #[Test] + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float $rate, float $momentumDecay, float $normDecay) : void + { + $this->expectException(InvalidArgumentException::class); + + new AdaMax(rate: $rate, momentumDecay: $momentumDecay, normDecay: $normDecay); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[Test] + #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $this->optimizer->warm($param); + + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} + + From db1c6dbe675b5b18a2562118cdf2ed1830e003f2 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 14 Nov 2025 16:52:50 +0200 Subject: [PATCH 19/42] 390 Added warm initialization test for zeroed Adam optimizer caches --- tests/NeuralNet/Optimizers/Adam/AdamTest.php | 28 ++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tests/NeuralNet/Optimizers/Adam/AdamTest.php b/tests/NeuralNet/Optimizers/Adam/AdamTest.php index 04444001d..b0549ae70 100644 --- a/tests/NeuralNet/Optimizers/Adam/AdamTest.php +++ b/tests/NeuralNet/Optimizers/Adam/AdamTest.php @@ -81,6 +81,33 @@ public function testToString() : void self::assertSame($expected, (string) $this->optimizer); } + #[Test] + #[TestDox('Warm initializes zeroed velocity and norm caches with the parameter\'s shape')] + public function testWarmInitializesZeroedCache() : void + { + $param = new Parameter(NumPower::array([ + [1.0, 2.0, 3.0], + [4.0, 5.0, 6.0], + ])); + + // Warm the optimizer for this parameter + $this->optimizer->warm($param); + + // Inspect protected cache via reflection + $ref = new \ReflectionClass($this->optimizer); + $prop = 
$ref->getProperty('cache'); + $prop->setAccessible(true); + $cache = $prop->getValue($this->optimizer); + + self::assertArrayHasKey($param->id(), $cache); + + [$velocity, $norm] = $cache[$param->id()]; + + $zeros = NumPower::zeros($param->param()->shape()); + self::assertEqualsWithDelta($zeros->toArray(), $velocity->toArray(), 0.0); + self::assertEqualsWithDelta($zeros->toArray(), $norm->toArray(), 0.0); + } + /** * @param float $rate * @param float $momentumDecay @@ -113,3 +140,4 @@ public function testStep(Parameter $param, NDArray $gradient, array $expected) : self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); } } + From 548c055bf0339bf60916682a9cdb5447669a5d2d Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 14 Nov 2025 16:54:57 +0200 Subject: [PATCH 20/42] Code cleanup: removed redundant docblocks, adjusted formatting, and applied consistent style across optimizer tests and Parameter class. --- src/NeuralNet/Parameters/Parameter.php | 13 ++++++------- tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php | 3 --- tests/NeuralNet/Optimizers/Adam/AdamTest.php | 2 -- .../NeuralNet/Optimizers/Cyclical/CyclicalTest.php | 3 +-- .../NeuralNet/Optimizers/Momentum/MomentumTest.php | 1 - .../Optimizers/StepDecay/StepDecayTest.php | 3 --- .../Optimizers/Stochastic/StochasticTest.php | 1 - 7 files changed, 7 insertions(+), 19 deletions(-) diff --git a/src/NeuralNet/Parameters/Parameter.php b/src/NeuralNet/Parameters/Parameter.php index efa7cf88a..0cef2e87a 100644 --- a/src/NeuralNet/Parameters/Parameter.php +++ b/src/NeuralNet/Parameters/Parameter.php @@ -22,7 +22,6 @@ /** * Parameter - * */ class Parameter { @@ -61,7 +60,7 @@ public function __construct(NDArray $param) * * @return int */ - public function id(): int + public function id() : int { return $this->id; } @@ -71,7 +70,7 @@ public function id(): int * * @return NDArray */ - public function param(): NDArray + public function param() : NDArray { return $this->param; } @@ -79,10 +78,10 @@ public function param(): NDArray /** * Update the parameter with the gradient and optimizer. * - * @param NDArray $gradient - * @param Optimizer $optimizer + * @param NDArray $gradient + * @param Optimizer $optimizer */ - public function update(NDArray $gradient, Optimizer $optimizer): void + public function update(NDArray $gradient, Optimizer $optimizer) : void { $step = $optimizer->step($this, $gradient); @@ -92,7 +91,7 @@ public function update(NDArray $gradient, Optimizer $optimizer): void /** * Perform a deep copy of the object upon cloning. 
*/ - public function __clone(): void + public function __clone() : void { $this->param = clone $this->param; } diff --git a/tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php b/tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php index d32bf0b3d..0ca059561 100644 --- a/tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php +++ b/tests/NeuralNet/Optimizers/AdaMax/AdaMaxTest.php @@ -78,7 +78,6 @@ public function testToString() : void * @param float $rate * @param float $momentumDecay * @param float $normDecay - * @return void */ #[Test] #[DataProvider('invalidConstructorProvider')] @@ -107,5 +106,3 @@ public function testStep(Parameter $param, NDArray $gradient, array $expected) : self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); } } - - diff --git a/tests/NeuralNet/Optimizers/Adam/AdamTest.php b/tests/NeuralNet/Optimizers/Adam/AdamTest.php index b0549ae70..bcf19d344 100644 --- a/tests/NeuralNet/Optimizers/Adam/AdamTest.php +++ b/tests/NeuralNet/Optimizers/Adam/AdamTest.php @@ -112,7 +112,6 @@ public function testWarmInitializesZeroedCache() : void * @param float $rate * @param float $momentumDecay * @param float $normDecay - * @return void */ #[Test] #[DataProvider('invalidConstructorProvider')] @@ -140,4 +139,3 @@ public function testStep(Parameter $param, NDArray $gradient, array $expected) : self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); } } - diff --git a/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php b/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php index aa7102f0f..302b770be 100644 --- a/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php +++ b/tests/NeuralNet/Optimizers/Cyclical/CyclicalTest.php @@ -53,7 +53,7 @@ public static function stepProvider() : Generator [0.00001, 0.00005, -0.00002], [-0.00001, 0.00002, 0.00003], [0.00004, -0.00001, -0.0005], - ] + ], ]; } @@ -74,7 +74,6 @@ public function testToString() : void * @param float $upper * @param int $losses * @param float|null $decay - * @return void */ #[Test] #[DataProvider('invalidConstructorProvider')] diff --git a/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php b/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php index 1b486efa5..03b65f9a7 100644 --- a/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php +++ b/tests/NeuralNet/Optimizers/Momentum/MomentumTest.php @@ -69,7 +69,6 @@ public function testToString() : void /** * @param float $rate * @param float $decay - * @return void */ #[Test] #[DataProvider('invalidConstructorProvider')] diff --git a/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php b/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php index 7d581e31b..ae7f78810 100644 --- a/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php +++ b/tests/NeuralNet/Optimizers/StepDecay/StepDecayTest.php @@ -13,7 +13,6 @@ use PHPUnit\Framework\Attributes\Test; use PHPUnit\Framework\Attributes\TestDox; use Rubix\ML\Exceptions\InvalidArgumentException; -use Rubix\ML\NeuralNet\Optimizers\Stochastic\Stochastic; use Rubix\ML\NeuralNet\Parameters\Parameter; use Rubix\ML\NeuralNet\Optimizers\StepDecay\StepDecay; use PHPUnit\Framework\TestCase; @@ -70,7 +69,6 @@ public function testToString() : void * @param float $rate * @param int $losses * @param float $decay - * @return void */ #[Test] #[DataProvider('invalidConstructorProvider')] @@ -97,4 +95,3 @@ public function testStep(Parameter $param, NDArray $gradient, array $expected) : self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); } } - diff --git a/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php 
b/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php index 2e16462d1..c24b990f7 100644 --- a/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php +++ b/tests/NeuralNet/Optimizers/Stochastic/StochasticTest.php @@ -64,7 +64,6 @@ public function testToString() : void /** * @param float $rate - * @return void */ #[Test] #[DataProvider('invalidConstructorProvider')] From 40cf94b3c774ba3b6d7bf898bff1483465ad0165 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 14 Nov 2025 17:21:32 +0200 Subject: [PATCH 21/42] 390 convert AdaGrad to NumPower --- docs/neural-network/optimizers/adagrad.md | 22 ++- src/NeuralNet/Optimizers/AdaGrad/AdaGrad.php | 134 ++++++++++++++++++ .../Optimizers/AdaGrad/AdaGradTest.php | 94 ++++++++++++ 3 files changed, 247 insertions(+), 3 deletions(-) create mode 100644 src/NeuralNet/Optimizers/AdaGrad/AdaGrad.php create mode 100644 tests/NeuralNet/Optimizers/AdaGrad/AdaGradTest.php diff --git a/docs/neural-network/optimizers/adagrad.md b/docs/neural-network/optimizers/adagrad.md index 9cfddff25..2e55a9953 100644 --- a/docs/neural-network/optimizers/adagrad.md +++ b/docs/neural-network/optimizers/adagrad.md @@ -1,8 +1,24 @@ -[source] +[source] # AdaGrad Short for *Adaptive Gradient*, the AdaGrad Optimizer speeds up the learning of parameters that do not change often and slows down the learning of parameters that do enjoy heavy activity. Due to AdaGrad's infinitely decaying step size, training may be slow or fail to converge using a low learning rate. +## Mathematical formulation +Per step (element-wise), AdaGrad accumulates the sum of squared gradients and scales the update by the root of this sum: + +$$ +\begin{aligned} +\mathbf{n}_t &= \mathbf{n}_{t-1} + \mathbf{g}_t^{2} \\ +\Delta{\theta}_t &= \alpha\, \frac{\mathbf{g}_t}{\sqrt{\mathbf{n}_t} + \varepsilon} +\end{aligned} +$$ + +where: +- $t$ is the current step, +- $\alpha$ is the learning rate (`rate`), +- $\mathbf{g}_t$ is the current gradient, and $\mathbf{g}_t^{2}$ denotes element-wise square, +- $\varepsilon$ is a small constant for numerical stability (in the implementation, the denominator is clipped from below by `EPSILON`). + ## Parameters | # | Name | Default | Type | Description | |---|---|---|---|---| @@ -10,10 +26,10 @@ Short for *Adaptive Gradient*, the AdaGrad Optimizer speeds up the learning of p ## Example ```php -use Rubix\ML\NeuralNet\Optimizers\AdaGrad; +use Rubix\ML\NeuralNet\Optimizers\AdaGrad\AdaGrad; $optimizer = new AdaGrad(0.125); ``` ## References -[^1]: J. Duchi et al. (2011). Adaptive Subgradient Methods for Online Learning and Stochastic Optimization. \ No newline at end of file +[^1]: J. Duchi et al. (2011). Adaptive Subgradient Methods for Online Learning and Stochastic Optimization. diff --git a/src/NeuralNet/Optimizers/AdaGrad/AdaGrad.php b/src/NeuralNet/Optimizers/AdaGrad/AdaGrad.php new file mode 100644 index 000000000..b6c92bd56 --- /dev/null +++ b/src/NeuralNet/Optimizers/AdaGrad/AdaGrad.php @@ -0,0 +1,134 @@ + + */ +class AdaGrad implements Optimizer, Adaptive +{ + /** + * The learning rate that controls the global step size. + * + * @var float + */ + protected float $rate; + + /** + * The cache of sum of squared gradients. 
+ * + * @var NDArray[] + */ + protected array $cache = [ + // + ]; + + /** + * @param float $rate + * @throws InvalidArgumentException + */ + public function __construct(float $rate = 0.01) + { + if ($rate <= 0.0) { + throw new InvalidArgumentException("Learning rate must be greater than 0, $rate given."); + } + + $this->rate = $rate; + } + + /** + * Warm the parameter cache. + * + * @internal + * + * @param Parameter $param + * @throws RuntimeException + */ + public function warm(Parameter $param) : void + { + $class = get_class($param->param()); + + if (!$class) { + throw new RuntimeException('Could not locate parameter class.'); + } + + $this->cache[$param->id()] = NumPower::zeros($param->param()->shape()); + } + + /** + * Take a step of gradient descent for a given parameter. + * + * AdaGrad update (element-wise): + * n_t = n_{t-1} + g_t^2 + * Δθ_t = η · g_t / max(√n_t, ε) + * + * where: + * - g_t is the current gradient, + * - n_t is the accumulated (running) sum of squared gradients, + * - η is the learning rate (rate), + * - ε is a small constant to avoid division by zero (implemented via clipping √n_t to [ε, +∞)). + * + * @internal + * + * @param Parameter $param + * @param NDArray $gradient + * @return NDArray + */ + public function step(Parameter $param, NDArray $gradient) : NDArray + { + $norm = $this->cache[$param->id()]; + + // Update accumulated squared gradients: norm = norm + gradient^2 + $norm = NumPower::add($norm, NumPower::square($gradient)); + + $this->cache[$param->id()] = $norm; + + // denominator = max(sqrt(norm), EPSILON) + $denominator = NumPower::sqrt($norm); + $denominator = NumPower::clip($denominator, EPSILON, PHP_FLOAT_MAX); + + // return rate * gradient / denominator + return NumPower::divide( + NumPower::multiply($gradient, $this->rate), + $denominator + ); + } + + /** + * Return the string representation of the object. 
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "AdaGrad (rate: {$this->rate})"; + } +} diff --git a/tests/NeuralNet/Optimizers/AdaGrad/AdaGradTest.php b/tests/NeuralNet/Optimizers/AdaGrad/AdaGradTest.php new file mode 100644 index 000000000..44ff773f5 --- /dev/null +++ b/tests/NeuralNet/Optimizers/AdaGrad/AdaGradTest.php @@ -0,0 +1,94 @@ + [0.0]; + yield 'negative rate' => [-0.001]; + } + + public static function stepProvider() : Generator + { + yield [ + new Parameter(NumPower::array([ + [0.1, 0.6, -0.4], + [0.5, 0.6, -0.4], + [0.1, 0.1, -0.7], + ])), + NumPower::array([ + [0.01, 0.05, -0.02], + [-0.01, 0.02, 0.03], + [0.04, -0.01, -0.5], + ]), + [ + [0.001, 0.001, -0.001], + [-0.001, 0.001, 0.001], + [0.001, -0.001, -0.001], + ], + ]; + } + + protected function setUp() : void + { + $this->optimizer = new AdaGrad(0.001); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertSame('AdaGrad (rate: 0.01)', (string) (new AdaGrad())); + } + + /** + * @param float $rate + */ + #[Test] + #[DataProvider('invalidConstructorProvider')] + #[TestDox('Throws exception when constructed with invalid arguments')] + public function testInvalidConstructorParams(float $rate) : void + { + $this->expectException(InvalidArgumentException::class); + + new AdaGrad(rate: $rate); + } + + /** + * @param Parameter $param + * @param NDArray $gradient + * @param list> $expected + */ + #[Test] + #[DataProvider('stepProvider')] + #[TestDox('Can compute the step')] + public function testStep(Parameter $param, NDArray $gradient, array $expected) : void + { + $this->optimizer->warm($param); + + $step = $this->optimizer->step(param: $param, gradient: $gradient); + + self::assertEqualsWithDelta($expected, $step->toArray(), 1e-7); + } +} From a67655fa021955db28047eb567a435ecdbde7b29 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 14 Nov 2025 17:40:39 +0200 Subject: [PATCH 22/42] 390- Fix broken link to the Adam optimizer source file in documentation --- docs/neural-network/optimizers/adam.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/neural-network/optimizers/adam.md b/docs/neural-network/optimizers/adam.md index b58c70fcb..d10a469f3 100644 --- a/docs/neural-network/optimizers/adam.md +++ b/docs/neural-network/optimizers/adam.md @@ -1,4 +1,4 @@ -[source] +[source] # Adam Short for *Adaptive Moment Estimation*, the Adam Optimizer combines both Momentum and RMS properties. In addition to storing an exponentially decaying average of past squared gradients like [RMSprop](rms-prop.md), Adam also keeps an exponentially decaying average of past gradients, similar to [Momentum](momentum.md). Whereas Momentum can be seen as a ball running down a slope, Adam behaves like a heavy ball with friction. From fc14381ad0ec82c0d469089718a85318936d2cac Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sat, 6 Dec 2025 18:55:48 +0200 Subject: [PATCH 23/42] ML-392 Added `Layer` and `Hidden` interfaces to define contracts for neural network layers. 
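For reference, a minimal layer that satisfies these contracts could look like the sketch below. The `Identity` class and its `App\Example` namespace are hypothetical and exist only to illustrate the `width()`, `initialize()`, `forward()`, `infer()`, `back()` and `__toString()` signatures declared by the `Layer` and `Hidden` interfaces added in this patch.

```php
<?php

namespace App\Example; // hypothetical namespace, for illustration only

use NDArray;
use RuntimeException;
use Rubix\ML\Deferred;
use Rubix\ML\NeuralNet\Layers\Base\Contracts\Hidden;
use Rubix\ML\NeuralNet\Optimizers\Base\Optimizer;

/**
 * A pass-through layer used only to demonstrate the new contracts.
 */
class Identity implements Hidden
{
    /**
     * The width of the layer.
     *
     * @var positive-int|null
     */
    protected ?int $width = null;

    public function width() : int
    {
        if ($this->width === null) {
            throw new RuntimeException('Layer has not been initialized.');
        }

        return $this->width;
    }

    public function initialize(int $fanIn) : int
    {
        return $this->width = $fanIn;
    }

    public function forward(NDArray $input) : NDArray
    {
        return $input;
    }

    public function infer(NDArray $input) : NDArray
    {
        return $input;
    }

    public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred
    {
        // No parameters to update, so the upstream gradient passes through unchanged.
        return $prevGradient;
    }

    public function __toString() : string
    {
        return 'Identity';
    }
}
```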
--- .../Layers/Base/Contracts/Hidden.php | 29 ++++++++++ src/NeuralNet/Layers/Base/Contracts/Layer.php | 57 +++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 src/NeuralNet/Layers/Base/Contracts/Hidden.php create mode 100644 src/NeuralNet/Layers/Base/Contracts/Layer.php diff --git a/src/NeuralNet/Layers/Base/Contracts/Hidden.php b/src/NeuralNet/Layers/Base/Contracts/Hidden.php new file mode 100644 index 000000000..b73b63521 --- /dev/null +++ b/src/NeuralNet/Layers/Base/Contracts/Hidden.php @@ -0,0 +1,29 @@ + + */ +interface Hidden extends Layer +{ + /** + * Calculate the gradient and update the parameters of the layer. + * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred; +} diff --git a/src/NeuralNet/Layers/Base/Contracts/Layer.php b/src/NeuralNet/Layers/Base/Contracts/Layer.php new file mode 100644 index 000000000..10cf17b6e --- /dev/null +++ b/src/NeuralNet/Layers/Base/Contracts/Layer.php @@ -0,0 +1,57 @@ + + */ +interface Layer extends Stringable +{ + /** + * The width of the layer. i.e. the number of neurons or computation nodes. + * + * @internal + * + * @return positive-int + */ + public function width() : int; + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int; + + /** + * Feed the input forward to the next layer in the network. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray; + + /** + * Forward pass during inference. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray; +} From 9546c9d84fce16f02efd3db0475352083eea8212 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sat, 6 Dec 2025 19:14:33 +0200 Subject: [PATCH 24/42] ML-393 Added standalone `Activation` layer implementation with comprehensive unit tests and fixed broken source file link in the documentation --- .../hidden-layers/activation.md | 8 +- .../Layers/Activation/Activation.php | 184 ++++++++++++++++++ .../Layers/Base/Contracts/Hidden.php | 1 - .../Layers/Activation/ActivationTest.php | 181 +++++++++++++++++ 4 files changed, 369 insertions(+), 5 deletions(-) create mode 100644 src/NeuralNet/Layers/Activation/Activation.php create mode 100644 tests/NeuralNet/Layers/Activation/ActivationTest.php diff --git a/docs/neural-network/hidden-layers/activation.md b/docs/neural-network/hidden-layers/activation.md index a4e4cde73..57d4dc46c 100644 --- a/docs/neural-network/hidden-layers/activation.md +++ b/docs/neural-network/hidden-layers/activation.md @@ -1,4 +1,4 @@ -[source] +[source] # Activation Activation layers apply a user-defined non-linear activation function to their inputs. They often work in conjunction with [Dense](dense.md) layers as a way to transform their output. 
@@ -10,8 +10,8 @@ Activation layers apply a user-defined non-linear activation function to their i ## Example ```php -use Rubix\ML\NeuralNet\Layers\Activation; -use Rubix\ML\NeuralNet\ActivationFunctions\ReLU; +use Rubix\ML\NeuralNet\Layers\Activation\Activation; +use Rubix\ML\NeuralNet\ActivationFunctions\ReLU\ReLU; $layer = new Activation(new ReLU()); -``` \ No newline at end of file +``` diff --git a/src/NeuralNet/Layers/Activation/Activation.php b/src/NeuralNet/Layers/Activation/Activation.php new file mode 100644 index 000000000..4394350b4 --- /dev/null +++ b/src/NeuralNet/Layers/Activation/Activation.php @@ -0,0 +1,184 @@ + + */ +class Activation implements Hidden +{ + /** + * The function that computes the output of the layer. + * + * @var ActivationFunction + */ + protected ActivationFunction $activationFn; + + /** + * The width of the layer. + * + * @var positive-int|null + */ + protected ?int $width = null; + + /** + * The memorized input matrix. + * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * The memorized activation matrix. + * + * @var NDArray|null + */ + protected ?NDArray $output = null; + + /** + * @param ActivationFunction $activationFn + */ + public function __construct(ActivationFunction $activationFn) + { + $this->activationFn = $activationFn; + } + + /** + * Return the width of the layer. + * + * @internal + * + * @throws RuntimeException + * @return positive-int + */ + public function width() : int + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + return $this->width; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $fanIn; + + $this->width = $fanOut; + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $output = $this->activationFn->activate($input); + + $this->input = $input; + $this->output = $output; + + return $output; + } + + /** + * Compute an inferential pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $this->activationFn->activate($input); + } + + /** + * Calculate the gradient and update the parameters of the layer. + * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @throws RuntimeException + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + if (!$this->input or !$this->output) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + $input = $this->input; + $output = $this->output; + + $this->input = $this->output = null; + + return new Deferred( + [$this, 'gradient'], + [$input, $output, $prevGradient] + ); + } + + /** + * Calculate the gradient for the previous layer. + * + * @internal + * + * @param NDArray $input + * @param NDArray $output + * @param Deferred $prevGradient + * @return NDArray + */ + public function gradient(NDArray $input, NDArray $output, Deferred $prevGradient) : NDArray + { + return NumPower::multiply( + $this->activationFn->differentiate($input), + $prevGradient() + ); + } + + /** + * Return the string representation of the object. 
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Activation (activation fn: {$this->activationFn})"; + } +} diff --git a/src/NeuralNet/Layers/Base/Contracts/Hidden.php b/src/NeuralNet/Layers/Base/Contracts/Hidden.php index b73b63521..f903e3916 100644 --- a/src/NeuralNet/Layers/Base/Contracts/Hidden.php +++ b/src/NeuralNet/Layers/Base/Contracts/Hidden.php @@ -3,7 +3,6 @@ namespace Rubix\ML\NeuralNet\Layers\Base\Contracts; use Rubix\ML\Deferred; -use Rubix\ML\NeuralNet\Layers\Base\Contracts\Layer; use Rubix\ML\NeuralNet\Optimizers\Base\Optimizer; /** diff --git a/tests/NeuralNet/Layers/Activation/ActivationTest.php b/tests/NeuralNet/Layers/Activation/ActivationTest.php new file mode 100644 index 000000000..2c203ad18 --- /dev/null +++ b/tests/NeuralNet/Layers/Activation/ActivationTest.php @@ -0,0 +1,181 @@ + + */ + public static function forwardProvider() : array + { + return [ + [ + NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]), + [ + [1.0, 2.5, 0.0], + [0.1, 0.0, 3.0], + [0.002, 0.0, 0.0], + ], + ], + ]; + } + + /** + * @return array + */ + public static function backProvider() : array + { + return [ + [ + NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]), + NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]), + [ + [0.25, 0.7, 0.0], + [0.5, 0.0, 0.01], + [0.25, 0, 0.0], + ], + ], + ]; + } + + protected function setUp() : void + { + $this->fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Activation(new ReLU()); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Activation (activation fn: ReLU)', (string) $this->layer); + } + + #[Test] + #[TestDox('Initializes width equal to fan-in')] + public function testInitializeSetsWidth() : void + { + $this->layer->initialize($this->fanIn); + + self::assertEquals($this->fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes forward activations')] + #[DataProvider('forwardProvider')] + public function testForward(NDArray $input, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $forward = $this->layer->forward($input); + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes backpropagated gradients after forward pass')] + #[DataProvider('backProvider')] + public function testBack(NDArray $input, NDArray $prevGrad, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + // Forward pass to set internal input/output state + $this->layer->forward($input); + + $gradient = $this->layer + ->back(prevGradient: new Deferred(fn: fn () => $prevGrad), optimizer: $this->optimizer) + ->compute(); + + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference activations')] + #[DataProvider('forwardProvider')] + public function testInfer(NDArray $input, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $infer = $this->layer->infer($input); + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes gradient correctly 
given input, output, and previous gradient')] + #[DataProvider('backProvider')] + public function testGradient(NDArray $input, NDArray $prevGrad, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + // Produce output to pass explicitly to gradient + $output = $this->layer->forward($input); + + $gradient = $this->layer->gradient( + $input, + $output, + new Deferred(fn: fn () => $prevGrad) + ); + + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } +} From 14a2b6f51d142a339a69fa9492a354138133611f Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 00:41:57 +0200 Subject: [PATCH 25/42] ML-394 Implemented `BatchNorm` layer with comprehensive unit tests and updated documentation with fixed source file link. Added `Parametric` interface to define parameterized layers. --- .../hidden-layers/batch-norm.md | 10 +- .../Layers/Base/Contracts/Parametric.php | 33 ++ src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 424 ++++++++++++++++++ .../Layers/BatchNorm/BatchNormTest.php | 103 +++++ 4 files changed, 565 insertions(+), 5 deletions(-) create mode 100644 src/NeuralNet/Layers/Base/Contracts/Parametric.php create mode 100644 src/NeuralNet/Layers/BatchNorm/BatchNorm.php create mode 100644 tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php diff --git a/docs/neural-network/hidden-layers/batch-norm.md b/docs/neural-network/hidden-layers/batch-norm.md index 99fdefd22..373113e14 100644 --- a/docs/neural-network/hidden-layers/batch-norm.md +++ b/docs/neural-network/hidden-layers/batch-norm.md @@ -1,4 +1,4 @@ -[source] +[source] # Batch Norm Batch Norm layers normalize the activations of the previous layer such that the mean activation is *close* to 0 and the standard deviation is *close* to 1. Adding Batch Norm reduces the amount of covariate shift within the network which makes it possible to use higher learning rates and thus converge faster under some circumstances. @@ -12,12 +12,12 @@ Batch Norm layers normalize the activations of the previous layer such that the ## Example ```php -use Rubix\ML\NeuralNet\Layers\BatchNorm; -use Rubix\ML\NeuralNet\Initializers\Constant; -use Rubix\ML\NeuralNet\Initializers\Normal; +use Rubix\ML\NeuralNet\Layers\BatchNorm\BatchNorm; +use Rubix\ML\NeuralNet\Initializers\Constant\Constant; +use Rubix\ML\NeuralNet\Initializers\Normal\Normal; $layer = new BatchNorm(0.7, new Constant(0.), new Normal(1.)); ``` ## References -[^1]: S. Ioffe et al. (2015). Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. \ No newline at end of file +[^1]: S. Ioffe et al. (2015). Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. diff --git a/src/NeuralNet/Layers/Base/Contracts/Parametric.php b/src/NeuralNet/Layers/Base/Contracts/Parametric.php new file mode 100644 index 000000000..ed772c85d --- /dev/null +++ b/src/NeuralNet/Layers/Base/Contracts/Parametric.php @@ -0,0 +1,33 @@ + + */ +interface Parametric +{ + /** + * Return the parameters of the layer. + * + * @return Generator<\Rubix\ML\NeuralNet\Parameter> + */ + public function parameters() : Generator; + + /** + * Restore the parameters on the layer from an associative array. 
+ * + * @param Parameter[] $parameters + */ + public function restore(array $parameters) : void; +} diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php new file mode 100644 index 000000000..98c401f48 --- /dev/null +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -0,0 +1,424 @@ + 1.0) { + throw new InvalidArgumentException("Decay must be between 0 and 1, $decay given."); + } + + $this->decay = $decay; + $this->betaInitializer = $betaInitializer ?? new Constant(0.0); + $this->gammaInitializer = $gammaInitializer ?? new Constant(1.0); + } + + /** + * Return the width of the layer. + * + * @internal + * + * @throws RuntimeException + * @return positive-int + */ + public function width() : int + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + return $this->width; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $fanIn; + + // Initialize beta and gamma as vectors of length fanOut + // We request a [fanOut, 1] NDArray and then flatten to 1-D + $betaMat = $this->betaInitializer->initialize(1, $fanOut); + $gammaMat = $this->gammaInitializer->initialize(1, $fanOut); + + $beta = NumPower::flatten($betaMat); + $gamma = NumPower::flatten($gammaMat); + + $this->beta = new Parameter($beta); + $this->gamma = new Parameter($gamma); + + $this->width = $fanOut; + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @internal + * + * @param NDArray $input + * @throws RuntimeException + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + if (!$this->beta or !$this->gamma) { + throw new RuntimeException('Layer has not been initialized.'); + } + + $rows = $input->shape()[0]; + $meanArr = []; + $varArr = []; + $stdInvArr = []; + + for ($i = 0; $i < $rows; $i++) { + $meanArr[$i] = NumPower::mean($input->toArray()[$i]); + $varArr[$i] = NumPower::variance($input->toArray()[$i]); + $stdInvArr[$i] = 1.0 / sqrt($varArr[$i]); + } + + $mean = NumPower::array($meanArr); + + $variance = NumPower::array($varArr); + $variance = NumPower::clip($variance, EPSILON, PHP_FLOAT_MAX); + + $stdInv = NumPower::array($stdInvArr); + + $xHat = NumPower::multiply( + NumPower::subtract(NumPower::transpose($input, [1, 0]), $mean), + $stdInv + ); + $xHat = NumPower::transpose($xHat, [1, 0]); + + // Initialize running stats if needed + if (!$this->mean or !$this->variance) { + $this->mean = $mean; + $this->variance = $variance; + } + + // Update running mean/variance: running = running*(1-decay) + current*decay + $this->mean = NumPower::add( + NumPower::multiply($this->mean, 1.0 - $this->decay), + NumPower::multiply($mean, $this->decay) + ); + + $this->variance = NumPower::add( + NumPower::multiply($this->variance, 1.0 - $this->decay), + NumPower::multiply($variance, $this->decay) + ); + + $this->stdInv = $stdInv; + $this->xHat = $xHat; + + // gamma * xHat + beta (per-column scale/shift) using NDArray ops + return NumPower::add(NumPower::multiply($xHat, $this->gamma->param()), $this->beta->param()); + } + + /** + * Compute an inferential pass through the layer. 
+ * + * @internal + * + * @param NDArray $input + * @throws RuntimeException + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + if (!$this->mean or !$this->variance or !$this->beta or !$this->gamma) { + throw new RuntimeException('Layer has not been initialized.'); + } + + $xHat = NumPower::divide( + NumPower::subtract($input, $this->mean), + NumPower::sqrt($this->variance) + ); + + + $return = NumPower::add( + NumPower::multiply( + $xHat, + $this->gamma->param() + ), + $this->beta->param() + ); + //pp("xxxxxxxxxxxxxxxxxxxxxxxxxx", $return->toArray()); + + return $return; + } + + /** + * Calculate the errors and gradients of the layer and update the parameters. + * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @throws RuntimeException + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + if (!$this->beta or !$this->gamma) { + throw new RuntimeException('Layer has not been initialized.'); + } + + if (!$this->stdInv or !$this->xHat) { + throw new RuntimeException('Must perform forward pass before' + . ' backpropagating.'); + } + + $dOut = $prevGradient(); +// pp('New dOut: ', $dOut->toArray()); + + $dBeta = NumPower::sum($dOut, 1); +// pp('New dBeta: ', $dBeta->toArray()); + + $dGamma = NumPower::sum(NumPower::multiply($dOut, $this->xHat), 1); +// pp('New dGamma: ', $dGamma->toArray()); + + $gamma = $this->gamma->param(); + //pp('New Gamma: ', $gamma->toArray()); + + $this->beta->update($dBeta, $optimizer); + $this->gamma->update($dGamma, $optimizer); + + $stdInv = $this->stdInv; + $xHat = $this->xHat; + + $this->stdInv = $this->xHat = null; + + $return = new Deferred( + [$this, 'gradient'], + [$dOut, $gamma, $stdInv, $xHat] + ); + + //pp('New back: ', $dOut->toArray(), $gamma->toArray(), $stdInv->toArray(), $xHat->toArray(), end: "\n"); + + return $return; + } + + /** + * Calculate the gradient for the previous layer. + * + * @internal + * + * @param NDArray $dOut + * @param NDArray $gamma + * @param NDArray $stdInv + * @param NDArray $xHat + * @return NDArray + */ + public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray $xHat) : NDArray + { + // Implement the same formula using PHP arrays + $dOutArr = $dOut->toArray(); + $gammaArr = $gamma->toArray(); // 1-D length n + $stdInvArr = $stdInv->toArray(); // 1-D length n + $xHatArr = $xHat->toArray(); // [m, n] + + $m = count($dOutArr); + $n = $m > 0 ? 
count($dOutArr[0]) : 0; + + // dXHat = dOut * gamma (per column) + $dXHatArr = []; + for ($i = 0; $i < $m; $i++) { + $row = []; + for ($j = 0; $j < $n; $j++) { + $row[] = $dOutArr[$i][$j] * $gammaArr[$j]; + } + $dXHatArr[] = $row; + } + + // xHatSigma = sum(dXHat * xHat) per column + $xHatSigma = array_fill(0, $n, 0.0); + $dXHatSigma = array_fill(0, $n, 0.0); + for ($j = 0; $j < $n; $j++) { + $sum1 = 0.0; + $sum2 = 0.0; + for ($i = 0; $i < $m; $i++) { + $sum1 += $dXHatArr[$i][$j] * $xHatArr[$i][$j]; + $sum2 += $dXHatArr[$i][$j]; + } + $xHatSigma[$j] = $sum1; + $dXHatSigma[$j] = $sum2; + } + + // Compute gradient for previous layer per formula: + // dX = (dXHat * m - dXHatSigma - xHat * xHatSigma) * (stdInv / m) + $dXArr = []; + for ($i = 0; $i < $m; $i++) { + $row = []; + for ($j = 0; $j < $n; $j++) { + $val = ($dXHatArr[$i][$j] * $m) + - $dXHatSigma[$j] + - ($xHatArr[$i][$j] * $xHatSigma[$j]); + $row[] = $val * ($stdInvArr[$j] / ($m ?: 1)); + } + $dXArr[] = $row; + } + + return NumPower::array($dXArr); + } + + /** + * Return the parameters of the layer. + * + * @internal + * + * @throws RuntimeException + * @return Generator + */ + public function parameters() : Generator + { + if (!$this->beta or !$this->gamma) { + throw new RuntimeException('Layer has not been initialized.'); + } + + yield 'beta' => $this->beta; + yield 'gamma' => $this->gamma; + } + + /** + * Restore the parameters in the layer from an associative array. + * + * @internal + * + * @param Parameter[] $parameters + */ + public function restore(array $parameters) : void + { + $this->beta = $parameters['beta']; + $this->gamma = $parameters['gamma']; + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Batch Norm (decay: {$this->decay}, beta initializer: {$this->betaInitializer}," + . 
" gamma initializer: {$this->gammaInitializer})"; + } +} diff --git a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php new file mode 100644 index 000000000..ad5fcdc07 --- /dev/null +++ b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php @@ -0,0 +1,103 @@ +fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new BatchNorm( + decay: 0.9, + betaInitializer: new Constant(0.0), + gammaInitializer: new Constant(1.0) + ); + } + + public function testInitializeForwardBackInfer() : void + { + $this->layer->initialize($this->fanIn); + + self::assertEquals($this->fanIn, $this->layer->width()); + + $expected = [ + [-0.1251222, 1.2825030, -1.1573808], + [-0.6708631, -0.7427414, 1.4136046], + [0.7974157, -1.4101899, 0.6127743], + ]; + + $forward = $this->layer->forward($this->input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + + $gradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + $expected = [ + [-0.06445877134888621, 0.027271018647605647, 0.03718775270128047], + [0.11375900761901864, -0.10996704069838469, -0.0037919669206339162], + [-0.11909780311643131, -0.01087038130262698, 0.1299681844190583], + ]; + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + +// $expected = [ +// [-0.1260783, 1.2804902385302876, -1.1575619225761131], +// [-0.6718883801743488, -0.7438003494787433, 1.4135587296530918], +// [0.7956943312039361, -1.4105786650534555, 0.6111643338495193], +// ]; +// +// $infer = $this->layer->infer($this->input); +// +// self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); +// self::assertTrue(true); + } +} From 46e101b00b689352c8b919787fbd36ccd1d18d18 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 18:17:21 +0200 Subject: [PATCH 26/42] ML-395 Refactored `BatchNorm` layer to improve row/column normalization support --- src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 99 +++---- .../Layers/BatchNorm/BatchNormTest.php | 247 ++++++++++++++++-- 2 files changed, 258 insertions(+), 88 deletions(-) diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php index 98c401f48..b7c170abb 100644 --- a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -105,6 +105,13 @@ class BatchNorm implements Hidden, Parametric */ protected ?NDArray $xHat = null; + /** + * Row-wise or column-wise normalization. 
+ * + * @var int + */ + protected const int COLUMN_WISE = 1; + /** * @param float $decay * @param Initializer|null $betaInitializer @@ -246,20 +253,21 @@ public function infer(NDArray $input) : NDArray throw new RuntimeException('Layer has not been initialized.'); } + // Number of rows + $m = $input->shape()[0]; + $xHat = NumPower::divide( - NumPower::subtract($input, $this->mean), - NumPower::sqrt($this->variance) + NumPower::subtract($input, NumPower::reshape($this->mean, [$m, 1])), + NumPower::reshape(NumPower::sqrt($this->variance), [$m, 1]) ); - - $return = NumPower::add( + return NumPower::add( NumPower::multiply( $xHat, $this->gamma->param() ), $this->beta->param() ); - //pp("xxxxxxxxxxxxxxxxxxxxxxxxxx", $return->toArray()); return $return; } @@ -286,16 +294,9 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred } $dOut = $prevGradient(); -// pp('New dOut: ', $dOut->toArray()); - - $dBeta = NumPower::sum($dOut, 1); -// pp('New dBeta: ', $dBeta->toArray()); - - $dGamma = NumPower::sum(NumPower::multiply($dOut, $this->xHat), 1); -// pp('New dGamma: ', $dGamma->toArray()); - + $dBeta = NumPower::sum($dOut, self::COLUMN_WISE); + $dGamma = NumPower::sum(NumPower::multiply($dOut, $this->xHat), self::COLUMN_WISE); $gamma = $this->gamma->param(); - //pp('New Gamma: ', $gamma->toArray()); $this->beta->update($dBeta, $optimizer); $this->gamma->update($dGamma, $optimizer); @@ -305,13 +306,11 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred $this->stdInv = $this->xHat = null; - $return = new Deferred( + return new Deferred( [$this, 'gradient'], [$dOut, $gamma, $stdInv, $xHat] ); - //pp('New back: ', $dOut->toArray(), $gamma->toArray(), $stdInv->toArray(), $xHat->toArray(), end: "\n"); - return $return; } @@ -328,54 +327,26 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred */ public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray $xHat) : NDArray { - // Implement the same formula using PHP arrays - $dOutArr = $dOut->toArray(); - $gammaArr = $gamma->toArray(); // 1-D length n - $stdInvArr = $stdInv->toArray(); // 1-D length n - $xHatArr = $xHat->toArray(); // [m, n] - - $m = count($dOutArr); - $n = $m > 0 ? 
count($dOutArr[0]) : 0; - - // dXHat = dOut * gamma (per column) - $dXHatArr = []; - for ($i = 0; $i < $m; $i++) { - $row = []; - for ($j = 0; $j < $n; $j++) { - $row[] = $dOutArr[$i][$j] * $gammaArr[$j]; - } - $dXHatArr[] = $row; - } - - // xHatSigma = sum(dXHat * xHat) per column - $xHatSigma = array_fill(0, $n, 0.0); - $dXHatSigma = array_fill(0, $n, 0.0); - for ($j = 0; $j < $n; $j++) { - $sum1 = 0.0; - $sum2 = 0.0; - for ($i = 0; $i < $m; $i++) { - $sum1 += $dXHatArr[$i][$j] * $xHatArr[$i][$j]; - $sum2 += $dXHatArr[$i][$j]; - } - $xHatSigma[$j] = $sum1; - $dXHatSigma[$j] = $sum2; - } - - // Compute gradient for previous layer per formula: - // dX = (dXHat * m - dXHatSigma - xHat * xHatSigma) * (stdInv / m) - $dXArr = []; - for ($i = 0; $i < $m; $i++) { - $row = []; - for ($j = 0; $j < $n; $j++) { - $val = ($dXHatArr[$i][$j] * $m) - - $dXHatSigma[$j] - - ($xHatArr[$i][$j] * $xHatSigma[$j]); - $row[] = $val * ($stdInvArr[$j] / ($m ?: 1)); - } - $dXArr[] = $row; - } + $dXHat = NumPower::multiply($dOut, $gamma); + $xHatSigma = NumPower::sum(NumPower::multiply($dXHat, $xHat), self::COLUMN_WISE); + $dXHatSigma = NumPower::sum($dXHat, self::COLUMN_WISE); + + // Number of rows + $m = $dOut->shape()[0]; + + // Compute gradient per formula: dX = (dXHat * m - dXHatSigma - xHat * xHatSigma) * (stdInv / m) + return NumPower::multiply( + NumPower::subtract( + NumPower::subtract( + NumPower::multiply($dXHat, $m), + NumPower::reshape($dXHatSigma, [$m, 1]) + ), + NumPower::multiply($xHat, NumPower::reshape($xHatSigma, [$m, 1])) + ), + NumPower::reshape(NumPower::divide($stdInv, $m), [$m, 1]) + ); - return NumPower::array($dXArr); + return $return; } /** diff --git a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php index ad5fcdc07..9f05ab47b 100644 --- a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php +++ b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php @@ -8,11 +8,17 @@ use PHPUnit\Framework\Attributes\Group; use NDArray; use NumPower; +use PHPUnit\Framework\Attributes\Test; +use PHPUnit\Framework\Attributes\TestDox; +use PHPUnit\Framework\Attributes\DataProvider; use Rubix\ML\Deferred; use Rubix\ML\NeuralNet\Layers\BatchNorm\BatchNorm; use Rubix\ML\NeuralNet\Optimizers\Base\Optimizer; use Rubix\ML\NeuralNet\Optimizers\Stochastic\Stochastic; use Rubix\ML\NeuralNet\Initializers\Constant\Constant; +use Rubix\ML\NeuralNet\Parameters\Parameter as TrainableParameter; +use Rubix\ML\Exceptions\InvalidArgumentException; +use Rubix\ML\Exceptions\RuntimeException as RubixRuntimeException; use PHPUnit\Framework\TestCase; #[Group('Layers')] @@ -32,6 +38,83 @@ class BatchNormTest extends TestCase protected BatchNorm $layer; + /** + * @return array + */ + public static function initializeProvider() : array + { + return [ + 'fanIn=3' => [3], + ]; + } + + /** + * @return array + */ + public static function forwardProvider() : array + { + return [ + 'expectedForward' => [[ + [-0.1251222, 1.2825030, -1.1573808], + [-0.6708631, -0.7427414, 1.4136046], + [0.7974157, -1.4101899, 0.6127743], + ]], + ]; + } + + /** + * @return array + */ + public static function backProvider() : array + { + return [ + 'expectedGradient' => [[ + [-0.0644587, 0.0272710, 0.0371877], + [0.1137590, -0.1099670, -0.0037919], + [-0.1190978, -0.0108703, 0.1299681], + ]], + ]; + } + + /** + * @return array + */ + public static function inferProvider() : array + { + return [ + 'expectedInfer' => [[ + [-0.1251222, 1.2825031, -1.1573808], + [-0.6708631, -0.7427414, 1.4136046], + [0.7974158, 
-1.4101899, 0.6127743], + ]], + ]; + } + + /** + * @return array + */ + public static function gradientProvider() : array + { + return [ + 'expectedGradient' => [[ + [-0.0644587, 0.0272710, 0.0371877], + [0.1137590, -0.1099670, -0.0037919], + [-0.1190978, -0.0108703, 0.1299681], + ]], + ]; + } + + /** + * @return array + */ + public static function badDecayProvider() : array + { + return [ + 'negative' => [-0.01], + 'greaterThanOne' => [1.01], + ]; + } + protected function setUp() : void { $this->fanIn = 3; @@ -59,45 +142,161 @@ protected function setUp() : void ); } - public function testInitializeForwardBackInfer() : void + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void { - $this->layer->initialize($this->fanIn); + self::assertEquals( + 'Batch Norm (decay: 0.9, beta initializer: Constant (value: 0), gamma initializer: Constant (value: 1))', + (string) $this->layer + ); + } - self::assertEquals($this->fanIn, $this->layer->width()); + #[Test] + #[TestDox('Initializes width and returns fan out')] + #[DataProvider('initializeProvider')] + public function testInitialize(int $fanIn) : void + { + $fanOut = $this->layer->initialize($fanIn); + self::assertEquals($fanIn, $fanOut); + self::assertEquals($fanIn, $this->layer->width()); + } - $expected = [ - [-0.1251222, 1.2825030, -1.1573808], - [-0.6708631, -0.7427414, 1.4136046], - [0.7974157, -1.4101899, 0.6127743], - ]; + #[Test] + #[TestDox('Computes forward pass')] + #[DataProvider('forwardProvider')] + public function testForward(array $expected) : void + { + $this->layer->initialize($this->fanIn); $forward = $this->layer->forward($this->input); self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates and returns gradient for previous layer')] + #[DataProvider('backProvider')] + public function testBack(array $expected) : void + { + $this->layer->initialize($this->fanIn); + $this->layer->forward($this->input); $gradient = $this->layer->back( prevGradient: $this->prevGrad, optimizer: $this->optimizer )->compute(); - $expected = [ - [-0.06445877134888621, 0.027271018647605647, 0.03718775270128047], - [0.11375900761901864, -0.10996704069838469, -0.0037919669206339162], - [-0.11909780311643131, -0.01087038130262698, 0.1299681844190583], - ]; - self::assertInstanceOf(NDArray::class, $gradient); self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Infers using running statistics')] + #[DataProvider('inferProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize($this->fanIn); + // Perform a forward pass to set running mean/variance + $this->layer->forward($this->input); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Throws when width is requested before initialization')] + public function testWidthThrowsBeforeInitialize() : void + { + $layer = new BatchNorm(); + $this->expectException(RubixRuntimeException::class); + $layer->width(); + } + + #[Test] + #[TestDox('Constructor rejects invalid decay values')] + #[DataProvider('badDecayProvider')] + public function testConstructorRejectsInvalidDecay(float $decay) : void + { + $this->expectException(InvalidArgumentException::class); + new BatchNorm(decay: $decay); + } + + #[Test] + #[TestDox('Yields trainable parameters beta and gamma')] + public function testParameters() : void + { + $this->layer->initialize($this->fanIn); + + $params 
= iterator_to_array($this->layer->parameters()); -// $expected = [ -// [-0.1260783, 1.2804902385302876, -1.1575619225761131], -// [-0.6718883801743488, -0.7438003494787433, 1.4135587296530918], -// [0.7956943312039361, -1.4105786650534555, 0.6111643338495193], -// ]; -// -// $infer = $this->layer->infer($this->input); -// -// self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); -// self::assertTrue(true); + self::assertArrayHasKey('beta', $params); + self::assertArrayHasKey('gamma', $params); + self::assertInstanceOf(TrainableParameter::class, $params['beta']); + self::assertInstanceOf(TrainableParameter::class, $params['gamma']); + + self::assertEquals([0.0, 0.0, 0.0], $params['beta']->param()->toArray()); + self::assertEquals([1.0, 1.0, 1.0], $params['gamma']->param()->toArray()); + } + + #[Test] + #[TestDox('Restores parameters from array')] + public function testRestore() : void + { + $this->layer->initialize($this->fanIn); + + $betaNew = new TrainableParameter(NumPower::full([3], 2.0)); + $gammaNew = new TrainableParameter(NumPower::full([3], 3.0)); + + $this->layer->restore([ + 'beta' => $betaNew, + 'gamma' => $gammaNew, + ]); + + $restored = iterator_to_array($this->layer->parameters()); + self::assertSame($betaNew, $restored['beta']); + self::assertSame($gammaNew, $restored['gamma']); + self::assertEquals([2.0, 2.0, 2.0], $restored['beta']->param()->toArray()); + self::assertEquals([3.0, 3.0, 3.0], $restored['gamma']->param()->toArray()); + } + + #[Test] + #[TestDox('Computes gradient for previous layer directly')] + #[DataProvider('gradientProvider')] + public function testGradient(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + // Compute forward-time caches manually to pass into gradient() + $input = $this->input; + $rows = $input->shape()[0]; + $meanArr = []; + $varArr = []; + $stdInvArr = []; + + for ($i = 0; $i < $rows; $i++) { + $row = $input->toArray()[$i]; + $meanArr[$i] = NumPower::mean($row); + $varArr[$i] = NumPower::variance($row); + $stdInvArr[$i] = 1.0 / sqrt($varArr[$i]); + } + + $mean = NumPower::array($meanArr); + $stdInv = NumPower::array($stdInvArr); + + $xHat = NumPower::multiply( + NumPower::subtract(NumPower::transpose($input, [1, 0]), $mean), + $stdInv + ); + $xHat = NumPower::transpose($xHat, [1, 0]); + + // Use provided prevGrad as dOut and current gamma parameter + $dOut = ($this->prevGrad)(); + $gamma = iterator_to_array($this->layer->parameters())['gamma']->param(); + + $gradient = $this->layer->gradient($dOut, $gamma, $stdInv, $xHat); + + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); } } From a44e86a9d4aa2f7e098861a094bf2b13d47d3e21 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 19:01:46 +0200 Subject: [PATCH 27/42] ML-396 Refactored `BatchNorm` layer to optimize normalization logic with `NumPower` utilities --- src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 34 +++++++++---------- .../Layers/BatchNorm/BatchNormTest.php | 4 +-- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php index b7c170abb..1e29a81ec 100644 --- a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -190,29 +190,27 @@ public function forward(NDArray $input) : NDArray throw new RuntimeException('Layer has not been initialized.'); } - $rows = $input->shape()[0]; - $meanArr = []; - $varArr = []; - $stdInvArr = []; - - for ($i = 0; $i < $rows; $i++) { - 
$meanArr[$i] = NumPower::mean($input->toArray()[$i]); - $varArr[$i] = NumPower::variance($input->toArray()[$i]); - $stdInvArr[$i] = 1.0 / sqrt($varArr[$i]); - } + // Shape: [m, n] + [$m, $n] = $input->shape(); + + // Row-wise mean across features (axis 1), length m + $sum = NumPower::sum($input, 1); + $mean = NumPower::divide($sum, $n); - $mean = NumPower::array($meanArr); + // Center the input: broadcast mean to [m, n] + $centered = NumPower::subtract($input, NumPower::reshape($mean, [$m, 1])); - $variance = NumPower::array($varArr); + // Row-wise variance across features (axis 1) + $centeredSq = NumPower::multiply($centered, $centered); + $varSum = NumPower::sum($centeredSq, 1); + $variance = NumPower::divide($varSum, $n); $variance = NumPower::clip($variance, EPSILON, PHP_FLOAT_MAX); - $stdInv = NumPower::array($stdInvArr); + // Inverse std from clipped variance + $stdInv = NumPower::reciprocal(NumPower::sqrt($variance)); - $xHat = NumPower::multiply( - NumPower::subtract(NumPower::transpose($input, [1, 0]), $mean), - $stdInv - ); - $xHat = NumPower::transpose($xHat, [1, 0]); + // Normalize: (x - mean) * stdInv + $xHat = NumPower::multiply($centered, NumPower::reshape($stdInv, [$m, 1])); // Initialize running stats if needed if (!$this->mean or !$this->variance) { diff --git a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php index 9f05ab47b..4912857e3 100644 --- a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php +++ b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php @@ -55,9 +55,9 @@ public static function forwardProvider() : array { return [ 'expectedForward' => [[ - [-0.1251222, 1.2825030, -1.1573808], + [-0.1251222, 1.2825031, -1.1573808], [-0.6708631, -0.7427414, 1.4136046], - [0.7974157, -1.4101899, 0.6127743], + [0.7974158, -1.4101899, 0.6127743], ]], ]; } From 5a3f5a718f2742ca9f7adcd66e6d5edfbbf5f405 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 19:10:52 +0200 Subject: [PATCH 28/42] ML-397 Enhanced `BatchNorm` layer with improved axis constants, numerical stability during inference, and gradient computation logic --- src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 28 +++++++++----------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php index 1e29a81ec..7b786b1f0 100644 --- a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -110,7 +110,8 @@ class BatchNorm implements Hidden, Parametric * * @var int */ - protected const int COLUMN_WISE = 1; + protected const int AXIS_SAMPLES = 0; + protected const int AXIS_FEATURES = 1; /** * @param float $decay @@ -218,7 +219,8 @@ public function forward(NDArray $input) : NDArray $this->variance = $variance; } - // Update running mean/variance: running = running*(1-decay) + current*decay + // Update running mean/variance using exponential moving average (EMA) + // Convention: running = running*(1 - decay) + current*decay $this->mean = NumPower::add( NumPower::multiply($this->mean, 1.0 - $this->decay), NumPower::multiply($mean, $this->decay) @@ -254,9 +256,11 @@ public function infer(NDArray $input) : NDArray // Number of rows $m = $input->shape()[0]; + // Use clipped variance for numerical stability during inference + $varianceClipped = NumPower::clip($this->variance, EPSILON, PHP_FLOAT_MAX); $xHat = NumPower::divide( NumPower::subtract($input, NumPower::reshape($this->mean, [$m, 1])), - 
NumPower::reshape(NumPower::sqrt($this->variance), [$m, 1]) + NumPower::reshape(NumPower::sqrt($varianceClipped), [$m, 1]) ); return NumPower::add( @@ -266,8 +270,6 @@ public function infer(NDArray $input) : NDArray ), $this->beta->param() ); - - return $return; } /** @@ -287,13 +289,13 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred } if (!$this->stdInv or !$this->xHat) { - throw new RuntimeException('Must perform forward pass before' - . ' backpropagating.'); + throw new RuntimeException('Must perform forward pass before backpropagating.'); } $dOut = $prevGradient(); - $dBeta = NumPower::sum($dOut, self::COLUMN_WISE); - $dGamma = NumPower::sum(NumPower::multiply($dOut, $this->xHat), self::COLUMN_WISE); + // Sum across samples (axis 0) for parameter gradients + $dBeta = NumPower::sum($dOut, self::AXIS_SAMPLES); + $dGamma = NumPower::sum(NumPower::multiply($dOut, $this->xHat), self::AXIS_SAMPLES); $gamma = $this->gamma->param(); $this->beta->update($dBeta, $optimizer); @@ -308,8 +310,6 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred [$this, 'gradient'], [$dOut, $gamma, $stdInv, $xHat] ); - - return $return; } /** @@ -326,8 +326,8 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray $xHat) : NDArray { $dXHat = NumPower::multiply($dOut, $gamma); - $xHatSigma = NumPower::sum(NumPower::multiply($dXHat, $xHat), self::COLUMN_WISE); - $dXHatSigma = NumPower::sum($dXHat, self::COLUMN_WISE); + $xHatSigma = NumPower::sum(NumPower::multiply($dXHat, $xHat), self::AXIS_FEATURES); + $dXHatSigma = NumPower::sum($dXHat, self::AXIS_FEATURES); // Number of rows $m = $dOut->shape()[0]; @@ -343,8 +343,6 @@ public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray ), NumPower::reshape(NumPower::divide($stdInv, $m), [$m, 1]) ); - - return $return; } /** From ec3c2362ebc7c20511944b7eeb76b491e7d8e602 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 19:16:16 +0200 Subject: [PATCH 29/42] ML-397 Enhanced `BatchNorm` layer with improved axis constants, numerical stability during inference, and gradient computation logic --- src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php index 7b786b1f0..40eafbf3a 100644 --- a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -190,8 +190,7 @@ public function forward(NDArray $input) : NDArray if (!$this->beta or !$this->gamma) { throw new RuntimeException('Layer has not been initialized.'); } - - // Shape: [m, n] + [$m, $n] = $input->shape(); // Row-wise mean across features (axis 1), length m @@ -253,8 +252,7 @@ public function infer(NDArray $input) : NDArray throw new RuntimeException('Layer has not been initialized.'); } - // Number of rows - $m = $input->shape()[0]; + [$m, $n] = $input->shape(); // Use clipped variance for numerical stability during inference $varianceClipped = NumPower::clip($this->variance, EPSILON, PHP_FLOAT_MAX); @@ -329,8 +327,7 @@ public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray $xHatSigma = NumPower::sum(NumPower::multiply($dXHat, $xHat), self::AXIS_FEATURES); $dXHatSigma = NumPower::sum($dXHat, self::AXIS_FEATURES); - // Number of rows - $m = $dOut->shape()[0]; + [$m, $n] = $dOut->shape(); // Compute 
gradient per formula: dX = (dXHat * m - dXHatSigma - xHat * xHatSigma) * (stdInv / m) return NumPower::multiply( From 00955f981ec72df1d5fc2580a95bbd481131b3c3 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 19:43:32 +0200 Subject: [PATCH 30/42] ML-398 Improved `BatchNorm` behavior for varying batch sizes with additional tests and updated shape handling --- src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 7 +- .../Layers/BatchNorm/BatchNormTest.php | 90 +++++++++++++++++++ 2 files changed, 94 insertions(+), 3 deletions(-) diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php index 40eafbf3a..a15b1fac5 100644 --- a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -32,6 +32,7 @@ * @category Machine Learning * @package Rubix/ML * @author Andrew DalPino + * @author Samuel Akopyan */ class BatchNorm implements Hidden, Parametric { @@ -190,7 +191,7 @@ public function forward(NDArray $input) : NDArray if (!$this->beta or !$this->gamma) { throw new RuntimeException('Layer has not been initialized.'); } - + [$m, $n] = $input->shape(); // Row-wise mean across features (axis 1), length m @@ -252,7 +253,7 @@ public function infer(NDArray $input) : NDArray throw new RuntimeException('Layer has not been initialized.'); } - [$m, $n] = $input->shape(); + $m = $input->shape()[0]; // Use clipped variance for numerical stability during inference $varianceClipped = NumPower::clip($this->variance, EPSILON, PHP_FLOAT_MAX); @@ -327,7 +328,7 @@ public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray $xHatSigma = NumPower::sum(NumPower::multiply($dXHat, $xHat), self::AXIS_FEATURES); $dXHatSigma = NumPower::sum($dXHat, self::AXIS_FEATURES); - [$m, $n] = $dOut->shape(); + $m = $dOut->shape()[0]; // Compute gradient per formula: dX = (dXHat * m - dXHatSigma - xHat * xHatSigma) * (stdInv / m) return NumPower::multiply( diff --git a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php index 4912857e3..e926782f8 100644 --- a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php +++ b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php @@ -90,6 +90,30 @@ public static function inferProvider() : array ]; } + /** + * Additional inputs to validate behavior across different batch sizes. 
+ * + * @return array + */ + public static function batchInputsProvider() : array + { + return [ + 'batch1x3' => [[ + [2.0, -1.0, 0.0], + ]], + 'batch2x3' => [[ + [1.0, 2.0, 3.0], + [3.0, 3.0, 3.0], + ]], + 'batch4x3' => [[ + [0.5, -0.5, 1.5], + [10.0, -10.0, 0.0], + [7.2, 3.3, -2.4], + [-1.0, -2.0, 4.0], + ]], + ]; + } + /** * @return array */ @@ -205,6 +229,36 @@ public function testInfer(array $expected) : void self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); } + #[Test] + #[TestDox('Computes forward pass (row-wise) with zero mean and unit variance per sample for various batch sizes')] + #[DataProvider('batchInputsProvider')] + public function testForwardStatsMultipleBatches(array $input) : void + { + $this->layer->initialize($this->fanIn); + + $forward = $this->layer->forward(NumPower::array($input)); + $out = $forward->toArray(); + + // Check per-row mean ~ 0 and variance ~ 1 (allow 0 for degenerate rows) + $this->assertRowwiseStats($input, $out, true); + } + + #[Test] + #[TestDox('Infers (row-wise) with zero mean and unit variance per sample for various batch sizes')] + #[DataProvider('batchInputsProvider')] + public function testInferStatsMultipleBatches(array $input) : void + { + $this->layer->initialize($this->fanIn); + + // Perform a forward pass on the same input to initialize running stats + $this->layer->forward(NumPower::array($input)); + + $infer = $this->layer->infer(NumPower::array($input)); + $out = $infer->toArray(); + + $this->assertRowwiseStats($input, $out, false); + } + #[Test] #[TestDox('Throws when width is requested before initialization')] public function testWidthThrowsBeforeInitialize() : void @@ -299,4 +353,40 @@ public function testGradient(array $expected) : void self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); } + + /** + * @param array> $inputRows + * @param array> $outRows + */ + private function assertRowwiseStats(array $inputRows, array $outRows, bool $checkMean) : void + { + foreach ($outRows as $i => $row) { + $mean = array_sum($row) / count($row); + $var = 0.0; + foreach ($row as $v) { + $var += ($v - $mean) * ($v - $mean); + } + $var /= count($row); + + $orig = $inputRows[$i]; + $origMean = array_sum($orig) / count($orig); + $origVar = 0.0; + foreach ($orig as $ov) { + $origVar += ($ov - $origMean) * ($ov - $origMean); + } + $origVar /= count($orig); + + $expectedVar = $origVar < 1e-12 ? 
0.0 : 1.0; + + if ($checkMean) { + self::assertEqualsWithDelta(0.0, $mean, 1e-7); + } + + if ($expectedVar === 0.0) { + self::assertLessThan(5e-3, $var); + } else { + self::assertEqualsWithDelta(1.0, $var, 1e-6); + } + } + } } From 3fb79a919b06941e45dbf79a3c4f02ae2f4cc5c6 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 19:46:00 +0200 Subject: [PATCH 31/42] ML-398 Improved `BatchNorm` behavior for varying batch sizes with additional tests and updated shape handling --- tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php index e926782f8..dd5380941 100644 --- a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php +++ b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php @@ -11,6 +11,7 @@ use PHPUnit\Framework\Attributes\Test; use PHPUnit\Framework\Attributes\TestDox; use PHPUnit\Framework\Attributes\DataProvider; +use PHPUnit\Framework\MockObject\Rule\Parameters; use Rubix\ML\Deferred; use Rubix\ML\NeuralNet\Layers\BatchNorm\BatchNorm; use Rubix\ML\NeuralNet\Optimizers\Base\Optimizer; @@ -383,7 +384,7 @@ private function assertRowwiseStats(array $inputRows, array $outRows, bool $chec } if ($expectedVar === 0.0) { - self::assertLessThan(5e-3, $var); + self::assertLessThan(1e-6, $var); } else { self::assertEqualsWithDelta(1.0, $var, 1e-6); } From b4e507d18aab5b8580ca17bc446fa11fa19679d7 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 22:53:33 +0200 Subject: [PATCH 32/42] ML-399 Added `Binary` output layer and comprehensive unit tests with interface definition for output layers. --- .../Layers/Base/Contracts/Output.php | 29 +++ src/NeuralNet/Layers/Binary/Binary.php | 222 ++++++++++++++++++ tests/NeuralNet/Layers/Binary/BinaryTest.php | 192 +++++++++++++++ 3 files changed, 443 insertions(+) create mode 100644 src/NeuralNet/Layers/Base/Contracts/Output.php create mode 100644 src/NeuralNet/Layers/Binary/Binary.php create mode 100644 tests/NeuralNet/Layers/Binary/BinaryTest.php diff --git a/src/NeuralNet/Layers/Base/Contracts/Output.php b/src/NeuralNet/Layers/Base/Contracts/Output.php new file mode 100644 index 000000000..49e11bb4b --- /dev/null +++ b/src/NeuralNet/Layers/Base/Contracts/Output.php @@ -0,0 +1,29 @@ + + */ +interface Output extends Layer +{ + /** + * Compute the gradient and loss at the output. + * + * @param (string|int|float)[] $labels + * @param Optimizer $optimizer + * @throws RuntimeException + * @return mixed[] + */ + public function back(array $labels, Optimizer $optimizer) : array; +} diff --git a/src/NeuralNet/Layers/Binary/Binary.php b/src/NeuralNet/Layers/Binary/Binary.php new file mode 100644 index 000000000..37b6f145b --- /dev/null +++ b/src/NeuralNet/Layers/Binary/Binary.php @@ -0,0 +1,222 @@ + + */ +class Binary implements Output +{ + /** + * The labels of either of the possible outcomes. + * + * @var float[] + */ + protected array $classes = [ + // + ]; + + /** + * The function that computes the loss of erroneous activations. + * + * @var ClassificationLoss + */ + protected ClassificationLoss $costFn; + + /** + * The sigmoid activation function. + * + * @var Sigmoid + */ + protected Sigmoid $sigmoid; + + /** + * The memorized input matrix. + * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * The memorized activation matrix. 
+ * + * @var NDArray|null + */ + protected ?NDArray $output = null; + + /** + * @param string[] $classes + * @param ClassificationLoss|null $costFn + * @throws InvalidArgumentException + */ + public function __construct(array $classes, ?ClassificationLoss $costFn = null) + { + $classes = array_values(array_unique($classes)); + + if (count($classes) !== 2) { + throw new InvalidArgumentException('Number of classes must be 2, ' . count($classes) . ' given.'); + } + + $classes = [ + $classes[0] => 0.0, + $classes[1] => 1.0, + ]; + + $this->classes = $classes; + $this->costFn = $costFn ?? new CrossEntropy(); + $this->sigmoid = new Sigmoid(); + } + + /** + * Return the width of the layer. + * + * @return positive-int + */ + public function width() : int + { + return 1; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @param positive-int $fanIn + * @throws InvalidArgumentException + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + if ($fanIn !== 1) { + throw new InvalidArgumentException("Fan in must be equal to 1, $fanIn given."); + } + + return 1; + } + + /** + * Compute a forward pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $output = $this->sigmoid->activate($input); + + $this->input = $input; + $this->output = $output; + + return $output; + } + + /** + * Compute an inferential pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $this->sigmoid->activate($input); + } + + /** + * Compute the gradient and loss at the output. + * + * @param string[] $labels + * @param Optimizer $optimizer + * @throws RuntimeException + * @return (Deferred|float)[] + */ + public function back(array $labels, Optimizer $optimizer) : array + { + if (!$this->input or !$this->output) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + $expected = []; + + foreach ($labels as $label) { + $expected[] = $this->classes[$label]; + } + + $expected = NumPower::array([$expected]); + + $input = $this->input; + $output = $this->output; + + $gradient = new Deferred([$this, 'gradient'], [$input, $output, $expected]); + + $loss = $this->costFn->compute($output, $expected); + + $this->input = $this->output = null; + + return [$gradient, $loss]; + } + + /** + * Calculate the gradient for the previous layer. + * + * @param NDArray $input + * @param NDArray $output + * @param NDArray $expected + * @return NDArray + */ + public function gradient(NDArray $input, NDArray $output, NDArray $expected) : NDArray + { + $n = $output->shape()[1]; + + if ($this->costFn instanceof CrossEntropy) { + return NumPower::divide( + NumPower::subtract($output, $expected), + $n + ); + } + + $dLoss = NumPower::divide( + $this->costFn->differentiate($output, $expected), + $n + ); + + return NumPower::multiply( + $this->sigmoid->differentiate($output), + $dLoss + ); + } + + /** + * Return the string representation of the object. 
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Binary (cost function: {$this->costFn})"; + } +} diff --git a/tests/NeuralNet/Layers/Binary/BinaryTest.php b/tests/NeuralNet/Layers/Binary/BinaryTest.php new file mode 100644 index 000000000..645d7c86b --- /dev/null +++ b/tests/NeuralNet/Layers/Binary/BinaryTest.php @@ -0,0 +1,192 @@ + + */ + public static function forwardProvider() : array + { + return [ + [ + [ + [0.7310585, 0.9241418, 0.4750207], + ], + ], + ]; + } + + /** + * @return array + */ + public static function backProvider() : array + { + return [ + [ + [ + [0.2436861, -0.0252860, 0.1583402], + ], + ], + ]; + } + + /** + * @return array}> + */ + public static function badClassesProvider() : array + { + return [ + 'empty' => [[]], + 'single' => [['hot']], + 'duplicatesToOne' => [['hot', 'hot']], + 'threeUnique' => [['hot', 'cold', 'warm']], + ]; + } + + protected function setUp() : void + { + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + ]); + + $this->labels = ['hot', 'cold', 'hot']; + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Binary(classes: ['hot', 'cold'], costFn: new CrossEntropy()); + } + + #[Test] + #[TestDox('Returns string representation')] + public function testToString() : void + { + $this->layer->initialize(1); + + self::assertEquals('Binary (cost function: Cross Entropy)', (string) $this->layer); + } + + #[Test] + #[TestDox('Initializes and reports width')] + public function testInitializeWidth() : void + { + $this->layer->initialize(1); + self::assertEquals(1, $this->layer->width()); + } + + #[Test] + #[TestDox('Constructor rejects invalid classes arrays')] + #[DataProvider('badClassesProvider')] + public function testConstructorRejectsInvalidClasses(array $classes) : void + { + $this->expectException(InvalidArgumentException::class); + new Binary(classes: $classes, costFn: new CrossEntropy()); + } + + #[Test] + #[TestDox('Constructor accepts classes arrays that dedupe to exactly 2 labels')] + public function testConstructorAcceptsDuplicateClassesThatDedupeToTwo() : void + { + $layer = new Binary(classes: ['hot', 'cold', 'hot'], costFn: new CrossEntropy()); + // Should initialize without throwing and report correct width + $layer->initialize(1); + self::assertEquals(1, $layer->width()); + } + + #[Test] + #[TestDox('Computes forward pass')] + #[DataProvider('forwardProvider')] + public function testForward(array $expected) : void + { + $this->layer->initialize(1); + + $forward = $this->layer->forward($this->input); + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates and returns gradient for previous layer')] + #[DataProvider('backProvider')] + public function testBack(array $expectedGradient) : void + { + $this->layer->initialize(1); + $this->layer->forward($this->input); + + [$computation, $loss] = $this->layer->back(labels: $this->labels, optimizer: $this->optimizer); + + self::assertInstanceOf(Deferred::class, $computation); + self::assertIsFloat($loss); + + $gradient = $computation->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expectedGradient, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes gradient directly given input, output, expected, and batch size')] + #[DataProvider('backProvider')] + public function testGradient(array $expectedGradient) : void + { + $this->layer->initialize(1); + + $input = $this->input; + $output = 
$this->layer->forward($input); + + // Build expected NDArray (1, batch) using the Binary classes mapping: hot=>0.0, cold=>1.0 + $expected = []; + foreach ($this->labels as $label) { + $expected[] = ($label === 'cold') ? 1.0 : 0.0; + } + $expected = NumPower::array([$expected]); + + $batchSize = count($this->labels); + + $gradient = $this->layer->gradient($input, $output, $expected, $batchSize); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expectedGradient, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference activations')] + #[DataProvider('forwardProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize(1); + + $infer = $this->layer->infer($this->input); + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } +} From d5f7c5778fb4cf572b065a4a8dc61db0213082ec Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 23:18:14 +0200 Subject: [PATCH 33/42] ML-400 Added `Continuous` output layer with complete interface implementation and unit tests --- .../Layers/Continuous/Continuous.php | 157 +++++++++++++++++ .../Layers/Continuous/ContinuousTest.php | 159 ++++++++++++++++++ 2 files changed, 316 insertions(+) create mode 100644 src/NeuralNet/Layers/Continuous/Continuous.php create mode 100644 tests/NeuralNet/Layers/Continuous/ContinuousTest.php diff --git a/src/NeuralNet/Layers/Continuous/Continuous.php b/src/NeuralNet/Layers/Continuous/Continuous.php new file mode 100644 index 000000000..7a07e9735 --- /dev/null +++ b/src/NeuralNet/Layers/Continuous/Continuous.php @@ -0,0 +1,157 @@ + + */ +class Continuous implements Output +{ + /** + * The function that computes the loss of erroneous activations. + * + * @var RegressionLoss + */ + protected RegressionLoss $costFn; + + /** + * The memorized input matrix. + * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * @param RegressionLoss|null $costFn + */ + public function __construct(?RegressionLoss $costFn = null) + { + $this->costFn = $costFn ?? new LeastSquares(); + } + + /** + * Return the width of the layer. + * + * @return positive-int + */ + public function width() : int + { + return 1; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @param positive-int $fanIn + * @throws InvalidArgumentException + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + if ($fanIn !== 1) { + throw new InvalidArgumentException("Fan in must be equal to 1, $fanIn given."); + } + + return 1; + } + + /** + * Compute a forward pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $this->input = $input; + + return $input; + } + + /** + * Compute an inferential pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $input; + } + + /** + * Compute the gradient and loss at the output. 
+ * + * @param (int|float)[] $labels + * @param Optimizer $optimizer + * @throws RuntimeException + * @return (Deferred|float)[] + */ + public function back(array $labels, Optimizer $optimizer) : array + { + if (!$this->input) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + $expected = NumPower::array([$labels]); + + $input = $this->input; + + $gradient = new Deferred([$this, 'gradient'], [$input, $expected]); + + $loss = $this->costFn->compute($input, $expected); + + $this->input = null; + + return [$gradient, $loss]; + } + + /** + * Calculate the gradient for the previous layer. + * + * @param NDArray $input + * @param NDArray $expected + * @return NDArray + */ + public function gradient(NDArray $input, NDArray $expected) : NDArray + { + $n = $input->shape()[1]; + + return NumPower::divide( + $this->costFn->differentiate($input, $expected), + $n + ); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Continuous (cost function: {$this->costFn})"; + } +} diff --git a/tests/NeuralNet/Layers/Continuous/ContinuousTest.php b/tests/NeuralNet/Layers/Continuous/ContinuousTest.php new file mode 100644 index 000000000..39592cdcb --- /dev/null +++ b/tests/NeuralNet/Layers/Continuous/ContinuousTest.php @@ -0,0 +1,159 @@ + + */ + public static function forwardProvider() : array + { + return [ + [ + [ + [2.5, 0.0, -6.0], + ], + ], + ]; + } + + /** + * @return array + */ + public static function gradientProvider() : array + { + return [ + [ + [ + [0.8333333, 0.8333333, -32.0], + ], + ], + ]; + } + + protected function setUp() : void + { + $this->input = NumPower::array([ + [2.5, 0.0, -6.0], + ]); + + $this->labels = [0.0, -2.5, 90.0]; + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Continuous(new LeastSquares()); + } + + #[Test] + #[TestDox('Returns string representation')] + public function testToString() : void + { + $this->layer->initialize(1); + + self::assertEquals('Continuous (cost function: Least Squares)', (string) $this->layer); + } + + #[Test] + #[TestDox('Initializes and reports width')] + public function testInitializeWidth() : void + { + $this->layer->initialize(1); + self::assertEquals(1, $this->layer->width()); + } + + #[Test] + #[TestDox('Initialize rejects fan-in not equal to 1')] + public function testInitializeRejectsInvalidFanIn() : void + { + $this->expectException(InvalidArgumentException::class); + $this->layer->initialize(2); + } + + #[Test] + #[TestDox('Computes forward pass')] + #[DataProvider('forwardProvider')] + public function testForward(array $expected) : void + { + $this->layer->initialize(1); + + $forward = $this->layer->forward($this->input); + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates and returns gradient for previous layer')] + #[DataProvider('gradientProvider')] + public function testBack(array $expectedGradient) : void + { + $this->layer->initialize(1); + $this->layer->forward($this->input); + + [$computation, $loss] = $this->layer->back(labels: $this->labels, optimizer: $this->optimizer); + + self::assertInstanceOf(Deferred::class, $computation); + self::assertIsFloat($loss); + + $gradient = $computation->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expectedGradient, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes gradient directly given input and 
expected')] + #[DataProvider('gradientProvider')] + public function testGradient(array $expectedGradient) : void + { + $this->layer->initialize(1); + + $input = $this->input; + $expected = NumPower::array([$this->labels]); + + $gradient = $this->layer->gradient($input, $expected); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expectedGradient, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference activations')] + #[DataProvider('forwardProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize(1); + + $infer = $this->layer->infer($this->input); + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } +} From cca97f96faaa213ba8f0d1b7a9e119c0925ad762 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Wed, 10 Dec 2025 20:23:52 +0200 Subject: [PATCH 34/42] ML-392 Added `Dense` hidden layer implementation with complete forward/backward passes --- docs/neural-network/hidden-layers/dense.md | 6 +- src/NeuralNet/Layers/Dense/Dense.php | 348 +++++++++++++++++++++ tests/NeuralNet/Layers/Dense/DenseTest.php | 308 ++++++++++++++++++ 3 files changed, 659 insertions(+), 3 deletions(-) create mode 100644 src/NeuralNet/Layers/Dense/Dense.php create mode 100644 tests/NeuralNet/Layers/Dense/DenseTest.php diff --git a/docs/neural-network/hidden-layers/dense.md b/docs/neural-network/hidden-layers/dense.md index cf4a7bd4c..db382d0a0 100644 --- a/docs/neural-network/hidden-layers/dense.md +++ b/docs/neural-network/hidden-layers/dense.md @@ -1,4 +1,4 @@ -[source] +[source] # Dense Dense (or *fully connected*) hidden layers are layers of neurons that connect to each node in the previous layer by a parameterized synapse. They perform a linear transformation on their input and are usually followed by an [Activation](activation.md) layer. The majority of the trainable parameters in a standard feed forward neural network are contained within Dense hidden layers. @@ -14,9 +14,9 @@ Dense (or *fully connected*) hidden layers are layers of neurons that connect to ## Example ```php -use Rubix\ML\NeuralNet\Layers\Dense; +use Rubix\ML\NeuralNet\Layers\Dense\Dense; use Rubix\ML\NeuralNet\Initializers\He; use Rubix\ML\NeuralNet\Initializers\Constant; $layer = new Dense(100, 1e-4, true, new He(), new Constant(0.0)); -``` \ No newline at end of file +``` diff --git a/src/NeuralNet/Layers/Dense/Dense.php b/src/NeuralNet/Layers/Dense/Dense.php new file mode 100644 index 000000000..ee62d1e75 --- /dev/null +++ b/src/NeuralNet/Layers/Dense/Dense.php @@ -0,0 +1,348 @@ + + */ +class Dense implements Hidden, Parametric +{ + /** + * The number of nodes in the layer. + * + * @var positive-int + */ + protected int $neurons; + + /** + * The amount of L2 regularization applied to the weights. + * + * @var float + */ + protected float $l2Penalty; + + /** + * Should the layer include a bias parameter? + * + * @var bool + */ + protected bool $bias; + + /** + * The weight initializer. + * + * @var Initializer + */ + protected Initializer $weightInitializer; + + /** + * The bias initializer. + * + * @var Initializer + */ + protected Initializer $biasInitializer; + + /** + * The weights. + * + * @var Parameter|null + */ + protected ?Parameter $weights = null; + + /** + * The biases. + * + * @var Parameter|null + */ + protected ?Parameter $biases = null; + + /** + * The memorized inputs to the layer. 
+ * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * @param int $neurons + * @param float $l2Penalty + * @param bool $bias + * @param Initializer|null $weightInitializer + * @param Initializer|null $biasInitializer + * @throws InvalidArgumentException + */ + public function __construct( + int $neurons, + float $l2Penalty = 0.0, + bool $bias = true, + ?Initializer $weightInitializer = null, + ?Initializer $biasInitializer = null + ) { + if ($neurons < 1) { + throw new InvalidArgumentException("Number of neurons must be greater than 0, $neurons given."); + } + + if ($l2Penalty < 0.0) { + throw new InvalidArgumentException("L2 Penalty must be greater than 0, $l2Penalty given."); + } + + $this->neurons = $neurons; + $this->l2Penalty = $l2Penalty; + $this->bias = $bias; + $this->weightInitializer = $weightInitializer ?? new HeUniform(); + $this->biasInitializer = $biasInitializer ?? new Constant(0.0); + } + + /** + * Return the width of the layer. + * + * @internal + * + * @return positive-int + */ + public function width() : int + { + return $this->neurons; + } + + /** + * Return the weight matrix. + * + * @internal + * + * @throws RuntimeException + * @return NDArray + */ + public function weights() : NDArray + { + if (!$this->weights) { + throw new RuntimeException('Layer is not initialized'); + } + + return $this->weights->param(); + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $this->neurons; + + $weights = $this->weightInitializer->initialize($fanIn, $fanOut); + + $this->weights = new Parameter($weights); + + if ($this->bias) { + // Initialize biases as a vector of length fanOut + $biasMat = $this->biasInitializer->initialize(1, $fanOut); + $biases = NumPower::flatten($biasMat); + + $this->biases = new Parameter($biases); + } + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @param NDArray $input + * @return NDArray + * @internal + * + */ + public function forward(NDArray $input) : NDArray + { + if (!$this->weights) { + throw new RuntimeException('Layer is not initialized'); + } + + $output = NumPower::matmul($this->weights->param(), $input); + + if ($this->biases) { + // Reshape bias vector [fanOut] to column [fanOut, 1] to match output [fanOut, n] + $bias = NumPower::reshape($this->biases->param(), [$this->neurons, 1]); + // Manual “broadcast”: [neurons, n] + [neurons, 1] + $output = NumPower::add($output, $bias); + } + + $this->input = $input; + + return $output; + } + + /** + * Compute an inference pass through the layer. + * + * @param NDArray $input + * @return NDArray + * @internal + * + */ + public function infer(NDArray $input) : NDArray + { + if (!$this->weights) { + throw new RuntimeException('Layer is not initialized'); + } + + $output = NumPower::matmul($this->weights->param(), $input); + + if ($this->biases) { + // Reshape bias vector [fanOut] to column [fanOut, 1] to match output [fanOut, n] + $bias = NumPower::reshape($this->biases->param(), [$this->neurons, 1]); + // Manual “broadcast”: [neurons, n] + [neurons, 1] + $output = NumPower::add($output, $bias); + } + + return $output; + } + + /** + * Calculate the gradient and update the parameters of the layer. 
+ * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @throws RuntimeException + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + if (!$this->weights) { + throw new RuntimeException('Layer has not been initialized.'); + } + + if (!$this->input) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + /** @var NDArray $dOut */ + $dOut = $prevGradient(); + + $inputT = NumPower::transpose($this->input, [1, 0]); + + $dW = NumPower::matmul($dOut, $inputT); + + $weights = $this->weights->param(); + + if ($this->l2Penalty) { + $dW = NumPower::add( + $dW, + NumPower::multiply($weights, $this->l2Penalty) + ); + } + + $this->weights->update($dW, $optimizer); + + if ($this->biases) { + // Sum gradients over the batch dimension to obtain a bias gradient + // with the same shape as the bias vector [neurons] + $dB = NumPower::sum($dOut, axis: 1); + + $this->biases->update($dB, $optimizer); + } + + $this->input = null; + + return new Deferred([$this, 'gradient'], [$weights, $dOut]); + } + + /** + * Calculate the gradient for the previous layer. + * + * @internal + * + * @param NDArray $weights + * @param NDArray $dOut + * @return NDArray + */ + public function gradient(NDArray $weights, NDArray $dOut) : NDArray + { + $weightsT = NumPower::transpose($weights, [1, 0]); + + return NumPower::matmul($weightsT, $dOut); + } + + /** + * Return the parameters of the layer. + * + * @internal + * + * @throws RuntimeException + * @return Generator + */ + public function parameters() : Generator + { + if (!$this->weights) { + throw new RuntimeException('Layer has not been initialized.'); + } + + yield 'weights' => $this->weights; + + if ($this->biases) { + yield 'biases' => $this->biases; + } + } + + /** + * Restore the parameters in the layer from an associative array. + * + * @internal + * + * @param Parameter[] $parameters + */ + public function restore(array $parameters) : void + { + $this->weights = $parameters['weights']; + $this->biases = $parameters['biases'] ?? null; + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Dense (neurons: {$this->neurons}, l2 penalty: {$this->l2Penalty}," + . ' bias: ' . Params::toString($this->bias) . ',' + . " weight initializer: {$this->weightInitializer}," + . 
" bias initializer: {$this->biasInitializer})"; + } +} diff --git a/tests/NeuralNet/Layers/Dense/DenseTest.php b/tests/NeuralNet/Layers/Dense/DenseTest.php new file mode 100644 index 000000000..d8c920aa3 --- /dev/null +++ b/tests/NeuralNet/Layers/Dense/DenseTest.php @@ -0,0 +1,308 @@ +>, array, array>}> + */ + public static function forwardProvider() : array + { + return [ + [ + // weights 2x3 + [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + ], + // biases length-2 + [0.0, 0.0], + // expected forward output 2x3 for the fixed input in setUp() + // input = [ + // [1.0, 2.5, -0.1], + // [0.1, 0.0, 3.0], + // [0.002, -6.0, -0.5], + // ]; + // so W * input = first two rows of input + [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + ], + ], + ]; + } + + /** + * @return array>, array, array>, array>}> + */ + public static function backProvider() : array + { + return [ + [ + // weights 2x3 + [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + ], + // biases length-2 + [0.0, 0.0], + // prev gradient 2x3 + [ + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ], + // expected gradient for previous layer 3x3 + [ + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + [0.0, 0.0, 0.0], + ], + ], + ]; + } + + protected function setUp() : void + { + $this->fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Dense( + neurons: 2, + l2Penalty: 0.0, + bias: true, + weightInitializer: new HeUniform(), + biasInitializer: new Constant(0.0) + ); + + srand(self::RANDOM_SEED); + } + + #[Test] + #[TestDox('Throws an exception for invalid constructor arguments')] + public function testConstructorValidation() : void + { + $this->expectException(InvalidArgumentException::class); + + new Dense( + neurons: 0, + l2Penalty: -0.1, + bias: true, + weightInitializer: new HeUniform(), + biasInitializer: new Constant(0.0) + ); + } + + #[Test] + #[TestDox('Computes forward activations for fixed weights and biases')] + #[DataProvider('forwardProvider')] + public function testForward(array $weights, array $biases, array $expected) : void + { + $this->layer->initialize($this->fanIn); + self::assertEquals(2, $this->layer->width()); + + $this->layer->restore([ + 'weights' => new TrainableParameter(NumPower::array($weights)), + 'biases' => new TrainableParameter(NumPower::array($biases)), + ]); + + $forward = $this->layer->forward($this->input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Method weights() returns the restored weight matrix')] + public function testWeightsReturnsExpectedValues() : void + { + $this->layer->initialize($this->fanIn); + + $weightsArray = [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + ]; + + $this->layer->restore([ + 'weights' => new TrainableParameter(NumPower::array($weightsArray)), + 'biases' => new TrainableParameter(NumPower::array([0.0, 0.0])), + ]); + + $weights = $this->layer->weights(); + + self::assertEqualsWithDelta($weightsArray, $weights->toArray(), 1e-7); + } + + #[Test] + #[TestDox('width() returns the number of neurons')] + public function testWidthReturnsNeuronsCount() : void + { + // Layer is constructed in setUp() with neurons: 2 + self::assertSame(2, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes backpropagated gradients for previous layer')] + #[DataProvider('backProvider')] + public 
function testBack(array $weights, array $biases, array $prevGrad, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $this->layer->restore([ + 'weights' => new TrainableParameter(NumPower::array($weights)), + 'biases' => new TrainableParameter(NumPower::array($biases)), + ]); + + $prevGradNd = NumPower::array($prevGrad); + + // Forward pass to set internal input cache + $this->layer->forward($this->input); + + $gradient = $this->layer->back( + prevGradient: new Deferred(fn: fn () => $prevGradNd), + optimizer: $this->optimizer + )->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference activations equal to forward for fixed parameters')] + #[DataProvider('forwardProvider')] + public function testInfer(array $weights, array $biases, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $this->layer->restore([ + 'weights' => new TrainableParameter(NumPower::array($weights)), + 'biases' => new TrainableParameter(NumPower::array($biases)), + ]); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Method restore() correctly replaces layer parameters')] + public function testRestoreReplacesParameters() : void + { + $this->layer->initialize($this->fanIn); + + // Use the same deterministic weights and biases as in forwardProvider + $weights = [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + ]; + + $biases = [0.0, 0.0]; + + $expected = [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + ]; + + $this->layer->restore([ + 'weights' => new TrainableParameter(NumPower::array($weights)), + 'biases' => new TrainableParameter(NumPower::array($biases)), + ]); + + $forward = $this->layer->forward($this->input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Method parameters() yields restored weights and biases')] + public function testParametersReturnsRestoredParameters() : void + { + $this->layer->initialize($this->fanIn); + + $weightsArray = [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + ]; + + $biasesArray = [0.0, 0.0]; + + $weightsParam = new TrainableParameter(NumPower::array($weightsArray)); + $biasesParam = new TrainableParameter(NumPower::array($biasesArray)); + + $this->layer->restore([ + 'weights' => $weightsParam, + 'biases' => $biasesParam, + ]); + + $params = iterator_to_array($this->layer->parameters()); + + self::assertArrayHasKey('weights', $params); + self::assertArrayHasKey('biases', $params); + + self::assertSame($weightsParam, $params['weights']); + self::assertSame($biasesParam, $params['biases']); + + self::assertEqualsWithDelta($weightsArray, $params['weights']->param()->toArray(), 1e-7); + self::assertEqualsWithDelta($biasesArray, $params['biases']->param()->toArray(), 1e-7); + } + + #[Test] + #[TestDox('It returns correct string representation')] + public function testToStringReturnsCorrectValue() : void + { + $expected = 'Dense (neurons: 2, l2 penalty: 0, bias: true, weight initializer: He Uniform, bias initializer: Constant (value: 0))'; + + self::assertSame($expected, (string) $this->layer); + } +} From 9767a1fdc32a007f7569535b6ff1f4319f644d04 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Wed, 10 Dec 2025 23:53:18 +0200 Subject: [PATCH 35/42] ML-401 Added `Dropout` hidden layer implementation with forward/inference/backward passes, unit tests, and documentation updates --- 
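A minimal usage sketch of the new layer, assuming the NumPower extension is loaded and this
patch is applied. The optional $mask argument of forward() comes from this implementation and
is passed here only to make the scaled output deterministic; during normal training the layer
draws its own random mask.

    use Rubix\ML\NeuralNet\Layers\Dropout\Dropout;

    $layer = new Dropout(0.5);      // each unit is dropped with probability 0.5
    $layer->initialize(3);          // fan in equals fan out for this layer

    $input = NumPower::array([
        [1.0, 2.5, -0.1],
        [0.1, 0.0, 3.0],
    ]);

    // Kept units are pre-scaled by 1 / (1 - ratio) = 2.0 (inverted dropout),
    // so no rescaling is needed at inference time.
    $mask = NumPower::array([
        [2.0, 0.0, 2.0],
        [0.0, 2.0, 2.0],
    ]);

    $training = $layer->forward($input, $mask);   // element-wise product with the mask
    $inference = $layer->infer($input);           // identity pass-through
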
docs/neural-network/hidden-layers/dropout.md | 6 +- src/NeuralNet/Layers/Dropout/Dropout.php | 208 ++++++++++++++++++ .../NeuralNet/Layers/Dropout/DropoutTest.php | 143 ++++++++++++ 3 files changed, 354 insertions(+), 3 deletions(-) create mode 100644 src/NeuralNet/Layers/Dropout/Dropout.php create mode 100644 tests/NeuralNet/Layers/Dropout/DropoutTest.php diff --git a/docs/neural-network/hidden-layers/dropout.md b/docs/neural-network/hidden-layers/dropout.md index 566f83bad..28414f8ca 100644 --- a/docs/neural-network/hidden-layers/dropout.md +++ b/docs/neural-network/hidden-layers/dropout.md @@ -1,4 +1,4 @@ -[source] +[source] # Dropout Dropout is a regularization technique to reduce overfitting in neural networks by preventing complex co-adaptations on training data. It works by temporarily disabling output nodes during each training pass. It also acts as an efficient way of performing model averaging with the parameters of neural networks. @@ -10,10 +10,10 @@ Dropout is a regularization technique to reduce overfitting in neural networks b ## Example ```php -use Rubix\ML\NeuralNet\Layers\Dropout; +use Rubix\ML\NeuralNet\Layers\Dropout\Dropout; $layer = new Dropout(0.2); ``` ## References -[^1]: N. Srivastava et al. (2014). Dropout: A Simple Way to Prevent Neural Networks from Overfitting. \ No newline at end of file +[^1]: N. Srivastava et al. (2014). Dropout: A Simple Way to Prevent Neural Networks from Overfitting. diff --git a/src/NeuralNet/Layers/Dropout/Dropout.php b/src/NeuralNet/Layers/Dropout/Dropout.php new file mode 100644 index 000000000..54abaf861 --- /dev/null +++ b/src/NeuralNet/Layers/Dropout/Dropout.php @@ -0,0 +1,208 @@ += 1.0) { + throw new InvalidArgumentException("Ratio must be between 0 and 1, $ratio given."); + } + + $this->ratio = $ratio; + $this->scale = 1.0 / (1.0 - $ratio); + } + + /** + * Return the width of the layer. + * + * @internal + * + * @throws RuntimeException + * @return positive-int + */ + public function width() : int + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + return $this->width; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $fanIn; + + $this->width = $fanOut; + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @internal + * + * @param NDArray $input + * @param NDArray|null $mask Custom dropout mask to use instead of generating one. + * @return NDArray + */ + public function forward(NDArray $input, ?NDArray $mask = null) : NDArray + { + if ($mask === null) { + // Build dropout mask using PHP's RNG. Each unit is kept with + // probability (1 - ratio) and scaled by $this->scale. + $inputArray = $input->toArray(); + + $maskArray = []; + + foreach ($inputArray as $i => $row) { + foreach ($row as $j => $_value) { + $u = rand() / getrandmax(); + + $maskArray[$i][$j] = $u > $this->ratio ? $this->scale : 0.0; + } + } + + $mask = NumPower::array($maskArray); + } + + $output = NumPower::multiply($input, $mask); + + $this->mask = $mask; + + return $output; + } + + /** + * Compute an inferential pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $input; + } + + /** + * Calculate the gradients of the layer and update the parameters. 
+ * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @throws RuntimeException + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + if (!$this->mask) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + $mask = $this->mask; + + $this->mask = null; + + return new Deferred([$this, 'gradient'], [$prevGradient, $mask]); + } + + /** + * Calculate the gradient for the previous layer. + * + * @internal + * + * @param Deferred $prevGradient + * @param NDArray $mask + * @return NDArray + */ + public function gradient(Deferred $prevGradient, NDArray $mask) : NDArray + { + /** @var NDArray $dOut */ + $dOut = $prevGradient(); + + return NumPower::multiply($dOut, $mask); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Dropout (ratio: {$this->ratio})"; + } +} diff --git a/tests/NeuralNet/Layers/Dropout/DropoutTest.php b/tests/NeuralNet/Layers/Dropout/DropoutTest.php new file mode 100644 index 000000000..337466986 --- /dev/null +++ b/tests/NeuralNet/Layers/Dropout/DropoutTest.php @@ -0,0 +1,143 @@ +fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Dropout(0.5); + } + + #[Test] + #[TestDox('Initializes width equal to fan-in')] + public function testInitializeSetsWidth() : void + { + $this->layer->initialize($this->fanIn); + + self::assertEquals($this->fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('forward() returns an NDArray with the same shape as the input')] + public function testForward() : void + { + $this->layer->initialize($this->fanIn); + + // Deterministic mask so that forward output is predictable + $mask = NumPower::array([ + [2.0, 2.0, 2.0], + [2.0, 0.0, 2.0], + [2.0, 2.0, 0.0], + ]); + + $forward = $this->layer->forward($this->input, $mask); + + $expected = [ + [2.0, 5.0, -0.2], + [0.2, 0.0, 6.0], + [0.004, -12.0, 0.0], + ]; + + self::assertSame($this->input->shape(), $forward->shape()); + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates gradients using the same dropout mask')] + public function testBack() : void + { + $this->layer->initialize($this->fanIn); + + // Use the same deterministic mask as in testForward so that the + // gradient is fully predictable: grad = prevGrad * mask. 
+ $mask = NumPower::array([ + [2.0, 2.0, 2.0], + [2.0, 0.0, 2.0], + [2.0, 2.0, 0.0], + ]); + + // Forward pass to set internal mask cache + $this->layer->forward($this->input, $mask); + + $gradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + $expected = [ + [0.5, 1.4, 0.2], + [1.0, 0.0, 0.02], + [0.5, 0.2, 0.0], + ]; + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Inference pass leaves inputs unchanged')] + public function testInfer() : void + { + $this->layer->initialize($this->fanIn); + + $expected = [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]; + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } +} From 8d7c938b2578e1cc747b9eed5c513c252edb7a4b Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Thu, 11 Dec 2025 16:37:32 +0200 Subject: [PATCH 36/42] ML-392 Refactored `Dropout` layer to replace custom mask generation with `NumPower` utilities --- src/NeuralNet/Layers/Dropout/Dropout.php | 26 +-- .../NeuralNet/Layers/Dropout/DropoutTest.php | 210 ++++++++++++++---- 2 files changed, 181 insertions(+), 55 deletions(-) diff --git a/src/NeuralNet/Layers/Dropout/Dropout.php b/src/NeuralNet/Layers/Dropout/Dropout.php index 54abaf861..ce2409f7d 100644 --- a/src/NeuralNet/Layers/Dropout/Dropout.php +++ b/src/NeuralNet/Layers/Dropout/Dropout.php @@ -111,28 +111,20 @@ public function initialize(int $fanIn) : int * @internal * * @param NDArray $input - * @param NDArray|null $mask Custom dropout mask to use instead of generating one. * @return NDArray */ - public function forward(NDArray $input, ?NDArray $mask = null) : NDArray + public function forward(NDArray $input) : NDArray { - if ($mask === null) { - // Build dropout mask using PHP's RNG. Each unit is kept with - // probability (1 - ratio) and scaled by $this->scale. - $inputArray = $input->toArray(); + // Build dropout mask using NumPower's uniform RNG. Each unit is kept + // with probability (1 - ratio) and scaled by $this->scale. + $shape = $input->shape(); - $maskArray = []; + // Uniform random numbers in [0, 1) with same shape as input + $rand = NumPower::uniform($shape, 0.0, 1.0); - foreach ($inputArray as $i => $row) { - foreach ($row as $j => $_value) { - $u = rand() / getrandmax(); - - $maskArray[$i][$j] = $u > $this->ratio ? 
$this->scale : 0.0; - } - } - - $mask = NumPower::array($maskArray); - } + // mask = (rand > ratio) * scale + $mask = NumPower::greater($rand, $this->ratio); + $mask = NumPower::multiply($mask, $this->scale); $output = NumPower::multiply($input, $mask); diff --git a/tests/NeuralNet/Layers/Dropout/DropoutTest.php b/tests/NeuralNet/Layers/Dropout/DropoutTest.php index 337466986..47cf1ece1 100644 --- a/tests/NeuralNet/Layers/Dropout/DropoutTest.php +++ b/tests/NeuralNet/Layers/Dropout/DropoutTest.php @@ -7,10 +7,12 @@ use NDArray; use NumPower; use PHPUnit\Framework\Attributes\CoversClass; +use PHPUnit\Framework\Attributes\DataProvider; use PHPUnit\Framework\Attributes\Group; use PHPUnit\Framework\Attributes\Test; use PHPUnit\Framework\Attributes\TestDox; use Rubix\ML\Deferred; +use Rubix\ML\Exceptions\InvalidArgumentException; use Rubix\ML\NeuralNet\Layers\Dropout\Dropout; use Rubix\ML\NeuralNet\Optimizers\Base\Optimizer; use Rubix\ML\NeuralNet\Optimizers\Stochastic\Stochastic; @@ -20,8 +22,6 @@ #[CoversClass(Dropout::class)] class DropoutTest extends TestCase { - protected const int RANDOM_SEED = 0; - /** * @var positive-int */ @@ -58,6 +58,43 @@ protected function setUp() : void $this->layer = new Dropout(0.5); } + /** + * @return array + */ + public static function badRatioProvider() : array + { + return [ + 'zero' => [0.0], + 'negative' => [-0.1], + 'one' => [1.0], + 'greaterThanOne'=> [1.1], + ]; + } + + /** + * @return array>}> + */ + public static function inferProvider() : array + { + return [ + 'identityOnInput' => [[ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]], + ]; + } + + #[Test] + #[TestDox('Constructor rejects invalid ratio values')] + #[DataProvider('badRatioProvider')] + public function testConstructorRejectsInvalidRatio(float $ratio) : void + { + $this->expectException(InvalidArgumentException::class); + + new Dropout($ratio); + } + #[Test] #[TestDox('Initializes width equal to fan-in')] public function testInitializeSetsWidth() : void @@ -68,28 +105,51 @@ public function testInitializeSetsWidth() : void } #[Test] - #[TestDox('forward() returns an NDArray with the same shape as the input')] + #[TestDox('Method forward() applies dropout mask with correct shape and scaling')] public function testForward() : void { $this->layer->initialize($this->fanIn); - // Deterministic mask so that forward output is predictable - $mask = NumPower::array([ - [2.0, 2.0, 2.0], - [2.0, 0.0, 2.0], - [2.0, 2.0, 0.0], - ]); + $forward = $this->layer->forward($this->input); - $forward = $this->layer->forward($this->input, $mask); + $inputArray = $this->input->toArray(); + $forwardArray = $forward->toArray(); - $expected = [ - [2.0, 5.0, -0.2], - [0.2, 0.0, 6.0], - [0.004, -12.0, 0.0], - ]; + self::assertSameSize($inputArray, $forwardArray); + + $scale = 1.0 / (1.0 - 0.5); // ratio = 0.5 + + $nonZero = 0; + $total = 0; + + foreach ($inputArray as $i => $row) { + foreach ($row as $j => $x) { + $y = $forwardArray[$i][$j]; + $total++; - self::assertSame($this->input->shape(), $forward->shape()); - self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + if (abs($x) < 1e-12) { + // If input is (near) zero, output should also be ~0 + self::assertEqualsWithDelta(0.0, $y, 1e-7); + continue; + } + + if (abs($y) < 1e-12) { + // Dropped unit + continue; + } + + $nonZero++; + + // Kept unit should be scaled input + self::assertEqualsWithDelta($x * $scale, $y, 1e-6); + } + } + + // Roughly (1 - ratio) of units should be non-zero; allow wide tolerance + $expectedKept = (1.0 - 
0.5) * $total; + self::assertGreaterThan(0, $nonZero); + self::assertLessThan($total, $nonZero); + self::assertEqualsWithDelta($expectedKept, $nonZero, $total * 0.5); } #[Test] @@ -98,21 +158,104 @@ public function testBack() : void { $this->layer->initialize($this->fanIn); - // Use the same deterministic mask as in testForward so that the - // gradient is fully predictable: grad = prevGrad * mask. + // Forward pass to generate and store mask + $forward = $this->layer->forward($this->input); + $forwardArray = $forward->toArray(); + $inputArray = $this->input->toArray(); + + // Approximate mask from forward output: mask ≈ forward / input + $maskArray = []; + foreach ($inputArray as $i => $row) { + foreach ($row as $j => $x) { + $y = $forwardArray[$i][$j]; + + if (abs($x) < 1e-12) { + $maskArray[$i][$j] = 0.0; + } else { + $maskArray[$i][$j] = $y / $x; + } + } + } + + $gradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + $gradArray = $gradient->toArray(); + $prevGradArray = ($this->prevGrad)()->toArray(); + + // Expected gradient per element: prevGrad * mask for non-zero inputs. + // For zero inputs, the mask cannot be inferred from the forward output + // (forward is always 0 regardless of mask), so we accept the actual + // gradient value there. + $expectedGrad = []; + foreach ($prevGradArray as $i => $row) { + foreach ($row as $j => $g) { + if (abs($inputArray[$i][$j]) < 1e-12) { + $expectedGrad[$i][$j] = $gradArray[$i][$j]; + } else { + $expectedGrad[$i][$j] = $g * $maskArray[$i][$j]; + } + } + } + + self::assertEqualsWithDelta($expectedGrad, $gradArray, 1e-6); + } + + #[Test] + #[TestDox('Inference pass leaves inputs unchanged')] + #[DataProvider('inferProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Method initialize() returns fan out equal to fan in')] + public function testInitializeReturnsFanOut() : void + { + $fanOut = $this->layer->initialize($this->fanIn); + + self::assertSame($this->fanIn, $fanOut); + } + + #[Test] + #[TestDox('Method width() returns the initialized width')] + public function testWidthAfterInitialize() : void + { + $this->layer->initialize($this->fanIn); + + self::assertSame($this->fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('Method gradient() multiplies previous gradient by the dropout mask')] + public function testGradient() : void + { + // Deterministic previous gradient (same shape as input) + $prevGradNd = NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + + // Same deterministic mask as used in testForward/testBack $mask = NumPower::array([ [2.0, 2.0, 2.0], [2.0, 0.0, 2.0], [2.0, 2.0, 0.0], ]); - // Forward pass to set internal mask cache - $this->layer->forward($this->input, $mask); + $prevGradient = new Deferred(fn: static function () use ($prevGradNd) : NDArray { + return $prevGradNd; + }); - $gradient = $this->layer->back( - prevGradient: $this->prevGrad, - optimizer: $this->optimizer - )->compute(); + $gradient = $this->layer->gradient($prevGradient, $mask); $expected = [ [0.5, 1.4, 0.2], @@ -120,24 +263,15 @@ public function testBack() : void [0.5, 0.2, 0.0], ]; - self::assertInstanceOf(NDArray::class, $gradient); self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); } #[Test] - #[TestDox('Inference pass leaves inputs 
unchanged')] - public function testInfer() : void + #[TestDox('It returns correct string representation')] + public function testToString() : void { - $this->layer->initialize($this->fanIn); - - $expected = [ - [1.0, 2.5, -0.1], - [0.1, 0.0, 3.0], - [0.002, -6.0, -0.5], - ]; + $expected = 'Dropout (ratio: 0.5)'; - $infer = $this->layer->infer($this->input); - - self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + self::assertSame($expected, (string) $this->layer); } } From 5ad0ed2da331f89ceb15cc92dd21ea3509653ef5 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Thu, 11 Dec 2025 19:41:29 +0200 Subject: [PATCH 37/42] ML-392 Added `Multiclass` output layer with complete interface, forward/inference/backward passes, unit tests --- src/NeuralNet/Layers/Dropout/Dropout.php | 1 + .../Layers/Multiclass/Multiclass.php | 229 ++++++++++++++++++ .../Layers/Multiclass/MulticlassTest.php | 217 +++++++++++++++++ 3 files changed, 447 insertions(+) create mode 100644 src/NeuralNet/Layers/Multiclass/Multiclass.php create mode 100644 tests/NeuralNet/Layers/Multiclass/MulticlassTest.php diff --git a/src/NeuralNet/Layers/Dropout/Dropout.php b/src/NeuralNet/Layers/Dropout/Dropout.php index ce2409f7d..45d88e57a 100644 --- a/src/NeuralNet/Layers/Dropout/Dropout.php +++ b/src/NeuralNet/Layers/Dropout/Dropout.php @@ -25,6 +25,7 @@ * @category Machine Learning * @package Rubix/ML * @author Andrew DalPino + * @author Samuel Akopyan */ class Dropout implements Hidden { diff --git a/src/NeuralNet/Layers/Multiclass/Multiclass.php b/src/NeuralNet/Layers/Multiclass/Multiclass.php new file mode 100644 index 000000000..b6e33a5ac --- /dev/null +++ b/src/NeuralNet/Layers/Multiclass/Multiclass.php @@ -0,0 +1,229 @@ + + */ +class Multiclass implements Output +{ + /** + * The unique class labels. + * + * @var string[] + */ + protected array $classes = [ + // + ]; + + /** + * The function that computes the loss of erroneous activations. + * + * @var ClassificationLoss + */ + protected ClassificationLoss $costFn; + + /** + * The softmax activation function. + * + * @var Softmax + */ + protected Softmax $softmax; + + /** + * The memorized input matrix. + * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * The memorized activation matrix. + * + * @var NDArray|null + */ + protected ?NDArray $output = null; + + /** + * @param string[] $classes + * @param ClassificationLoss|null $costFn + * @throws InvalidArgumentException + */ + public function __construct(array $classes, ?ClassificationLoss $costFn = null) + { + $classes = array_values(array_unique($classes)); + + if (count($classes) < 2) { + throw new InvalidArgumentException('Number of classes' + . ' must be greater than 1, ' . count($classes) + . ' given.'); + } + + $this->classes = $classes; + $this->costFn = $costFn ?? new CrossEntropy(); + $this->softmax = new Softmax(); + } + + /** + * Return the width of the layer. + * + * @return positive-int + */ + public function width() : int + { + return max(1, count($this->classes)); + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @param positive-int $fanIn + * @throws InvalidArgumentException + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = count($this->classes); + + if ($fanIn !== $fanOut) { + throw new InvalidArgumentException('Fan in must be' + . " equal to fan out, $fanOut expected but" + . 
" $fanIn given."); + } + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $output = $this->softmax->activate($input); + + $this->input = $input; + $this->output = $output; + + return $output; + } + + /** + * Compute an inferential pass through the layer. + * + * @param NDArray $input + * @throws RuntimeException + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $this->softmax->activate($input); + } + + /** + * Compute the gradient and loss at the output. + * + * @param string[] $labels + * @param Optimizer $optimizer + * @throws RuntimeException + * @return array + */ + public function back(array $labels, Optimizer $optimizer) : array + { + if (!$this->input or !$this->output) { + throw new RuntimeException('Must perform forward pass' + . ' before backpropagating.'); + } + + $expected = []; + + foreach ($labels as $label) { + $dist = []; + + foreach ($this->classes as $class) { + $dist[] = $class == $label ? 1.0 : 0.0; + } + + $expected[] = $dist; + } + + $expected = NumPower::array($expected); + + $input = $this->input; + $output = $this->output; + + $gradient = new Deferred([$this, 'gradient'], [$input, $output, $expected]); + + $loss = $this->costFn->compute($output, $expected); + + $this->input = $this->output = null; + + return [$gradient, $loss]; + } + + /** + * Calculate the gradient for the previous layer. + * + * @param NDArray $input + * @param NDArray $output + * @param NDArray $expected + * @return NDArray + */ + public function gradient(NDArray $input, NDArray $output, NDArray $expected) : NDArray + { + $n = array_product($output->shape()); + + if ($this->costFn instanceof CrossEntropy) { + return NumPower::divide( + NumPower::subtract($output, $expected), + $n + ); + } + + $dLoss = NumPower::divide( + $this->costFn->differentiate($output, $expected), + $n + ); + + return NumPower::multiply( + $this->softmax->differentiate($output), + $dLoss + ); + } + + /** + * Return the string representation of the object. 
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Multiclass (cost function: {$this->costFn})"; + } +} diff --git a/tests/NeuralNet/Layers/Multiclass/MulticlassTest.php b/tests/NeuralNet/Layers/Multiclass/MulticlassTest.php new file mode 100644 index 000000000..a920a4272 --- /dev/null +++ b/tests/NeuralNet/Layers/Multiclass/MulticlassTest.php @@ -0,0 +1,217 @@ + + */ + public static function initializeProvider() : array + { + return [ + 'fanInEqualsClasses' => [3], + ]; + } + + /** + * @return array>}> + */ + public static function forwardProvider() : array + { + return [ + 'expectedForward' => [[ + [0.1719820, 0.7707700, 0.0572478], + [0.0498033, 0.0450639, 0.9051327], + [0.6219707, 0.0015385, 0.3764905], + ]], + ]; + } + + /** + * @return array>}> + */ + public static function backProvider() : array + { + return [ + 'expectedGradient' => [[ + [-0.0920019, 0.0856411, 0.0063608], + [0.0055337, -0.1061040, 0.1005703], + [0.0691078, 0.00017093, -0.0692788], + ]], + ]; + } + + /** + * @return array>}> + */ + public static function inferProvider() : array + { + // Same expectations as forward + return self::forwardProvider(); + } + + protected function setUp() : void + { + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->labels = ['hot', 'cold', 'ice cold']; + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Multiclass( + classes: ['hot', 'cold', 'ice cold'], + costFn: new CrossEntropy() + ); + } + + #[Test] + #[TestDox('Constructor rejects invalid number of classes')] + public function testConstructorRejectsInvalidClasses() : void + { + $this->expectException(InvalidArgumentException::class); + + new Multiclass(classes: ['only-one-class']); + } + + #[Test] + #[TestDox('Method width() returns number of classes')] + public function testWidthReturnsNumberOfClasses() : void + { + self::assertSame(3, $this->layer->width()); + } + + #[Test] + #[TestDox('Initializes and returns correct fan out')] + #[DataProvider('initializeProvider')] + public function testInitializeReturnsFanOut(int $fanIn) : void + { + $fanOut = $this->layer->initialize($fanIn); + + self::assertSame($fanIn, $fanOut); + self::assertSame(3, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes forward softmax probabilities')] + #[DataProvider('forwardProvider')] + public function testForward(array $expected) : void + { + $this->layer->initialize(3); + + self::assertEquals(3, $this->layer->width()); + + $forward = $this->layer->forward($this->input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates and returns output gradient')] + #[DataProvider('backProvider')] + public function testBack(array $expected) : void + { + $this->layer->initialize(3); + + // Set internal caches + $this->layer->forward($this->input); + + [$computation, $loss] = $this->layer->back( + labels: $this->labels, + optimizer: $this->optimizer + ); + + self::assertInstanceOf(Deferred::class, $computation); + self::assertIsFloat($loss); + + $gradient = $computation->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes gradient for previous layer directly')] + #[DataProvider('backProvider')] + public function testGradient(array $expectedGradient) : void + { + $this->layer->initialize(3); + + // Forward pass to obtain output probabilities + 
$output = $this->layer->forward($this->input); + + // Rebuild expected one-hot matrix the same way as Multiclass::back() + $expected = []; + + foreach ($this->labels as $label) { + $dist = []; + + foreach (['hot', 'cold', 'ice cold'] as $class) { + $dist[] = $class === $label ? 1.0 : 0.0; + } + + $expected[] = $dist; + } + + $expectedNd = NumPower::array($expected); + + $gradient = $this->layer->gradient($this->input, $output, $expectedNd); + + self::assertEqualsWithDelta($expectedGradient, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes infer softmax probabilities')] + #[DataProvider('inferProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize(3); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('It returns correct string representation')] + public function testToStringReturnsCorrectValue() : void + { + $expected = 'Multiclass (cost function: Cross Entropy)'; + + self::assertSame($expected, (string) $this->layer); + } +} From a6c634f1493763c529d3592a876251030e03fffe Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 19 Dec 2025 16:51:29 +0200 Subject: [PATCH 38/42] ML-392 Added `Noise` output layer with complete interface, forward/inference/backward passes, unit tests --- docs/neural-network/hidden-layers/noise.md | 6 +- src/NeuralNet/Layers/Noise/Noise.php | 157 ++++++++++++++++ tests/NeuralNet/Layers/Noise/NoiseTest.php | 208 +++++++++++++++++++++ 3 files changed, 368 insertions(+), 3 deletions(-) create mode 100644 src/NeuralNet/Layers/Noise/Noise.php create mode 100644 tests/NeuralNet/Layers/Noise/NoiseTest.php diff --git a/docs/neural-network/hidden-layers/noise.md b/docs/neural-network/hidden-layers/noise.md index 7979549af..4d29732cb 100644 --- a/docs/neural-network/hidden-layers/noise.md +++ b/docs/neural-network/hidden-layers/noise.md @@ -1,4 +1,4 @@ -[source] +[source] # Noise This layer adds random Gaussian noise to the inputs with a user-defined standard deviation. Noise added to neural network activations acts as a regularizer by indirectly adding a penalty to the weights through the cost function in the output layer. @@ -10,10 +10,10 @@ This layer adds random Gaussian noise to the inputs with a user-defined standard ## Example ```php -use Rubix\ML\NeuralNet\Layers\Noise; +use Rubix\ML\NeuralNet\Layers\Noise\Noise; $layer = new Noise(1e-3); ``` ## References -[^1]: C. Gulcehre et al. (2016). Noisy Activation Functions. \ No newline at end of file +[^1]: C. Gulcehre et al. (2016). Noisy Activation Functions. diff --git a/src/NeuralNet/Layers/Noise/Noise.php b/src/NeuralNet/Layers/Noise/Noise.php new file mode 100644 index 000000000..934265bb3 --- /dev/null +++ b/src/NeuralNet/Layers/Noise/Noise.php @@ -0,0 +1,157 @@ + + */ +class Noise implements Hidden +{ + /** + * The amount (standard deviation) of the gaussian noise to add to the inputs. + * + * @var float + */ + protected float $stdDev; + + /** + * The width of the layer. + * + * @var positive-int|null + */ + protected ?int $width = null; + + /** + * @param float $stdDev + * @throws InvalidArgumentException + */ + public function __construct(float $stdDev) + { + if ($stdDev < 0.0) { + throw new InvalidArgumentException("Standard deviation must be 0 or greater, $stdDev given."); + } + + $this->stdDev = $stdDev; + } + + /** + * Return the width of the layer. 
+ * + * @internal + * + * @throws RuntimeException + * @return positive-int + */ + public function width() : int + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + return $this->width; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $fanIn; + + $this->width = $fanOut; + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + if ($this->stdDev === 0.0) { + return $input; + } + + $shape = $input->shape(); + + // Gaussian noise with mean 0 and standard deviation $this->stdDev + $noise = NumPower::normal(size: $shape, loc: 0.0, scale: $this->stdDev); + + return NumPower::add($input, $noise); + } + + /** + * Compute an inferential pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $input; + } + + /** + * Calculate the gradients of the layer and update the parameters. + * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + return $prevGradient; + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Noise (std dev: {$this->stdDev})"; + } +} diff --git a/tests/NeuralNet/Layers/Noise/NoiseTest.php b/tests/NeuralNet/Layers/Noise/NoiseTest.php new file mode 100644 index 000000000..4eaf11770 --- /dev/null +++ b/tests/NeuralNet/Layers/Noise/NoiseTest.php @@ -0,0 +1,208 @@ +fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Noise(0.1); + } + + /** + * @return array>}> + */ + public static function backProvider() : array + { + return [ + [ + [ + [0.25, 0.7, 0.1], + [0.5, 0.2, 0.01], + [0.25, 0.1, 0.89], + ], + ], + ]; + } + + /** + * @return array>}> + */ + public static function inferProvider() : array + { + return [ + [ + [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ], + ], + ]; + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Noise (std dev: 0.1)', (string) $this->layer); + } + + #[Test] + #[TestDox('Constructor rejects invalid standard deviation')] + public function testConstructorRejectsInvalidStdDev() : void + { + $this->expectException(InvalidArgumentException::class); + + // Negative std dev should be rejected + new Noise(-0.1); + } + + #[Test] + #[TestDox('Forward throws if layer is not initialized')] + public function testForwardThrowsIfNotInitialized() : void + { + $layer = new Noise(0.1); + + $this->expectException(RuntimeException::class); + + $layer->forward($this->input); + } + + #[Test] + #[TestDox('Initializes width equal to 
fan-in')] + public function testInitializeSetsWidth() : void + { + $this->layer->initialize($this->fanIn); + + self::assertEquals($this->fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes forward pass that adds Gaussian noise with correct shape and scale')] + public function testForwardAddsNoiseWithCorrectProperties() : void + { + $this->layer->initialize($this->fanIn); + + $forward = $this->layer->forward($this->input); + + self::assertInstanceOf(NDArray::class, $forward); + + $inputArray = $this->input->toArray(); + $forwardArray = $forward->toArray(); + + // 1) Shape is preserved + self::assertSameSize($inputArray, $forwardArray); + + // 2) At least one element differs (very high probability) + $allEqual = true; + foreach ($inputArray as $i => $row) { + if ($row !== $forwardArray[$i]) { + $allEqual = false; + break; + } + } + self::assertFalse($allEqual, 'Expected forward output to differ from input due to noise.'); + + // 3) Empirical std dev of (forward - input) is ~ stdDev, within tolerance + $diffs = []; + foreach ($inputArray as $i => $row) { + foreach ($row as $j => $v) { + $diffs[] = $forwardArray[$i][$j] - $v; + } + } + + $n = count($diffs); + $mean = array_sum($diffs) / $n; + + $var = 0.0; + foreach ($diffs as $d) { + $var += ($d - $mean) * ($d - $mean); + } + $var /= $n; + $std = sqrt($var); + + // Mean of noise should be near 0, std near $this->stdDev + self::assertEqualsWithDelta(0.0, $mean, 2e-1); // +/-0.2 around 0 + self::assertEqualsWithDelta(0.1, $std, 1e-1); // +/-0.1 around 0.1 + } + + #[Test] + #[TestDox('Backpropagates and returns previous gradient unchanged')] + #[DataProvider('backProvider')] + public function testBackReturnsPrevGradient(array $expected) : void + { + $this->layer->initialize($this->fanIn); + $this->layer->forward($this->input); + + $gradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Infer returns input unchanged')] + #[DataProvider('inferProvider')] + public function testInferIdentity(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } +} From 7013a569df2ce38573027d626d39cbf0b11c095d Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 19 Dec 2025 17:58:38 +0200 Subject: [PATCH 39/42] ML-392 Added `Placeholder1D` output layer with complete interface, forward/inference/backward passes, unit tests --- .../hidden-layers/placeholder1d.md | 17 +++ src/NeuralNet/Layers/Base/Contracts/Input.php | 18 +++ .../Layers/Placeholder1D/Placeholder1D.php | 108 +++++++++++++++++ .../Placeholder1D/Placeholder1DTest.php | 114 ++++++++++++++++++ 4 files changed, 257 insertions(+) create mode 100644 docs/neural-network/hidden-layers/placeholder1d.md create mode 100644 src/NeuralNet/Layers/Base/Contracts/Input.php create mode 100644 src/NeuralNet/Layers/Placeholder1D/Placeholder1D.php create mode 100644 tests/NeuralNet/Layers/Placeholder1D/Placeholder1DTest.php diff --git a/docs/neural-network/hidden-layers/placeholder1d.md b/docs/neural-network/hidden-layers/placeholder1d.md new file mode 100644 index 000000000..f70575eee --- /dev/null +++ b/docs/neural-network/hidden-layers/placeholder1d.md @@ -0,0 +1,17 @@ +[source] + +# Placeholder 1D + +The Placeholder 1D input layer represents the future input 
values of a mini batch (matrix) of single dimensional tensors (vectors) to the neural network. It performs shape validation on the input and then forwards it unchanged to the next layer. + +## Parameters +| # | Name | Default | Type | Description | +|---|---|---|---|---| +| 1 | inputs | | int | The number of input nodes (features). | + +## Example +```php +use Rubix\ML\NeuralNet\Layers\Placeholder1D\Placeholder1D; + +$layer = new Placeholder1D(10); +``` diff --git a/src/NeuralNet/Layers/Base/Contracts/Input.php b/src/NeuralNet/Layers/Base/Contracts/Input.php new file mode 100644 index 000000000..f0d755253 --- /dev/null +++ b/src/NeuralNet/Layers/Base/Contracts/Input.php @@ -0,0 +1,18 @@ + + */ +interface Input extends Layer +{ + // +} diff --git a/src/NeuralNet/Layers/Placeholder1D/Placeholder1D.php b/src/NeuralNet/Layers/Placeholder1D/Placeholder1D.php new file mode 100644 index 000000000..45f8fc49d --- /dev/null +++ b/src/NeuralNet/Layers/Placeholder1D/Placeholder1D.php @@ -0,0 +1,108 @@ + + */ +class Placeholder1D implements Input +{ + /** + * The number of input nodes. i.e. feature inputs. + * + * @var positive-int + */ + protected int $inputs; + + /** + * @param int $inputs + * @throws InvalidArgumentException + */ + public function __construct(int $inputs) + { + if ($inputs < 1) { + throw new InvalidArgumentException("Number of input nodes must be greater than 0, $inputs given."); + } + + $this->inputs = $inputs; + } + + /** + * @return positive-int + */ + public function width() : int + { + return $this->inputs; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + return $this->inputs; + } + + /** + * Compute a forward pass through the layer. + * + * @param NDArray $input + * @throws InvalidArgumentException + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $shape = $input->shape(); + + if (empty($shape) || $shape[0] !== $this->inputs) { + $features = $shape[0] ?? 0; + + throw new InvalidArgumentException( + 'The number of features and input nodes must be equal,' + . " {$this->inputs} expected but {$features} given."); + } + + return $input; + } + + /** + * Compute an inferential pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $this->forward($input); + } + + /** + * Return the string representation of the object. 
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Placeholder 1D (inputs: {$this->inputs})"; + } +} diff --git a/tests/NeuralNet/Layers/Placeholder1D/Placeholder1DTest.php b/tests/NeuralNet/Layers/Placeholder1D/Placeholder1DTest.php new file mode 100644 index 000000000..7aa3168c8 --- /dev/null +++ b/tests/NeuralNet/Layers/Placeholder1D/Placeholder1DTest.php @@ -0,0 +1,114 @@ +input = NumPower::array([ + [1.0, 2.5], + [0.1, 0.0], + [0.002, -6.0], + ]); + + $this->layer = new Placeholder1D(3); + } + + /** + * @return array>}> + */ + public static function inputProvider() : array + { + return [ + [ + NumPower::array([ + [1.0, 2.5], + [0.1, 0.0], + [0.002, -6.0], + ]), + [ + [1.0, 2.5], + [0.1, 0.0], + [0.002, -6.0], + ], + ], + ]; + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Placeholder 1D (inputs: 3)', (string) $this->layer); + } + + #[Test] + #[TestDox('Returns width equal to number of inputs')] + public function testWidth() : void + { + self::assertEquals(3, $this->layer->width()); + } + + #[Test] + #[TestDox('Constructor rejects invalid number of inputs')] + public function testConstructorRejectsInvalidInputs() : void + { + $this->expectException(InvalidArgumentException::class); + + new Placeholder1D(0); + } + + #[Test] + #[TestDox('Initialize returns fan out equal to inputs without changing width')] + public function testInitialize() : void + { + $fanOut = $this->layer->initialize(5); + + self::assertEquals(3, $fanOut); + self::assertEquals(3, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes forward pass')] + #[DataProvider('inputProvider')] + public function testForward(NDArray $input, array $expected) : void + { + self::assertEquals(3, $this->layer->width()); + + $forward = $this->layer->forward($input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference pass')] + #[DataProvider('inputProvider')] + public function testInfer(NDArray $input, array $expected) : void + { + self::assertEquals(3, $this->layer->width()); + + $infer = $this->layer->infer($input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } +} From 08ad6b4e3b0faba8e697453cfe64c18457ed6053 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 19 Dec 2025 18:12:45 +0200 Subject: [PATCH 40/42] ML-392 Fixed wrong exception for AssertsShapes and exception texts --- src/NeuralNet/Initializers/He/HeNormal.php | 2 +- src/NeuralNet/Initializers/LeCun/LeCunNormal.php | 2 +- src/NeuralNet/Initializers/Normal/Normal.php | 2 +- src/NeuralNet/Initializers/Normal/TruncatedNormal.php | 2 +- src/NeuralNet/Initializers/Xavier/XavierNormal.php | 2 +- src/Traits/AssertsShapes.php | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/NeuralNet/Initializers/He/HeNormal.php b/src/NeuralNet/Initializers/He/HeNormal.php index 3d68844e4..193c7ff16 100644 --- a/src/NeuralNet/Initializers/He/HeNormal.php +++ b/src/NeuralNet/Initializers/He/HeNormal.php @@ -35,7 +35,7 @@ public function initialize(int $fanIn, int $fanOut) : NDArray $stdDev = sqrt(2 / $fanOut); - return NumPower::truncatedNormal(size: [$fanOut, $fanIn], scale: $stdDev); + return NumPower::truncatedNormal(size: [$fanOut, $fanIn], loc: 0.0, scale: $stdDev); } /** diff --git a/src/NeuralNet/Initializers/LeCun/LeCunNormal.php b/src/NeuralNet/Initializers/LeCun/LeCunNormal.php index 81d8add56..3fc5832bc 100644 --- 
a/src/NeuralNet/Initializers/LeCun/LeCunNormal.php +++ b/src/NeuralNet/Initializers/LeCun/LeCunNormal.php @@ -36,7 +36,7 @@ public function initialize(int $fanIn, int $fanOut) : NDArray $stdDev = sqrt(1 / $fanOut); - return NumPower::truncatedNormal(size: [$fanOut, $fanIn], scale: $stdDev); + return NumPower::truncatedNormal(size: [$fanOut, $fanIn], loc: 0.0, scale: $stdDev); } /** diff --git a/src/NeuralNet/Initializers/Normal/Normal.php b/src/NeuralNet/Initializers/Normal/Normal.php index 08c77ff38..acb4ad050 100644 --- a/src/NeuralNet/Initializers/Normal/Normal.php +++ b/src/NeuralNet/Initializers/Normal/Normal.php @@ -43,7 +43,7 @@ public function initialize(int $fanIn, int $fanOut) : NDArray { $this->validateFanInFanOut(fanIn: $fanIn, fanOut: $fanOut); - return NumPower::normal(size: [$fanOut, $fanIn], scale: $this->stdDev); + return NumPower::normal(size: [$fanOut, $fanIn], loc: 0.0, scale: $this->stdDev); } /** diff --git a/src/NeuralNet/Initializers/Normal/TruncatedNormal.php b/src/NeuralNet/Initializers/Normal/TruncatedNormal.php index c0c90196d..af9ed43fe 100644 --- a/src/NeuralNet/Initializers/Normal/TruncatedNormal.php +++ b/src/NeuralNet/Initializers/Normal/TruncatedNormal.php @@ -44,7 +44,7 @@ public function initialize(int $fanIn, int $fanOut) : NDArray { $this->validateFanInFanOut(fanIn: $fanIn, fanOut: $fanOut); - return NumPower::truncatedNormal(size: [$fanOut, $fanIn], scale: $this->stdDev); + return NumPower::truncatedNormal(size: [$fanOut, $fanIn], loc: 0.0, scale: $this->stdDev); } /** diff --git a/src/NeuralNet/Initializers/Xavier/XavierNormal.php b/src/NeuralNet/Initializers/Xavier/XavierNormal.php index dfe5bc956..428c74e49 100644 --- a/src/NeuralNet/Initializers/Xavier/XavierNormal.php +++ b/src/NeuralNet/Initializers/Xavier/XavierNormal.php @@ -36,7 +36,7 @@ public function initialize(int $fanIn, int $fanOut) : NDArray $stdDev = sqrt(2 / ($fanOut + $fanIn)); - return NumPower::truncatedNormal(size: [$fanOut, $fanIn], scale: $stdDev); + return NumPower::truncatedNormal(size: [$fanOut, $fanIn], loc: 0.0, scale: $stdDev); } /** diff --git a/src/Traits/AssertsShapes.php b/src/Traits/AssertsShapes.php index 7fabc316f..88fe23c1e 100644 --- a/src/Traits/AssertsShapes.php +++ b/src/Traits/AssertsShapes.php @@ -4,7 +4,7 @@ namespace Rubix\ML\Traits; -use InvalidArgumentException; +use Rubix\ML\Exceptions\InvalidArgumentException; use NDArray; /** @@ -29,7 +29,7 @@ trait AssertsShapes protected function assertSameShape(NDArray $output, NDArray $target) : void { if ($output->shape() !== $target->shape()) { - throw new InvalidArgumentException('Output and target must have identical shapes.'); + throw new InvalidArgumentException('Output and target must have the same shape.'); } } } From ce99147475683eac03624289ff85627ac6c6695c Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 19 Dec 2025 18:23:08 +0200 Subject: [PATCH 41/42] ML-392 Increased memory for tests --- phpunit.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/phpunit.xml b/phpunit.xml index 22063bc22..379cdc0a2 100644 --- a/phpunit.xml +++ b/phpunit.xml @@ -83,5 +83,6 @@ + From 7628fecbe13408f2914163288fc8051a96e79ea8 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Mon, 22 Dec 2025 20:14:49 +0200 Subject: [PATCH 42/42] ML-392 Added `PReLU` output layer with complete interface and unit tests --- docs/neural-network/hidden-layers/prelu.md | 6 +- src/NeuralNet/Layers/PReLU/PReLU.php | 287 ++++++++++++++++++++ tests/NeuralNet/Layers/PReLU/PReLUTest.php | 291 +++++++++++++++++++++ 3 files changed, 581 
insertions(+), 3 deletions(-) create mode 100644 src/NeuralNet/Layers/PReLU/PReLU.php create mode 100644 tests/NeuralNet/Layers/PReLU/PReLUTest.php diff --git a/docs/neural-network/hidden-layers/prelu.md b/docs/neural-network/hidden-layers/prelu.md index baaef2f32..22a5b4762 100644 --- a/docs/neural-network/hidden-layers/prelu.md +++ b/docs/neural-network/hidden-layers/prelu.md @@ -1,4 +1,4 @@ -[source] +[source] # PReLU Parametric Rectified Linear Units are leaky rectifiers whose *leakage* coefficient is learned during training. Unlike standard [Leaky ReLUs](../activation-functions/leaky-relu.md) whose leakage remains constant, PReLU layers can adjust the leakage to better suite the model on a per node basis. @@ -14,8 +14,8 @@ $$ ## Example ```php -use Rubix\ML\NeuralNet\Layers\PReLU; -use Rubix\ML\NeuralNet\Initializers\Normal; +use Rubix\ML\NeuralNet\Layers\PReLU\PReLU; +use Rubix\ML\NeuralNet\Initializers\Normal\Normal; $layer = new PReLU(new Normal(0.5)); ``` diff --git a/src/NeuralNet/Layers/PReLU/PReLU.php b/src/NeuralNet/Layers/PReLU/PReLU.php new file mode 100644 index 000000000..a8986cce4 --- /dev/null +++ b/src/NeuralNet/Layers/PReLU/PReLU.php @@ -0,0 +1,287 @@ + + */ +class PReLU implements Hidden, Parametric +{ + /** + * The initializer of the alpha (leakage) parameter. + * + * @var Initializer + */ + protected Initializer $initializer; + + /** + * The width of the layer. + * + * @var positive-int|null + */ + protected ?int $width = null; + + /** + * The parameterized leakage coefficients. + * + * @var Parameter|null + */ + protected ?Parameter $alpha = null; + + /** + * The memoized input matrix. + * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * @param Initializer|null $initializer + */ + public function __construct(?Initializer $initializer = null) + { + $this->initializer = $initializer ?? new Constant(0.25); + } + + /** + * Return the width of the layer. + * + * @internal + * + * @throws RuntimeException + * @return positive-int + */ + public function width() : int + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + return $this->width; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $fanIn; + + // Initialize alpha as a vector of length fanOut (one alpha per neuron) + // Using shape [fanOut, 1] then flattening to [fanOut] + $alphaMat = $this->initializer->initialize(1, $fanOut); + $alpha = NumPower::flatten($alphaMat); + + $this->width = $fanOut; + $this->alpha = new Parameter($alpha); + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $this->input = $input; + + return $this->activate($input); + } + + /** + * Compute an inferential pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $this->activate($input); + } + + /** + * Calculate the gradient and update the parameters of the layer. 
+ * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @throws RuntimeException + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + if (!$this->alpha) { + throw new RuntimeException('Layer has not been initialized.'); + } + + if (!$this->input) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + /** @var NDArray $dOut */ + $dOut = $prevGradient(); + + // Negative part of the input (values <= 0), used for dL/dalpha + $negativeInput = NumPower::minimum($this->input, 0.0); + + $dAlphaFull = NumPower::multiply($dOut, $negativeInput); + + // Sum over the batch axis (axis = 1) to obtain a gradient vector [width] + $dAlpha = NumPower::sum($dAlphaFull, axis: 1); + + $this->alpha->update($dAlpha, $optimizer); + + $input = $this->input; + + $this->input = null; + + return new Deferred([$this, 'gradient'], [$input, $dOut]); + } + + /** + * Calculate the gradient for the previous layer. + * + * @internal + * + * @param NDArray $input + * @param NDArray $dOut + * @return NDArray + */ + public function gradient(NDArray $input, NDArray $dOut) : NDArray + { + $derivative = $this->differentiate($input); + + return NumPower::multiply($derivative, $dOut); + } + + /** + * Return the parameters of the layer. + * + * @internal + * + * @throws \RuntimeException + * @return Generator + */ + public function parameters() : Generator + { + if (!$this->alpha) { + throw new RuntimeException('Layer has not been initialized.'); + } + + yield 'alpha' => $this->alpha; + } + + /** + * Restore the parameters in the layer from an associative array. + * + * @internal + * + * @param Parameter[] $parameters + */ + public function restore(array $parameters) : void + { + $this->alpha = $parameters['alpha']; + } + + /** + * Compute the leaky ReLU activation function and return a matrix. + * + * @param NDArray $input + * @throws RuntimeException + * @return NDArray + */ + protected function activate(NDArray $input) : NDArray + { + if (!$this->alpha) { + throw new RuntimeException('Layer has not been initialized.'); + } + + // Reshape alpha vector [width] to column [width, 1] for broadcasting + $alphaCol = NumPower::reshape($this->alpha->param(), [$this->width(), 1]); + + $positiveActivation = NumPower::maximum($input, 0.0); + + $negativeActivation = NumPower::multiply( + NumPower::minimum($input, 0.0), + $alphaCol, + ); + + return NumPower::add($positiveActivation, $negativeActivation); + } + + /** + * Calculate the derivative of the activation function at a given output. + * + * @param NDArray $input + * @throws RuntimeException + * @return NDArray + */ + protected function differentiate(NDArray $input) : NDArray + { + if (!$this->alpha) { + throw new RuntimeException('Layer has not been initialized.'); + } + + // Reshape alpha vector [width] to column [width, 1] for broadcasting + $alphaCol = NumPower::reshape($this->alpha->param(), [$this->width(), 1]); + + $positivePart = NumPower::greater($input, 0.0); + + $negativePart = NumPower::multiply( + NumPower::lessEqual($input, 0.0), + $alphaCol, + ); + + return NumPower::add($positivePart, $negativePart); + } + + /** + * Return the string representation of the object. 
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "PReLU (initializer: {$this->initializer})"; + } +} diff --git a/tests/NeuralNet/Layers/PReLU/PReLUTest.php b/tests/NeuralNet/Layers/PReLU/PReLUTest.php new file mode 100644 index 000000000..a1193ea09 --- /dev/null +++ b/tests/NeuralNet/Layers/PReLU/PReLUTest.php @@ -0,0 +1,291 @@ +fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new PReLU(new Constant(0.25)); + + srand(self::RANDOM_SEED); + } + + /** + * @return array + */ + public static function initializeProvider() : array + { + return [ + 'fanIn=3' => [3], + ]; + } + + /** + * @return array + */ + public static function forwardProvider() : array + { + return [ + 'expectedForward' => [[ + [1.0, 2.5, -0.025], + [0.1, 0.0, 3.0], + [0.002, -1.5, -0.125], + ]], + ]; + } + + /** + * @return array + */ + public static function backProvider() : array + { + return [ + 'expectedGradient' => [[ + [0.25, 0.6999999, 0.0250010], + [0.5, 0.05, 0.01], + [0.25, 0.0251045, 0.2234300], + ]], + ]; + } + + /** + * @return array + */ + public static function gradientProvider() : array + { + return [ + 'expectedGradient' => [[ + [0.25, 0.7, 0.025], + [0.5, 0.05, 0.01], + [0.25, 0.025, 0.2225], + ]], + ]; + } + + /** + * @return array + */ + public static function inferProvider() : array + { + return [ + 'expectedInfer' => [[ + [1.0, 2.5, -0.0250000], + [0.1, 0.0, 3.0], + [0.0020000, -1.5, -0.125], + ]], + ]; + } + + /** + * @return array + */ + public static function activateProvider() : array + { + return [ + 'defaultInput' => [ + [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ], + [ + [1.0, 2.5, -0.025], + [0.1, 0.0, 3.0], + [0.002, -1.5, -0.125], + ], + ], + ]; + } + + /** + * @return array + */ + public static function differentiateProvider() : array + { + return [ + 'defaultInput' => [ + [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ], + [ + [1.0, 1.0, 0.25], + [1.0, 0.25, 1.0], + [1.0, 0.25, 0.25], + ], + ], + ]; + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('PReLU (initializer: Constant (value: 0.25))', (string) $this->layer); + } + + #[Test] + #[TestDox('Initializes width equal to fan-in')] + public function testInitializeSetsWidth() : void + { + $this->layer->initialize($this->fanIn); + + self::assertEquals($this->fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('Initializes and returns fan out equal to fan-in')] + #[DataProvider('initializeProvider')] + public function testInitializeReturnsFanOut(int $fanIn) : void + { + $fanOut = $this->layer->initialize($fanIn); + + self::assertEquals($fanIn, $fanOut); + self::assertEquals($fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes forward activations')] + #[DataProvider('forwardProvider')] + public function testForward(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $forward = $this->layer->forward($this->input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates and returns gradient for previous layer')] + #[DataProvider('backProvider')] + public function testBack(array $expected) : 
void + { + $this->layer->initialize($this->fanIn); + + // Forward pass to set internal input state + $this->layer->forward($this->input); + + $gradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes gradient for previous layer directly')] + #[DataProvider('gradientProvider')] + public function testGradient(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $gradient = $this->layer->gradient( + $this->input, + ($this->prevGrad)(), + ); + + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference activations')] + #[DataProvider('inferProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Yields trainable alpha parameter')] + public function testParameters() : void + { + $this->layer->initialize($this->fanIn); + + $params = iterator_to_array($this->layer->parameters()); + + self::assertArrayHasKey('alpha', $params); + self::assertInstanceOf(TrainableParameter::class, $params['alpha']); + } + + #[Test] + #[TestDox('Restores alpha parameter from array')] + public function testRestore() : void + { + $this->layer->initialize($this->fanIn); + + $alphaNew = new TrainableParameter(NumPower::full([$this->fanIn], 0.5)); + + $this->layer->restore([ + 'alpha' => $alphaNew, + ]); + + $restored = iterator_to_array($this->layer->parameters()); + + self::assertSame($alphaNew, $restored['alpha']); + self::assertEquals( + array_fill(0, $this->fanIn, 0.5), + $restored['alpha']->param()->toArray(), + ); + } +}
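
Note (annotation only, not part of the applied diff): the `PReLU` layer added in PATCH 42 computes max(x, 0) + alpha * min(x, 0) per neuron and learns alpha during training, starting from the default `Constant(0.25)` initializer. A minimal element-wise sketch of the activation and its input derivative, assuming scalar inputs (the layer itself applies the same rule to whole NDArray batches and updates alpha through the optimizer):

```php
/**
 * Element-wise PReLU: y = x for x > 0, y = alpha * x otherwise.
 */
function prelu(float $x, float $alpha) : float
{
    return $x > 0.0 ? $x : $alpha * $x;
}

/**
 * Derivative with respect to the input, used when backpropagating:
 * 1 for positive inputs, alpha otherwise.
 */
function preluDerivative(float $x, float $alpha) : float
{
    return $x > 0.0 ? 1.0 : $alpha;
}

// With the default alpha = 0.25:
// prelu(2.5, 0.25)  === 2.5
// prelu(-0.1, 0.25) === -0.025, matching the expectations in PReLUTest::forwardProvider().
```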