From 11d07a360f8f42c1d9a098f7a345167714e1ffd8 Mon Sep 17 00:00:00 2001 From: AleDinve Date: Wed, 10 Dec 2025 15:24:32 +0100 Subject: [PATCH] correction of trained-weights dependent weighting --- pina/loss/ntk_weighting.py | 2 +- pina/loss/self_adaptive_weighting.py | 2 +- .../test_self_adaptive_weighting.py | 24 +++++++++++++++++++ 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/pina/loss/ntk_weighting.py b/pina/loss/ntk_weighting.py index fe671157a..3a29e2afd 100644 --- a/pina/loss/ntk_weighting.py +++ b/pina/loss/ntk_weighting.py @@ -56,7 +56,7 @@ def weights_update(self, losses): for condition, loss in losses.items(): loss.backward(retain_graph=True) grads = torch.cat( - [p.grad.flatten() for p in self.solver.model.parameters()] + [p.grad.flatten() for p in self.solver.model.parameters() if p.requires_grad] ) losses_norm[condition] = grads.norm() diff --git a/pina/loss/self_adaptive_weighting.py b/pina/loss/self_adaptive_weighting.py index 62196c529..cca65efca 100644 --- a/pina/loss/self_adaptive_weighting.py +++ b/pina/loss/self_adaptive_weighting.py @@ -46,7 +46,7 @@ def weights_update(self, losses): for condition, loss in losses.items(): loss.backward(retain_graph=True) grads = torch.cat( - [p.grad.flatten() for p in self.solver.model.parameters()] + [p.grad.flatten() for p in self.solver.model.parameters() if p.requires_grad] ) losses_norm[condition] = grads.norm() diff --git a/tests/test_weighting/test_self_adaptive_weighting.py b/tests/test_weighting/test_self_adaptive_weighting.py index 066e8855e..af9818a60 100644 --- a/tests/test_weighting/test_self_adaptive_weighting.py +++ b/tests/test_weighting/test_self_adaptive_weighting.py @@ -1,4 +1,5 @@ import pytest +import torch from pina import Trainer from pina.solver import PINN from pina.model import FeedForward @@ -37,3 +38,26 @@ def test_train_aggregation(update_every_n_epochs): solver = PINN(problem=problem, model=model, weighting=weighting) trainer = Trainer(solver=solver, max_epochs=5, 
accelerator="cpu") trainer.train() + +class Net_biased(torch.nn.Module): + def __init__(self, input_dim, output_dim, num_layers=2): + super().__init__() + self.mlp = FeedForward( + input_dimensions=input_dim, + output_dimensions=output_dim, + layers=[10 for _ in range(num_layers)] + ) + self.bias = torch.nn.Parameter(torch.zeros(1), requires_grad=False) + + def forward(self, x): + return self.mlp(x) + +@pytest.mark.parametrize("update_every_n_epochs", [1, 3]) +def test_train_aggregation_frozen_weights(update_every_n_epochs): + model = Net_biased(len(problem.input_variables), len(problem.output_variables)) + weighting = SelfAdaptiveWeighting( + update_every_n_epochs=update_every_n_epochs + ) + solver = PINN(problem=problem, model=model, weighting=weighting) + trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu") + trainer.train() \ No newline at end of file