author | Alby M <1473644+Linux-cpp-lisp@users.noreply.github.com> | 2021-04-21 07:06:20 -0600
committer | GitHub <noreply@github.com> | 2021-04-21 15:06:20 +0200
commit | a8223abaad1da1293f350d80b636a8d67b2d58a5 (patch)
tree | e06fa79877aac117cbf685d4862f567a75cc8d6c /tests/test_ema.py
parent | 3015941b5c61b686161701887a8618f5f77044bb (diff)
State dict support (#6)
Diffstat (limited to 'tests/test_ema.py')
-rw-r--r-- | tests/test_ema.py | 46
1 file changed, 45 insertions, 1 deletion
diff --git a/tests/test_ema.py b/tests/test_ema.py
index ad6ee37..67a14dc 100644
--- a/tests/test_ema.py
+++ b/tests/test_ema.py
@@ -1,5 +1,7 @@
 import pytest
 
+import copy
+
 import torch
 
 from torch_ema import ExponentialMovingAverage
@@ -40,6 +42,7 @@ def test_val_error(decay, use_num_updates, explicit_params):
     model.eval()
     logits = model(x_val)
     loss_orig = torch.nn.functional.cross_entropy(logits, y_val)
+    print(f"Original loss: {loss_orig}")
 
     # Validation: with EMA
     # First save original parameters before replacing with EMA version
@@ -55,6 +58,7 @@ def test_val_error(decay, use_num_updates, explicit_params):
     logits = model(x_val)
     loss_ema = torch.nn.functional.cross_entropy(logits, y_val)
+    print(f"EMA loss: {loss_ema}")
 
     assert loss_ema < loss_orig, "EMA loss wasn't lower"
 
     # Test restore
@@ -131,4 +135,44 @@ def test_explicit_params():
         model2.weight.fill_(1.0)
     ema.update(model2.parameters())
     ema.copy_to()
-    assert not torch.all(model.weight == 0.0)
\ No newline at end of file
+    assert not torch.all(model.weight == 0.0)
+
+
+@pytest.mark.parametrize("decay", [0.995])
+@pytest.mark.parametrize("use_num_updates", [True, False])
+@pytest.mark.parametrize("explicit_params", [True, False])
+def test_state_dict(decay, use_num_updates, explicit_params):
+    model = torch.nn.Linear(10, 2, bias=False)
+    with torch.no_grad():
+        model.weight.fill_(0.0)
+    ema = ExponentialMovingAverage(
+        model.parameters(),
+        decay=decay,
+        use_num_updates=False
+    )
+    state_dict = copy.deepcopy(ema.state_dict())
+
+    model2 = torch.nn.Linear(10, 2, bias=False)
+    ema2 = ExponentialMovingAverage(model2.parameters(), decay=0.0)
+    ema2.load_state_dict(state_dict)
+    assert ema2.decay == decay
+    assert torch.allclose(ema2.shadow_params[0], ema.shadow_params[0])
+
+    with torch.no_grad():
+        model2.weight.fill_(1.0)
+    if explicit_params:
+        ema2.update(model2.parameters())
+    else:
+        ema2.update()
+    assert torch.all(model2.weight == 1.0), "ema.update changed model weights"
+
+    ema.load_state_dict(ema2.state_dict())
+
+    if explicit_params:
+        ema.copy_to(model.parameters())
+    else:
+        ema.copy_to()
+    assert torch.allclose(
+        model.weight,
+        torch.full(size=(1,), fill_value=(1.0 - decay))
+    ), "average was wrong"
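The expected value in the final assertion follows from a single EMA step with `use_num_updates=False`: the shadow value starts at 0.0 and the parameter is 1.0, so the update gives decay · 0.0 + (1 − decay) · 1.0 = 1 − decay, which is why the restored weight is compared against `torch.full(..., fill_value=(1.0 - decay))`.

For context, below is a minimal usage sketch (not part of the commit) of how the state-dict support tested here might be used to checkpoint an EMA alongside a model. The single training step and the `checkpoint.pt` file name are illustrative assumptions; the `ExponentialMovingAverage` calls mirror the ones exercised in the test above.

```python
# Hypothetical usage sketch: checkpoint an ExponentialMovingAverage together
# with the model and optimizer, then restore it later. The training step and
# the "checkpoint.pt" path are illustrative assumptions, not part of the commit.
import torch
from torch_ema import ExponentialMovingAverage

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
ema = ExponentialMovingAverage(model.parameters(), decay=0.995)

# One illustrative optimization step; in practice this sits inside the loop.
x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
loss = torch.nn.functional.cross_entropy(model(x), y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
ema.update()  # fold the updated weights into the running average

# Persist model, optimizer, and EMA state together.
torch.save(
    {
        "model": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "ema": ema.state_dict(),
    },
    "checkpoint.pt",
)

# Later (possibly in another process): rebuild the objects, load the saved
# state, and copy the averaged weights into the model for evaluation.
checkpoint = torch.load("checkpoint.pt")
model.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
ema.load_state_dict(checkpoint["ema"])
ema.copy_to(model.parameters())
```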