From 6074484b9b9e148836fc3b83cbe5df8d0bec9afb Mon Sep 17 00:00:00 2001
From: generatedunixname2443911735787003
Date: Thu, 30 Nov 2023 08:45:02 -0800
Subject: [PATCH] [AutoAccept][Codemod][Replace deprecated unittest asserts]
 fbcode//faim/mmf

Differential Revision: D51702249

fbshipit-source-id: 90df3f5a87cdad64aa4c6f365607ee5ceff5c5dd
---
 tests/models/test_uniter.py                 |  2 +-
 tests/trainers/lightning/test_checkpoint.py | 14 +++++++-------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/tests/models/test_uniter.py b/tests/models/test_uniter.py
index 9bfb0aa0b..96ad23aa1 100644
--- a/tests/models/test_uniter.py
+++ b/tests/models/test_uniter.py
@@ -34,7 +34,7 @@ def test_forward(self):
         output = embedding(
             self.img_feat, self.img_pos_feat, self.type_embeddings, img_masks=None
         )
-        self.assertEquals(list(output.shape), [32, 100, 256])
+        self.assertEqual(list(output.shape), [32, 100, 256])


 class TestUNITERModelBase(unittest.TestCase):
diff --git a/tests/trainers/lightning/test_checkpoint.py b/tests/trainers/lightning/test_checkpoint.py
index 854d33493..ca8ea6a63 100644
--- a/tests/trainers/lightning/test_checkpoint.py
+++ b/tests/trainers/lightning/test_checkpoint.py
@@ -53,7 +53,7 @@ def _assert_same_dict(self, mmf, lightning, same=True):
     def _assert_same(self, obj1, obj2, same=True):
         if same:
             if hasattr(obj1, "mean") and obj1.dtype == torch.float:
-                self.assertAlmostEquals(obj1.mean().item(), obj2.mean().item(), 2)
+                self.assertAlmostEqual(obj1.mean().item(), obj2.mean().item(), 2)
             elif hasattr(obj1, "item"):
                 self.assertEqual(obj1.item(), obj2.item())
             elif type(obj1) is dict and type(obj2) is dict:
@@ -328,11 +328,11 @@ def test_load_mmf_trainer_checkpoint_in_lightning(self):
             lightning.trainer.fit(
                 lightning.model, train_dataloaders=lightning.train_loader
             )
-            self.assertEquals(lightning.trainer.global_step, 6)
+            self.assertEqual(lightning.trainer.global_step, 6)
             call_args_list = mock_method.call_args_list
             # training will take place 0 times. Since max_steps is the same
             # as the checkpoint's global_step
-            self.assertEquals(len(call_args_list), 0)
+            self.assertEqual(len(call_args_list), 0)

             # check to make sure that the lightning trainer's model and
             # mmf's are the same
@@ -393,7 +393,7 @@ def test_load_trainer_ckpt_number_of_steps(self):
             lightning.trainer.fit(
                 lightning.model, train_dataloaders=lightning.train_loader
             )
-            self.assertEquals(lightning.trainer.global_step, 12)
+            self.assertEqual(lightning.trainer.global_step, 12)
             call_args_list = [l[0][4] for l in mock_method.call_args_list]
             # in lightning 1.6.0 last batch idx from ckpt is repeated
             self.assertListEqual(list(range(5, 11)), call_args_list)
@@ -455,7 +455,7 @@ def test_lightning_checkpoint_interval(self):
             # https://github.com/PyTorchLightning/pytorch-lightning/pull/6997
             # also was an issue according to test_validation.py
             files = os.listdir(os.path.join(tmp_d, "models"))
-            self.assertEquals(3, len(files))
+            self.assertEqual(3, len(files))
             indexes = {int(x[:-5].split("=")[1]) for x in files}
             self.assertSetEqual({2, 4, 6}, indexes)

@@ -511,8 +511,8 @@ def _load_checkpoint_and_test(self, filename, ckpt_config=None):

         # Make sure lightning and mmf parity
         self._assert_same_dict(mmf_ckpt["model"], lightning_ckpt["state_dict"])
-        self.assertEquals(mmf_ckpt["current_epoch"], lightning_ckpt["epoch"] + 1)
-        self.assertEquals(mmf_ckpt["num_updates"], lightning_ckpt["global_step"])
+        self.assertEqual(mmf_ckpt["current_epoch"], lightning_ckpt["epoch"] + 1)
+        self.assertEqual(mmf_ckpt["num_updates"], lightning_ckpt["global_step"])
         self._assert_same_dict(
             mmf_ckpt["optimizer"], lightning_ckpt["optimizer_states"][0]
         )
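
For context: `assertEquals` and `assertAlmostEquals` are deprecated aliases of
`assertEqual` and `assertAlmostEqual`; they have emitted DeprecationWarning
since Python 3.2 and were removed entirely in Python 3.12, which is why this
codemod renames them. A minimal standalone sketch of the rename, illustrative
only and not part of the diff above:

    import unittest


    class DeprecatedAliasExample(unittest.TestCase):
        # Illustrative test case, not from fbcode//faim/mmf.
        def test_rename(self):
            # Deprecated spellings (DeprecationWarning before 3.12,
            # AttributeError on 3.12+):
            #     self.assertEquals(2 + 2, 4)
            #     self.assertAlmostEquals(0.1 + 0.2, 0.3, 2)
            # Supported spellings, as applied by this codemod:
            self.assertEqual(2 + 2, 4)
            self.assertAlmostEqual(0.1 + 0.2, 0.3, 2)  # third arg is places


    if __name__ == "__main__":
        unittest.main()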