From d52d7a26790cd4ff70934953b5be552e97ee2294 Mon Sep 17 00:00:00 2001
From: Enayat Ullah
Date: Fri, 2 Aug 2024 16:01:42 -0700
Subject: [PATCH] Fix DistributedDP Optimizer for Fast Gradient Clipping (#662)

Summary:
Pull Request resolved: https://github.com/pytorch/opacus/pull/662

The step function incorrectly called "original_optimizer.original_optimizer"
instead of "original_optimizer". Fixed it now.

Reviewed By: HuanyuZhang

Differential Revision: D60484128
---
 opacus/__init__.py                                        | 3 ++-
 opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py  | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/opacus/__init__.py b/opacus/__init__.py
index c5f7d3f9..b2009227 100644
--- a/opacus/__init__.py
+++ b/opacus/__init__.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 from . import utils
-from .grad_sample import GradSampleModule
+from .grad_sample import GradSampleModule, GradSampleModuleFastGradientClipping
 from .privacy_engine import PrivacyEngine
 from .version import __version__
 
@@ -22,6 +22,7 @@
 __all__ = [
     "PrivacyEngine",
     "GradSampleModule",
+    "GradSampleModuleFastGradientClipping",
     "utils",
     "__version__",
 ]
diff --git a/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py b/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py
index dd2c1b94..b2245303 100644
--- a/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py
+++ b/opacus/optimizers/ddpoptimizer_fast_gradient_clipping.py
@@ -76,6 +76,6 @@ def step(
         if self.pre_step():
             self.reduce_gradients()
-            return self.original_optimizer.original_optimizer.step()
+            return self.original_optimizer.step()
         else:
             return None
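
For context, below is a minimal sketch of the single-level delegation pattern
that the fix restores, along with the top-level import that the
opacus/__init__.py change makes available. The Wrapper class is hypothetical
and only mimics how DistributedDPOptimizerFastGradientClipping hands off to
the wrapped optimizer; it is not the real implementation, and the parameters
and gradients are toy placeholders.

# Sketch only, not part of the patch. Shows the newly re-exported name and a
# toy version of the optimizer-wrapper delegation corrected by this fix.
import torch

# After this patch, the fast gradient clipping module can be imported from the
# package root (it is added to opacus/__init__.py and __all__).
from opacus import GradSampleModuleFastGradientClipping  # noqa: F401


class Wrapper:
    """Hypothetical stand-in for the DP optimizer wrapper (illustration only)."""

    def __init__(self, original_optimizer: torch.optim.Optimizer):
        # The user's optimizer is wrapped exactly once.
        self.original_optimizer = original_optimizer

    def step(self):
        # Correct: one level of delegation, as restored by this patch. The
        # buggy version called self.original_optimizer.original_optimizer.step(),
        # which fails because the inner object is a plain torch optimizer.
        return self.original_optimizer.step()


params = [torch.nn.Parameter(torch.zeros(3))]
opt = Wrapper(torch.optim.SGD(params, lr=0.1))
params[0].grad = torch.ones(3)  # pretend a backward pass produced gradients
opt.step()  # delegates once to SGD.step()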