diff --git a/.github/workflows/ci_gpu.yml b/.github/workflows/ci_gpu.yml
index a908656f..7be73e94 100644
--- a/.github/workflows/ci_gpu.yml
+++ b/.github/workflows/ci_gpu.yml
@@ -84,9 +84,9 @@ jobs:
           mkdir -p runs/cifar10/test-reports
           pip install tensorboard
           python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device cuda
-          python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"
+          python -c "import torch; model = torch.load('model_best.pth.tar', weights_only=False); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"
           python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device cuda --grad_sample_mode no_op
-          python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"
+          python -c "import torch; model = torch.load('model_best.pth.tar', weights_only=False); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"

      - name: Store CIFAR10 test results
        uses: actions/upload-artifact@v4
diff --git a/benchmarks/tests/test_utils.py b/benchmarks/tests/test_utils.py
index 37649ae8..3bf9d1a1 100644
--- a/benchmarks/tests/test_utils.py
+++ b/benchmarks/tests/test_utils.py
@@ -194,7 +194,7 @@ def pickle_data_and_config(
     ],
 )
 def test_save_results(
-    pickle_data_and_config: Tuple[Dict[str, Any], Dict[str, Any]]
+    pickle_data_and_config: Tuple[Dict[str, Any], Dict[str, Any]],
 ) -> None:
     """Tests saving benchmark results.

diff --git a/opacus/grad_sample/utils.py b/opacus/grad_sample/utils.py
index f4011361..9f8cbd7b 100644
--- a/opacus/grad_sample/utils.py
+++ b/opacus/grad_sample/utils.py
@@ -27,7 +27,7 @@


 def register_grad_sampler(
-    target_class_or_classes: Union[Type[nn.Module], Sequence[Type[nn.Module]]]
+    target_class_or_classes: Union[Type[nn.Module], Sequence[Type[nn.Module]]],
 ):
     """
     Registers the decorated function as the ``grad_sampler`` of ``target_class_or_classes``, which is
@@ -56,7 +56,7 @@ def decorator(f):


 def register_norm_sampler(
-    target_class_or_classes: Union[Type[nn.Module], Sequence[Type[nn.Module]]]
+    target_class_or_classes: Union[Type[nn.Module], Sequence[Type[nn.Module]]],
 ):
     """
     Registers the decorated function as the ``norm_sampler`` of ``target_class_or_classes``, which is
diff --git a/opacus/privacy_engine.py b/opacus/privacy_engine.py
index 558c8f8e..cacacee0 100644
--- a/opacus/privacy_engine.py
+++ b/opacus/privacy_engine.py
@@ -603,7 +603,7 @@ def load_checkpoint(
         module_load_dict_kwargs: Optional[Dict[str, Any]] = None,
         torch_load_kwargs: Optional[Dict[str, Any]] = None,
     ) -> Dict:
-        checkpoint = torch.load(path, **(torch_load_kwargs or {}))
+        checkpoint = torch.load(path, **(torch_load_kwargs or {}), weights_only=False)
         module.load_state_dict(
             checkpoint["module_state_dict"], **(module_load_dict_kwargs or {})
         )
diff --git a/opacus/utils/per_sample_gradients_utils.py b/opacus/utils/per_sample_gradients_utils.py
index 754d86d6..7703b77c 100644
--- a/opacus/utils/per_sample_gradients_utils.py
+++ b/opacus/utils/per_sample_gradients_utils.py
@@ -39,7 +39,7 @@ def clone_module(module: nn.Module) -> nn.Module:
     with io.BytesIO() as bytesio:
         torch.save(module, bytesio)
         bytesio.seek(0)
-        module_copy = torch.load(bytesio)
+        module_copy = torch.load(bytesio, weights_only=False)
     return module_copy
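
The `weights_only=False` additions above all address the same upstream change: starting with PyTorch 2.6, `torch.load` defaults to `weights_only=True`, which restricts unpickling to tensors and a small allowlist of built-in types, so loading a fully pickled `nn.Module` or a checkpoint containing arbitrary Python objects fails unless the caller opts back in. A minimal sketch of that behavior, assuming PyTorch >= 2.6 (the toy module here is illustrative, not taken from the Opacus codebase):

```python
import io

import torch
from torch import nn

# Pickle a whole module object, the same pattern clone_module() uses.
model = nn.Linear(4, 2)
buffer = io.BytesIO()
torch.save(model, buffer)

buffer.seek(0)
try:
    # Default on PyTorch >= 2.6: weights_only=True. Arbitrary classes such as
    # nn.Linear are not on the safe-globals allowlist, so this load raises.
    torch.load(buffer)
except Exception as err:  # typically pickle.UnpicklingError
    print(f"weights_only=True rejected the pickled module: {err}")

buffer.seek(0)
# Opt back into full (unsafe) unpickling; only do this for trusted files.
restored = torch.load(buffer, weights_only=False)
print(type(restored))  # <class 'torch.nn.modules.linear.Linear'>
```

An alternative to `weights_only=False` is to keep the safe default and allowlist the specific classes a checkpoint needs via `torch.serialization.add_safe_globals`, but for trusted, self-produced artifacts such as the CI checkpoints above, opting out is the smaller change.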