{"id":24147,"library":"opacus","title":"Opacus","description":"Opacus is a library for training PyTorch models with differential privacy. It supports per-sample gradient clipping, privacy accounting (including PRVAccountant), and fast gradient clipping. Current version is 1.5.4, compatible with Python >=3.7.5 and PyTorch 1.13+. It is actively maintained by Meta.","status":"active","version":"1.5.4","language":"python","source_language":"en","source_url":"https://github.com/pytorch/opacus","tags":["differential-privacy","pytorch","privacy","machine-learning"],"install":[{"cmd":"pip install opacus","lang":"bash","label":"Default install"}],"dependencies":[{"reason":"Opacus requires PyTorch >=1.13","package":"torch","optional":false}],"imports":[{"note":"PrivacyEngine is exported from top-level opacus package","wrong":"from opacus.privacy_engine import PrivacyEngine","symbol":"PrivacyEngine","correct":"from opacus import PrivacyEngine"},{"note":"DPOptimizer is in opacus.optimizers, not top-level","wrong":"from opacus import DPOptimizer","symbol":"DPOptimizer","correct":"from opacus.optimizers import DPOptimizer"},{"note":"GradSampleModule is exported from top-level opacus","wrong":"from opacus.grad_sample import GradSampleModule","symbol":"GradSampleModule","correct":"from opacus import GradSampleModule"},{"note":"PRVAccountant is in opacus.accountants, not top-level","wrong":"from opacus import PRVAccountant","symbol":"PRVAccountant","correct":"from opacus.accountants import PRVAccountant"},{"note":"RDPAccountant is in opacus.accountants","wrong":"from opacus import RDPAccountant","symbol":"RDPAccountant","correct":"from opacus.accountants import RDPAccountant"}],"quickstart":{"code":"import torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom opacus import PrivacyEngine\n\n# Create a simple model\ntorch.manual_seed(0)\nmodel = nn.Linear(10, 2)\ndata = torch.randn(64, 10)\nlabels = torch.randint(0, 2, (64,))\ntrain_dataset = 
TensorDataset(data, labels)\ntrain_loader = DataLoader(train_dataset, batch_size=32)\n\n# Define optimizer and loss\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01)\ncriterion = nn.CrossEntropyLoss()\n\n# Attach privacy engine\nprivacy_engine = PrivacyEngine()\nmodel, optimizer, train_loader = privacy_engine.make_private(\n    module=model,\n    optimizer=optimizer,\n    data_loader=train_loader,\n    noise_multiplier=0.5,\n    max_grad_norm=1.0,\n)\n\n# Training loop\nfor epoch in range(2):\n    for x, y in train_loader:\n        outputs = model(x)\n        loss = criterion(outputs, y)\n        loss.backward()\n        optimizer.step()\n        optimizer.zero_grad()\n\n# Get privacy spent\nepsilon = privacy_engine.get_epsilon(delta=1e-5)\nprint(f\"Privacy spent: epsilon = {epsilon:.2f}\")","lang":"python","description":"Minimal DP training loop with Opacus using PrivacyEngine.make_private."},"warnings":[{"fix":"Always use the data_loader returned by privacy_engine.make_private() in training loops.","message":"DataLoader must be passed to make_private() and the returned loader used for training. Using the original loader breaks privacy.","severity":"gotcha","affected_versions":">=1.0.0"},{"fix":"Update to: model, optimizer, data_loader = privacy_engine.make_private(...). Read the v1.0 migration guide.","message":"In v1.0, the API changed: make_private() accepts keyword arguments and returns three objects (model, optimizer, data_loader), replacing the old privacy_engine.attach(optimizer) pattern. Old code unpacking two items (e.g., model, optimizer = privacy_engine.make_private(...)) will break.","severity":"breaking","affected_versions":">=1.0.0"},{"fix":"Replace privacy_engine.accountant.get_epsilon(delta) with privacy_engine.get_epsilon(delta).","message":"The old accountant interface via privacy_engine.accountant is deprecated. 
Use privacy_engine.get_epsilon(delta) directly.","severity":"deprecated","affected_versions":">=1.4.0"},{"fix":"For LLM fine-tuning, pass grad_sample_mode='ghost' to make_private().","message":"Ghost clipping (for language models) requires opting in: pass grad_sample_mode='ghost' to make_private (or make_private_with_epsilon). Vanilla make_private does not apply ghost clipping.","severity":"gotcha","affected_versions":">=1.5.0"}],"env_vars":null,"last_verified":"2026-05-01T00:00:00.000Z","next_check":"2026-07-30T00:00:00.000Z","problems":[{"fix":"Upgrade to latest Opacus: pip install --upgrade opacus","cause":"Using an older version of Opacus (pre-1.0) where the method was named differently.","error":"AttributeError: 'PrivacyEngine' object has no attribute 'make_private'"},{"fix":"Change to: model, optimizer, data_loader = privacy_engine.make_private(...)","cause":"Unpacking make_private() result into two variables but it returns three (model, optimizer, data_loader) since v1.0.","error":"ValueError: too many values to unpack (expected 2)"},{"fix":"Always use the data_loader returned by privacy_engine.make_private(). Never use the original DataLoader directly.","cause":"Using the original data_loader instead of the one returned by make_private. Opacus replaces the sampler.","error":"RuntimeError: DataLoader worker process exited before finishing"},{"fix":"Upgrade to opacus>=1.5.1 or install opt_einsum: pip install opt_einsum","cause":"Opacus 1.5.0 and earlier required opt_einsum for linear layers; removed in 1.5.1+ but old installs may still need it.","error":"ModuleNotFoundError: No module named 'opt_einsum'"}],"ecosystem":"pypi","meta_description":null,"install_score":null,"install_tag":null,"quickstart_score":null,"quickstart_tag":null}