# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from dataclasses import fields
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union, overload
from weakref import proxy

import torch
from torch import optim
from torch.optim import Optimizer
from typing_extensions import override

import lightning.pytorch as pl
from lightning.fabric.utilities.types import Optimizable, ReduceLROnPlateau, _Stateful
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.model_helpers import is_overridden
from lightning.pytorch.utilities.rank_zero import rank_zero_warn
from lightning.pytorch.utilities.signature_utils import is_param_in_hook_signature
from lightning.pytorch.utilities.types import LRSchedulerConfig, LRSchedulerTypeTuple


def do_nothing_closure() -> None:
    return


class LightningOptimizer:
    """This class wraps the user's optimizer and correctly handles the backward and ``optimizer_step`` logic across
    accelerators, AMP, and ``accumulate_grad_batches``.

    Note: The purpose of this wrapper is only to define new methods and redirect the ``.step()`` call. The internal
    state ``__dict__`` is not kept in sync with the internal state of the original optimizer, but the Trainer never
    relies on the internal state of the wrapper.

    """

    def __init__(self, optimizer: Optimizer):
        self._optimizer = optimizer
        self._strategy: Optional[pl.strategies.Strategy] = None
        # to inject logic around the optimizer step, particularly useful with manual optimization
        self._on_before_step = do_nothing_closure
        self._on_after_step = do_nothing_closure
        # imitate the class of the wrapped object to make isinstance checks work
        self.__class__ = type("Lightning" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__), {})
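
    # A minimal sketch (not executed here) of what the class-imitation trick in
    # `__init__` buys us: wrapping a ``torch.optim.SGD`` produces a dynamic
    # ``LightningSGD`` subclass, so user-side ``isinstance`` checks against the
    # original optimizer class keep working. The names below are illustrative:
    #
    #     param = torch.nn.Parameter(torch.zeros(1))
    #     opt = LightningOptimizer(torch.optim.SGD([param], lr=0.1))
    #     assert isinstance(opt, torch.optim.SGD)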

    @property
    def optimizer(self) -> Optimizer:
        return self._optimizer

    @contextmanager
    def toggle_model(self, sync_grad: bool = True) -> Generator[None, None, None]:
        """This context manager is a helper for advanced users.

        Consider the current optimizer as A and all other optimizers as B. Toggling means that all parameters from B
        exclusive to A will have their ``requires_grad`` attribute set to ``False``.

        When performing gradient accumulation, there is no need to perform grad synchronization during the
        accumulation phase. Setting ``sync_grad`` to ``False`` will block this synchronization and improve
        performance.

        """
        # local import here to avoid circular import
        from lightning.pytorch.loops.utilities import _block_parallel_sync_behavior

        assert self._strategy is not None
        lightning_module = self._strategy.lightning_module
        assert lightning_module is not None
        with _block_parallel_sync_behavior(self._strategy, block=(not sync_grad)):
            lightning_module.toggle_optimizer(self)
            yield
        lightning_module.untoggle_optimizer(self)

    def step(self, closure: Optional[Callable[[], Any]] = None, **kwargs: Any) -> Any:
        """Performs a single optimization step (parameter update).

        Args:
            closure: An optional optimizer closure.
            kwargs: Any additional arguments to the ``optimizer.step()`` call.

        Returns:
            The output from the step call, which is generally the output of the closure execution.

        Example::

            # Scenario for a GAN using manual optimization
            def training_step(self, batch, batch_idx):
                opt_gen, opt_dis = self.optimizers()

                ...

                # compute generator loss
                loss_gen = self.compute_generator_loss(...)
                # zero_grad needs to be called before backward
                opt_gen.zero_grad()
                self.manual_backward(loss_gen)
                opt_gen.step()

                # compute discriminator loss
                loss_dis = self.compute_discriminator_loss(...)

                # zero_grad needs to be called before backward
                opt_dis.zero_grad()
                self.manual_backward(loss_dis)
                opt_dis.step()


            # A more advanced example
            def training_step(self, batch, batch_idx):
                opt_gen, opt_dis = self.optimizers()

                ...
                accumulated_grad_batches = batch_idx % 2 == 0

                # compute generator loss
                def closure_gen():
                    loss_gen = self.compute_generator_loss(...)
                    self.manual_backward(loss_gen)
                    if accumulated_grad_batches:
                        opt_gen.zero_grad()

                with opt_gen.toggle_model(sync_grad=accumulated_grad_batches):
                    opt_gen.step(closure=closure_gen)

                def closure_dis():
                    loss_dis = self.compute_discriminator_loss(...)
                    self.manual_backward(loss_dis)
                    if accumulated_grad_batches:
                        opt_dis.zero_grad()

                with opt_dis.toggle_model(sync_grad=accumulated_grad_batches):
                    opt_dis.step(closure=closure_dis)

        """
        self._on_before_step()

        if closure is None:
            closure = do_nothing_closure
        elif not callable(closure):
            raise MisconfigurationException("When `optimizer.step(closure)` is called, the closure should be callable")

        assert self._strategy is not None
        step_output = self._strategy.optimizer_step(self._optimizer, closure, **kwargs)

        self._on_after_step()

        return step_output

    @classmethod
    def _to_lightning_optimizer(
        cls, optimizer: Union[Optimizer, "LightningOptimizer"], strategy: "pl.strategies.Strategy"
    ) -> "LightningOptimizer":
        # the user could return a `LightningOptimizer` from `configure_optimizers`, see test:
        # tests/core/test_lightning_optimizer.py::test_lightning_optimizer[False]
        lightning_optimizer = optimizer if isinstance(optimizer, LightningOptimizer) else cls(optimizer)
        lightning_optimizer._strategy = proxy(strategy)
        return lightning_optimizer

    def __getattr__(self, item: Any) -> Any:
        return getattr(self._optimizer, item)


def _init_optimizers_and_lr_schedulers(
    model: "pl.LightningModule",
) -> Tuple[List[Optimizer], List[LRSchedulerConfig]]:
    """Calls `LightningModule.configure_optimizers` and parses and validates the output."""
    from lightning.pytorch.trainer import call

    optim_conf = call._call_lightning_module_hook(model.trainer, "configure_optimizers", pl_module=model)

    if optim_conf is None:
        rank_zero_warn(
            "`LightningModule.configure_optimizers` returned `None`, this fit will run with no optimizer",
        )
        optim_conf = _MockOptimizer()

    optimizers, lr_schedulers, monitor = _configure_optimizers(optim_conf)
    lr_scheduler_configs = (
        _configure_schedulers_automatic_opt(lr_schedulers, monitor)
        if model.automatic_optimization
        else _configure_schedulers_manual_opt(lr_schedulers)
    )
    _validate_multiple_optimizers_support(optimizers, model)
    _validate_optimizers_attached(optimizers, lr_scheduler_configs)
    _validate_scheduler_api(lr_scheduler_configs, model)
    return optimizers, lr_scheduler_configs


def _configure_optimizers(
    optim_conf: Union[Dict[str, Any], List, Optimizer, Tuple],
) -> Tuple[List, List, Optional[str]]:
    optimizers, lr_schedulers = [], []
    monitor = None

    # single output, single optimizer
    if isinstance(optim_conf, Optimizable):
        optimizers = [optim_conf]
    # two lists, optimizers + lr schedulers
    elif (
        isinstance(optim_conf, (list, tuple))
        and len(optim_conf) == 2
        and isinstance(optim_conf[0], list)
        and all(isinstance(opt, Optimizable) for opt in optim_conf[0])
    ):
        opt, sch = optim_conf
        optimizers = opt
        lr_schedulers = sch if isinstance(sch, list) else [sch]
    # single dictionary
    elif isinstance(optim_conf, dict):
        _validate_optim_conf(optim_conf)
        optimizers = [optim_conf["optimizer"]]
        monitor = optim_conf.get("monitor", None)
        lr_schedulers = [optim_conf["lr_scheduler"]] if "lr_scheduler" in optim_conf else []
    # multiple dictionaries
    elif isinstance(optim_conf, (list, tuple)) and all(isinstance(d, dict) for d in optim_conf):
        for opt_dict in optim_conf:
            _validate_optim_conf(opt_dict)
        optimizers = [opt_dict["optimizer"] for opt_dict in optim_conf]
        scheduler_dict = lambda scheduler: dict(scheduler) if isinstance(scheduler, dict) else {"scheduler": scheduler}
        lr_schedulers = [
            scheduler_dict(opt_dict["lr_scheduler"]) for opt_dict in optim_conf if "lr_scheduler" in opt_dict
        ]
    # single list or tuple, multiple optimizers
    elif isinstance(optim_conf, (list, tuple)) and all(isinstance(opt, Optimizable) for opt in optim_conf):
        optimizers = list(optim_conf)
    # unknown configuration
    else:
        raise MisconfigurationException(
            "Unknown configuration for model optimizers."
            " Output from `model.configure_optimizers()` should be one of:\n"
            " * `Optimizer`\n"
            " * [`Optimizer`]\n"
            " * ([`Optimizer`], [`LRScheduler`])\n"
            ' * {"optimizer": `Optimizer`, (optional) "lr_scheduler": `LRScheduler`}\n'
        )
    return optimizers, lr_schedulers, monitor
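
# Hedged sketches (not executed) of `configure_optimizers` return values that the
# parser above accepts; `opt`, `opt2`, and `sch` are hypothetical objects created
# inside the user's `configure_optimizers`:
#
#     return opt                                         # single optimizer
#     return [opt, opt2]                                 # list of optimizers (manual optimization)
#     return [opt], [sch]                                # two lists: optimizers and schedulers
#     return {"optimizer": opt, "lr_scheduler": sch}     # single dict, optional "monitor" key
#     return [{"optimizer": opt}, {"optimizer": opt2, "lr_scheduler": sch}]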


def _configure_schedulers_automatic_opt(schedulers: list, monitor: Optional[str]) -> List[LRSchedulerConfig]:
    """Convert each scheduler into `LRSchedulerConfig` with relevant information, when using automatic
    optimization."""
    lr_scheduler_configs = []
    for scheduler in schedulers:
        if isinstance(scheduler, dict):
            # check provided keys
            supported_keys = {field.name for field in fields(LRSchedulerConfig)}
            extra_keys = scheduler.keys() - supported_keys
            if extra_keys:
                rank_zero_warn(
                    f"Found unsupported keys in the lr scheduler dict: {extra_keys}."
                    " HINT: remove them from the output of `configure_optimizers`.",
                    category=RuntimeWarning,
                )
                scheduler = {k: v for k, v in scheduler.items() if k in supported_keys}
            if "scheduler" not in scheduler:
                raise MisconfigurationException(
                    'The lr scheduler dict must have the key "scheduler" with its item being an lr scheduler'
                )
            if "interval" in scheduler and scheduler["interval"] not in ("step", "epoch"):
                raise MisconfigurationException(
                    'The "interval" key in lr scheduler dict must be "step" or "epoch"'
                    f' but is "{scheduler["interval"]}"'
                )
            scheduler["reduce_on_plateau"] = scheduler.get(
                "reduce_on_plateau", isinstance(scheduler["scheduler"], optim.lr_scheduler.ReduceLROnPlateau)
            )
            if scheduler["reduce_on_plateau"] and scheduler.get("monitor", None) is None:
                raise MisconfigurationException(
                    "The lr scheduler dict must include a monitor when a `ReduceLROnPlateau` scheduler is used."
                    ' For example: {"optimizer": optimizer, "lr_scheduler":'
                    ' {"scheduler": scheduler, "monitor": "your_loss"}}'
                )
            is_one_cycle = isinstance(scheduler["scheduler"], optim.lr_scheduler.OneCycleLR)
            if is_one_cycle and scheduler.get("interval", "epoch") == "epoch":
                rank_zero_warn(
                    "A `OneCycleLR` scheduler is using 'interval': 'epoch'."
                    " Are you sure you didn't mean 'interval': 'step'?",
                    category=RuntimeWarning,
                )
            config = LRSchedulerConfig(**scheduler)
        elif isinstance(scheduler, ReduceLROnPlateau):
            if monitor is None:
                raise MisconfigurationException(
                    "`configure_optimizers` must include a monitor when a `ReduceLROnPlateau`"
                    " scheduler is used. For example:"
                    ' {"optimizer": optimizer, "lr_scheduler": scheduler, "monitor": "metric_to_track"}'
                )
            config = LRSchedulerConfig(scheduler, reduce_on_plateau=True, monitor=monitor)
        else:
            config = LRSchedulerConfig(scheduler)
        lr_scheduler_configs.append(config)
    return lr_scheduler_configs
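
# A hedged example (not executed) of a scheduler dict that satisfies the checks
# above; `optimizer` is a hypothetical optimizer created in `configure_optimizers`:
#
#     scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
#     return {
#         "optimizer": optimizer,
#         "lr_scheduler": {
#             "scheduler": scheduler,
#             "monitor": "val_loss",  # required because this is a ReduceLROnPlateau
#             "interval": "epoch",    # must be "step" or "epoch"
#         },
#     }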


def _configure_schedulers_manual_opt(schedulers: list) -> List[LRSchedulerConfig]:
    """Convert each scheduler into `LRSchedulerConfig` structure with relevant information, when using manual
    optimization."""
    lr_scheduler_configs = []
    for scheduler in schedulers:
        if isinstance(scheduler, dict):
            # interval is not in this list even though the user needs to manually call the scheduler because
            # the `LearningRateMonitor` callback needs to check its value to know when to log the learning rate
            invalid_keys = {"reduce_on_plateau", "monitor", "strict"}
            keys_to_warn = [k for k in scheduler if k in invalid_keys]

            if keys_to_warn:
                rank_zero_warn(
                    f"The lr scheduler dict contains the key(s) {keys_to_warn}, but the keys will be ignored."
                    " You need to call `lr_scheduler.step()` manually in manual optimization.",
                    category=RuntimeWarning,
                )

            config = LRSchedulerConfig(**{key: scheduler[key] for key in scheduler if key not in invalid_keys})
        else:
            config = LRSchedulerConfig(scheduler)
        lr_scheduler_configs.append(config)
    return lr_scheduler_configs
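
# Under manual optimization the user steps the scheduler themselves, e.g. from
# `training_step` (a sketch; `self.lr_schedulers()` returns whatever was
# configured, here assumed to be a single scheduler):
#
#     sch = self.lr_schedulers()
#     if self.trainer.is_last_batch:
#         sch.step()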


def _validate_scheduler_api(lr_scheduler_configs: List[LRSchedulerConfig], model: "pl.LightningModule") -> None:
    for config in lr_scheduler_configs:
        scheduler = config.scheduler
        if not isinstance(scheduler, _Stateful):
            raise TypeError(
                f"The provided lr scheduler `{scheduler.__class__.__name__}` is invalid."
                " It should have `state_dict` and `load_state_dict` methods defined."
            )

        if (
            not isinstance(scheduler, LRSchedulerTypeTuple)
            and not is_overridden("lr_scheduler_step", model)
            and model.automatic_optimization
        ):
            raise MisconfigurationException(
                f"The provided lr scheduler `{scheduler.__class__.__name__}` doesn't follow PyTorch's LRScheduler"
                " API. You should override the `LightningModule.lr_scheduler_step` hook with your own logic if"
                " you are using a custom LR scheduler."
            )


def _validate_multiple_optimizers_support(optimizers: List[Optimizer], model: "pl.LightningModule") -> None:
    if is_param_in_hook_signature(model.training_step, "optimizer_idx", explicit=True):
        raise RuntimeError(
            "Training with multiple optimizers is only supported with manual optimization. Remove the `optimizer_idx`"
            " argument from `training_step`, set `self.automatic_optimization = False` and access your optimizers"
            " in `training_step` with `opt1, opt2, ... = self.optimizers()`."
        )
    if model.automatic_optimization and len(optimizers) > 1:
        raise RuntimeError(
            "Training with multiple optimizers is only supported with manual optimization. Set"
            " `self.automatic_optimization = False`, then access your optimizers in `training_step` with"
            " `opt1, opt2, ... = self.optimizers()`."
        )


def _validate_optimizers_attached(optimizers: List[Optimizer], lr_scheduler_configs: List[LRSchedulerConfig]) -> None:
    for config in lr_scheduler_configs:
        if config.scheduler.optimizer not in optimizers:
            raise MisconfigurationException(
                "Some schedulers are attached with an optimizer that wasn't returned from `configure_optimizers`."
            )


def _validate_optim_conf(optim_conf: Dict[str, Any]) -> None:
    valid_keys = {"optimizer", "lr_scheduler", "monitor"}
    extra_keys = optim_conf.keys() - valid_keys
    if extra_keys:
        rank_zero_warn(
            f"Found unsupported keys in the optimizer configuration: {set(extra_keys)}", category=RuntimeWarning
        )


class _MockOptimizer(Optimizer):
    """The `_MockOptimizer` will be used in place of an optimizer in the event that `None` is returned from
    :meth:`~lightning.pytorch.core.LightningModule.configure_optimizers`."""

    def __init__(self) -> None:
        super().__init__([torch.zeros(1)], {})

    @override
    def add_param_group(self, param_group: Dict[Any, Any]) -> None:
        pass  # Do Nothing

    @override
    def load_state_dict(self, state_dict: Dict[Any, Any]) -> None:
        pass  # Do Nothing

    @override
    def state_dict(self) -> Dict[str, Any]:
        return {}  # Return Empty

    @overload
    def step(self, closure: None = ...) -> None: ...

    @overload
    def step(self, closure: Callable[[], float]) -> float: ...

    @override
    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        if closure is not None:
            return closure()

    @override
    def zero_grad(self, set_to_none: Optional[bool] = True) -> None:
        pass  # Do Nothing

    @override
    def __repr__(self) -> str:
        return "No Optimizer"
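
# Sketch of when `_MockOptimizer` is substituted (hypothetical user module):
# returning `None` from `configure_optimizers` triggers the warning in
# `_init_optimizers_and_lr_schedulers` above, and the fit runs with no optimizer:
#
#     class NoOptimizerModule(pl.LightningModule):
#         def configure_optimizers(self):
#             return None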