Source code for pytorch_lightning.strategies.single_hpu
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, Optional, Union

from torch.nn import Module
from torch.optim.optimizer import Optimizer

import pytorch_lightning as pl
from lightning_fabric.plugins import CheckpointIO
from lightning_fabric.utilities.types import _DEVICE
from pytorch_lightning.accelerators.hpu import _HPU_AVAILABLE
from pytorch_lightning.plugins.io.hpu_plugin import HPUCheckpointIO
from pytorch_lightning.plugins.io.wrapper import _WrappingCheckpointIO
from pytorch_lightning.plugins.precision import PrecisionPlugin
from pytorch_lightning.strategies.single_device import SingleDeviceStrategy
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.types import STEP_OUTPUT

if _HPU_AVAILABLE:
    import habana_frameworks.torch.core as htcore
class SingleHPUStrategy(SingleDeviceStrategy):
    """Strategy for training on a single HPU device."""

    strategy_name = "hpu_single"

    def __init__(
        self,
        device: _DEVICE = "hpu",
        accelerator: Optional["pl.accelerators.Accelerator"] = None,
        checkpoint_io: Optional[CheckpointIO] = None,
        precision_plugin: Optional[PrecisionPlugin] = None,
    ):
        if not _HPU_AVAILABLE:
            raise MisconfigurationException("`SingleHPUStrategy` requires HPU devices to run")

        super().__init__(
            accelerator=accelerator,
            device=device,
            checkpoint_io=checkpoint_io,
            precision_plugin=precision_plugin,
        )

    @property
    def checkpoint_io(self) -> CheckpointIO:
        # Default to the HPU-aware checkpoint IO when none was provided.
        if self._checkpoint_io is None:
            self._checkpoint_io = HPUCheckpointIO()
        elif isinstance(self._checkpoint_io, _WrappingCheckpointIO):
            self._checkpoint_io.checkpoint_io = HPUCheckpointIO()

        return self._checkpoint_io

    @checkpoint_io.setter
    def checkpoint_io(self, io: Optional[CheckpointIO]) -> None:
        self._checkpoint_io = io

    @property
    def is_distributed(self) -> bool:
        return False
    def on_after_backward(self) -> None:
        # Break lazy accumulation of graph after fwd+bwd
        htcore.mark_step()
    def optimizer_step(
        self,
        optimizer: Optimizer,
        opt_idx: int,
        closure: Callable[[], Any],
        model: Optional[Union["pl.LightningModule", Module]] = None,
        **kwargs: Any,
    ) -> Any:
        optimizer_output = super().optimizer_step(optimizer, opt_idx, closure, model, **kwargs)
        # Break lazy accumulation of graph after optimizer
        htcore.mark_step()
        return optimizer_output
    def validation_step_end(self, step_output: STEP_OUTPUT) -> STEP_OUTPUT:
        # Break lazy accumulation of graph after every step
        htcore.mark_step()
        return step_output

    def test_step_end(self, step_output: STEP_OUTPUT) -> STEP_OUTPUT:
        # Break lazy accumulation of graph after every step
        htcore.mark_step()
        return step_output

    @classmethod
    def register_strategies(cls, strategy_registry: Dict) -> None:
        strategy_registry.register(
            cls.strategy_name,
            cls,
            description=f"{cls.__name__}",
        )
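Usage note, a minimal sketch rather than part of the module above: the strategy can be passed to a Trainer instance directly, or selected through its registered name "hpu_single". This assumes a Gaudi/HPU machine with Habana's habana_frameworks package installed; constructing the strategy elsewhere raises the MisconfigurationException shown above.

import pytorch_lightning as pl
from pytorch_lightning.strategies import SingleHPUStrategy

# Sketch only: requires an HPU-enabled environment (habana_frameworks installed).
trainer = pl.Trainer(
    accelerator="hpu",
    devices=1,
    strategy=SingleHPUStrategy(),  # equivalently: strategy="hpu_single"
)

The htcore.mark_step() calls are the key design point: HPUs run PyTorch in lazy mode, so the strategy flushes the accumulated graph after backward, after each optimizer step, and at the end of each validation/test step to keep graph accumulation bounded.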