Source code for pytorch_lightning.plugins.precision.sharded_native_amp
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union

import torch

from pytorch_lightning.overrides.fairscale import _FAIRSCALE_AVAILABLE
from pytorch_lightning.plugins.precision.native_amp import NativeMixedPrecisionPlugin
from pytorch_lightning.utilities.exceptions import MisconfigurationException

if _FAIRSCALE_AVAILABLE:
    from fairscale.optim import OSS
    from fairscale.optim.grad_scaler import ShardedGradScaler
else:
    OSS = ShardedGradScaler = object
class ShardedNativeMixedPrecisionPlugin(NativeMixedPrecisionPlugin):
    """Native AMP for Sharded Training."""

    def __init__(
        self, precision: Union[str, int], device: str, scaler: Optional[torch.cuda.amp.GradScaler] = None
    ) -> None:
        if not _FAIRSCALE_AVAILABLE:
            raise MisconfigurationException(
                "You have asked for sharded AMP but you have not installed it."
                " Install `fairscale` using this guide: https://github.com/facebookresearch/fairscale"
            )
        super().__init__(
            precision, device, scaler=ShardedGradScaler() if scaler is None and precision == 16 else None
        )
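A minimal usage sketch (not part of the library source), assuming `fairscale` is installed and a CUDA-capable machine is available; the precision, device, and Trainer arguments below are illustrative only.

# Sketch: constructing the plugin directly and handing it to a Trainer.
from pytorch_lightning import Trainer
from pytorch_lightning.plugins.precision.sharded_native_amp import ShardedNativeMixedPrecisionPlugin

# With precision=16 and no explicit scaler, __init__ creates a fairscale
# ShardedGradScaler so that gradient scaling cooperates with the sharded (OSS)
# optimizer state instead of the plain torch.cuda.amp.GradScaler.
plugin = ShardedNativeMixedPrecisionPlugin(precision=16, device="cuda")

# In practice this plugin is normally selected automatically when combining a
# sharded strategy with 16-bit precision; passing it explicitly is shown here
# only to illustrate the constructor arguments.
trainer = Trainer(accelerator="gpu", devices=2, strategy="ddp_sharded", plugins=[plugin])

The design choice to fall back to `OSS = ShardedGradScaler = object` when fairscale is absent keeps this module importable without the optional dependency; the `MisconfigurationException` is only raised if the plugin is actually instantiated.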