Source code for pytorch_lightning.plugins.precision.sharded_native_amp
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union

from typing_extensions import Literal

from pytorch_lightning.overrides.fairscale import _FAIRSCALE_AVAILABLE
from pytorch_lightning.plugins.precision.native_amp import MixedPrecisionPlugin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation

if _FAIRSCALE_AVAILABLE:
    from fairscale.optim import OSS
    from fairscale.optim.grad_scaler import ShardedGradScaler
else:
    OSS = ShardedGradScaler = object
class ShardedNativeMixedPrecisionPlugin(MixedPrecisionPlugin):
    """Native AMP for Sharded Training."""

    def __init__(
        self, precision: Literal["16", 16, "bf16"], device: str, scaler: Optional[ShardedGradScaler] = None
    ) -> None:
        rank_zero_deprecation(
            "PyTorch Lightning's sharded implementation using FairScale has been deprecated in v1.9.0 and will be"
            " removed in v2.0.0. You can try using the `Trainer(strategy='fsdp_native')` instead."
            " The difference is that native FSDP uses PyTorch's implementation and the current strategy uses"
            " FairScale's implementation (which was upstreamed to PyTorch). After removal, `strategy='fsdp'` will use"
            " the native version by default."
        )
        if not _FAIRSCALE_AVAILABLE:
            raise MisconfigurationException(
                "You have asked for sharded AMP but you have not installed it."
                " Install `fairscale` using this guide: https://github.com/facebookresearch/fairscale"
            )
        super().__init__(
            precision, device, scaler=(ShardedGradScaler() if scaler is None and str(precision) == "16" else None)
        )
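For context, a minimal sketch of how this plugin can be constructed directly, based only on the signature above; it assumes `fairscale` is installed (otherwise the constructor raises `MisconfigurationException`) and that a CUDA device is available. In typical use the plugin is created for you by the Trainer rather than instantiated by hand.

    from pytorch_lightning.plugins.precision.sharded_native_amp import ShardedNativeMixedPrecisionPlugin

    # Sketch: with precision 16 and no scaler given, a ShardedGradScaler is created internally.
    plugin = ShardedNativeMixedPrecisionPlugin(precision=16, device="cuda")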