Source code for pytorch_lightning.callbacks.gpu_stats_monitor
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GPU Stats Monitor
=================

Monitors and logs GPU stats during training.

"""
import os
import shutil
import subprocess
import time
from typing import Any, Dict, List, Optional, Tuple

import torch

import pytorch_lightning as pl
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_only
from pytorch_lightning.utilities.types import STEP_OUTPUT
class GPUStatsMonitor(Callback):
    r"""
    .. deprecated:: v1.5
        The `GPUStatsMonitor` callback was deprecated in v1.5 and will be removed in v1.7.
        Please use the `DeviceStatsMonitor` callback instead.

    Automatically monitors and logs GPU stats during the training stage. ``GPUStatsMonitor`` is a callback and in
    order to use it you need to assign a logger in the ``Trainer``.

    Args:
        memory_utilization: Set to ``True`` to monitor used, free and percentage of memory
            utilization at the start and end of each step. Default: ``True``.
        gpu_utilization: Set to ``True`` to monitor percentage of GPU utilization
            at the start and end of each step. Default: ``True``.
        intra_step_time: Set to ``True`` to monitor the time of each step. Default: ``False``.
        inter_step_time: Set to ``True`` to monitor the time between the end of one step
            and the start of the next step. Default: ``False``.
        fan_speed: Set to ``True`` to monitor percentage of fan speed. Default: ``False``.
        temperature: Set to ``True`` to monitor the memory and GPU temperature in degrees Celsius.
            Default: ``False``.

    Raises:
        MisconfigurationException:
            If the NVIDIA driver is not installed, not running on GPUs, or the ``Trainer`` has no logger.

    Example::

        >>> from pytorch_lightning import Trainer
        >>> from pytorch_lightning.callbacks import GPUStatsMonitor
        >>> gpu_stats = GPUStatsMonitor()  # doctest: +SKIP
        >>> trainer = Trainer(callbacks=[gpu_stats])  # doctest: +SKIP

    GPU stats are mainly based on the `nvidia-smi --query-gpu` command. The description of the queries is as follows:

    - **fan.speed** – The fan speed value is the percent of the maximum speed that the device's fan is currently
      intended to run at. It ranges from 0 to 100 %. Note: The reported speed is the intended fan speed.
      If the fan is physically blocked and unable to spin, this output will not match the actual fan speed.
      Many parts do not report fan speeds because they rely on cooling via fans in the surrounding enclosure.
    - **memory.used** – Total memory allocated by active contexts.
    - **memory.free** – Total free memory.
    - **utilization.gpu** – Percent of time over the past sample period during which one or more kernels was
      executing on the GPU. The sample period may be between 1 second and 1/6 second depending on the product.
    - **utilization.memory** – Percent of time over the past sample period during which global (device) memory was
      being read or written. The sample period may be between 1 second and 1/6 second depending on the product.
    - **temperature.gpu** – Core GPU temperature, in degrees C.
    - **temperature.memory** – HBM memory temperature, in degrees C.
    """

    def __init__(
        self,
        memory_utilization: bool = True,
        gpu_utilization: bool = True,
        intra_step_time: bool = False,
        inter_step_time: bool = False,
        fan_speed: bool = False,
        temperature: bool = False,
    ):
        super().__init__()

        rank_zero_deprecation(
            "The `GPUStatsMonitor` callback was deprecated in v1.5 and will be removed in v1.7."
            " Please use the `DeviceStatsMonitor` callback instead."
        )

        if shutil.which("nvidia-smi") is None:
            raise MisconfigurationException(
                "Cannot use GPUStatsMonitor callback because NVIDIA driver is not installed."
            )

        self._log_stats = AttributeDict(
            {
                "memory_utilization": memory_utilization,
                "gpu_utilization": gpu_utilization,
                "intra_step_time": intra_step_time,
                "inter_step_time": inter_step_time,
                "fan_speed": fan_speed,
                "temperature": temperature,
            }
        )

        # The logical device IDs for selected devices
        self._device_ids: List[int] = []  # will be assigned later in setup()

        # The unmasked real GPU IDs
        self._gpu_ids: List[str] = []  # will be assigned later in setup()
    def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: Optional[str] = None) -> None:
        if not trainer.loggers:
            raise MisconfigurationException("Cannot use GPUStatsMonitor callback with Trainer that has no logger.")

        if trainer.strategy.root_device.type != "cuda":
            raise MisconfigurationException(
                "You are using GPUStatsMonitor but are not running on GPU."
                f" The root device type is {trainer.strategy.root_device.type}."
            )

        # The logical device IDs for selected devices
        self._device_ids = sorted(set(trainer.device_ids))

        # The unmasked real GPU IDs
        self._gpu_ids = self._get_gpu_ids(self._device_ids)
    @rank_zero_only
    def on_train_batch_start(
        self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", batch: Any, batch_idx: int
    ) -> None:
        if self._log_stats.intra_step_time:
            self._snap_intra_step_time = time.time()

        if not trainer._logger_connector.should_update_logs:
            return

        gpu_stat_keys = self._get_gpu_stat_keys()
        gpu_stats = self._get_gpu_stats([k for k, _ in gpu_stat_keys])
        logs = self._parse_gpu_stats(self._device_ids, gpu_stats, gpu_stat_keys)

        if self._log_stats.inter_step_time and self._snap_inter_step_time:
            # First log at beginning of second step
            logs["batch_time/inter_step (ms)"] = (time.time() - self._snap_inter_step_time) * 1000

        for logger in trainer.loggers:
            logger.log_metrics(logs, step=trainer.fit_loop.epoch_loop._batches_that_stepped)
    @staticmethod
    def _get_gpu_ids(device_ids: List[int]) -> List[str]:
        """Get the unmasked real GPU IDs."""
        # All devices if `CUDA_VISIBLE_DEVICES` unset
        default = ",".join(str(i) for i in range(torch.cuda.device_count()))
        cuda_visible_devices: List[str] = os.getenv("CUDA_VISIBLE_DEVICES", default=default).split(",")
        return [cuda_visible_devices[device_id].strip() for device_id in device_ids]

    def _get_gpu_stats(self, queries: List[str]) -> List[List[float]]:
        """Run nvidia-smi to get the gpu stats."""
        if not queries:
            return []

        gpu_query = ",".join(queries)
        format = "csv,nounits,noheader"
        gpu_ids = ",".join(self._gpu_ids)
        result = subprocess.run(
            [
                # it's ok to suppress the warning here since we ensure nvidia-smi exists during init
                shutil.which("nvidia-smi"),  # type: ignore
                f"--query-gpu={gpu_query}",
                f"--format={format}",
                f"--id={gpu_ids}",
            ],
            encoding="utf-8",
            capture_output=True,
            check=True,
        )

        def _to_float(x: str) -> float:
            try:
                return float(x)
            except ValueError:
                return 0.0

        stats = [[_to_float(x) for x in s.split(", ")] for s in result.stdout.strip().split(os.linesep)]
        return stats

    @staticmethod
    def _parse_gpu_stats(
        device_ids: List[int], stats: List[List[float]], keys: List[Tuple[str, str]]
    ) -> Dict[str, float]:
        """Parse the gpu stats into a loggable dict."""
        logs = {}
        for i, device_id in enumerate(device_ids):
            for j, (x, unit) in enumerate(keys):
                logs[f"device_id: {device_id}/{x} ({unit})"] = stats[i][j]
        return logs

    def _get_gpu_stat_keys(self) -> List[Tuple[str, str]]:
        """Get the GPU stats keys."""
        stat_keys = []
        if self._log_stats.gpu_utilization:
            stat_keys.append(("utilization.gpu", "%"))
        if self._log_stats.memory_utilization:
            stat_keys.extend([("memory.used", "MB"), ("memory.free", "MB"), ("utilization.memory", "%")])
        return stat_keys

    def _get_gpu_device_stat_keys(self) -> List[Tuple[str, str]]:
        """Get the device stats keys."""
        stat_keys = []
        if self._log_stats.fan_speed:
            stat_keys.append(("fan.speed", "%"))
        if self._log_stats.temperature:
            stat_keys.extend([("temperature.gpu", "°C"), ("temperature.memory", "°C")])
        return stat_keys
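

# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): it shows how the
# callback turns `nvidia-smi` output into the metric names it logs. The
# `raw_output` string below is an invented example of the
# `csv,nounits,noheader` format for two GPUs; real values depend on your
# hardware and on which stat keys are enabled.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Hypothetical output of:
    #   nvidia-smi --query-gpu=utilization.gpu,memory.used,memory.free,utilization.memory \
    #              --format=csv,nounits,noheader --id=0,1
    raw_output = "87, 10240, 6144, 55\n42, 8192, 8192, 30"

    # Default stat keys when `gpu_utilization` and `memory_utilization` are enabled.
    keys = [("utilization.gpu", "%"), ("memory.used", "MB"), ("memory.free", "MB"), ("utilization.memory", "%")]

    # One row of floats per queried GPU, mirroring what `_get_gpu_stats` returns.
    stats = [[float(x) for x in line.split(", ")] for line in raw_output.strip().split("\n")]

    # `_parse_gpu_stats` is a staticmethod, so it can be called without instantiating the callback.
    logs = GPUStatsMonitor._parse_gpu_stats(device_ids=[0, 1], stats=stats, keys=keys)
    for name, value in logs.items():
        print(f"{name}: {value}")
    # Prints e.g. "device_id: 0/utilization.gpu (%): 87.0", "device_id: 1/memory.free (MB): 8192.0", ...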