When I use pytorch_lightning.metrics.classification.F1 in my LightningModule, I get the error shown below.
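For context, here is a stripped-down sketch of the kind of module involved. It is illustrative only: the attribute names, data shapes, and the training_step body are assumptions, and in particular the idea that the F1 value itself is returned as the loss is a guess; only the F1 metric children, the Conv2d(3, 16, ...) first layer, and the SGD settings are taken from the traceback below.

import torch
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.metrics.classification import F1


class UNetModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        # stand-in for the real UNet (the traceback only shows conv1 = Conv2d(3, 16, ...))
        self.unet = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(16, 1, kernel_size=1),
        )
        # the F1 metric children visible in the module repr of the traceback
        self.train_f1 = F1(num_classes=1)
        self.val_f1 = F1(num_classes=1)

    def forward(self, x):
        return self.unet(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        probs = torch.sigmoid(self(x))
        # assumption: the value returned as the loss is the F1 metric output itself
        loss = self.train_f1(probs, y.int())
        self.log("loss/train/f1", loss)
        return loss

    def configure_optimizers(self):
        # settings taken from the traceback: SGD, lr=0.1, momentum=0.99
        return torch.optim.SGD(self.parameters(), lr=0.1, momentum=0.99)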
Error with better-exceptions:
.../site-packages/pytorch_lightning/utilities/distributed.py:50: RuntimeWarning: You are using `LearningRateMonitor` callback with models that have no learning rate schedulers. Please see documentation for `configure_optimizers` method.
warnings.warn(*args, **kwargs)
Epoch 0: 0%| | 0/24 [00:01<?, ?it/s]
Traceback (most recent call last):
File ".../utils/models.py", line 106, in train_model
return trainer.fit(model_instance, datamodule=data)
│ │ └ <pynuclei.utils.data.DataModule object at 0x2b1be7f907d0>
│ └ UNetModel(
(loss/train/f1): F1()
(loss/val/f1): F1()
(unet): UNet(
(conv1): Sequential(
(0): Conv2d(3, 16, kerne...
└ <pytorch_lightning.trainer.trainer.Trainer object at 0x2b135b65f490>
File ".../site-packages/pytorch_lightning/trainer/trainer.py", line 513, in fit
self.dispatch()
└ <pytorch_lightning.trainer.trainer.Trainer object at 0x2b135b65f490>
File ".../site-packages/pytorch_lightning/trainer/trainer.py", line 553, in dispatch
self.accelerator.start_training(self)
│ └ <pytorch_lightning.trainer.trainer.Trainer object at 0x2b135b65f490>
└ <pytorch_lightning.trainer.trainer.Trainer object at 0x2b135b65f490>
File ".../site-packages/pytorch_lightning/accelerators/accelerator.py", line 74, in start_training
self.training_type_plugin.start_training(trainer)
│ └ <pytorch_lightning.trainer.trainer.Trainer object at 0x2b135b65f490>
└ <pytorch_lightning.accelerators.cpu.CPUAccelerator object at 0x2b1be7f5add0>
File ".../site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py", line 111, in start_training
self._results = trainer.run_train()
│ └ <pytorch_lightning.trainer.trainer.Trainer object at 0x2b135b65f490>
└ <pytorch_lightning.plugins.training_type.single_device.SingleDevicePlugin object at 0x2b1be7adfd50>
File ".../site-packages/pytorch_lightning/trainer/trainer.py", line 644, in run_train
self.train_loop.run_training_epoch()
└ <pytorch_lightning.trainer.trainer.Trainer object at 0x2b135b65f490>
File ".../site-packages/pytorch_lightning/trainer/training_loop.py", line 492, in run_training_epoch
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
│ │ │ └ 0
│ │ └ 0
│ └ [tensor([[[[0.7765, 0.7137, 0.6314, ..., 0.6431, 0.5804, 0.5020],
[0.8706, 0.8549, 0.7569, ..., 0.5373, 0.4745, 0.43...
└ <pytorch_lightning.trainer.training_loop.TrainLoop object at 0x2b1be7f73f50>
File ".../site-packages/pytorch_lightning/trainer/training_loop.py", line 650, in run_training_batch
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
│ │ │ │ └ <function TrainLoop.run_training_batch.<locals>.train_step_and_backward_closure at 0x2b1c8ac7a050>
│ │ │ └ 0
│ │ └ 0
│ └ SGD (
Parameter Group 0
dampening: 0
lr: 0.1
momentum: 0.99
nesterov: False
weight_decay: 0
)
└ <pytorch_lightning.trainer.training_loop.TrainLoop object at 0x2b1be7f73f50>
File ".../site-packages/pytorch_lightning/trainer/training_loop.py", line 434, in optimizer_step
using_lbfgs=is_lbfgs,
└ False
File ".../site-packages/pytorch_lightning/core/lightning.py", line 1384, in optimizer_step
optimizer.step(closure=optimizer_closure)
│ └ <function TrainLoop.run_training_batch.<locals>.train_step_and_backward_closure at 0x2b1c8ac7a050>
└ LightningSGD(groups=[{'dampening': 0, 'lr': 0.1, 'momentum': 0.99, 'nesterov': False, 'weight_decay': 0}])
File ".../site-packages/pytorch_lightning/core/optimizer.py", line 219, in step
self.__optimizer_step(*args, closure=closure, profiler_name=profiler_name, **kwargs)
│ │ │ │ └ {}
│ │ │ └ 'optimizer_step_and_closure_0'
│ │ └ <function TrainLoop.run_training_batch.<locals>.train_step_and_backward_closure at 0x2b1c8ac7a050>
│ └ ()
└ LightningSGD(groups=[{'dampening': 0, 'lr': 0.1, 'momentum': 0.99, 'nesterov': False, 'weight_decay': 0}])
File ".../site-packages/pytorch_lightning/core/optimizer.py", line 135, in __optimizer_step
trainer.accelerator.optimizer_step(optimizer, self._optimizer_idx, lambda_closure=closure, **kwargs)
│ │ │ │ └ {}
│ │ │ └ <function TrainLoop.run_training_batch.<locals>.train_step_and_backward_closure at 0x2b1c8ac7a050>
│ │ └ LightningSGD(groups=[{'dampening': 0, 'lr': 0.1, 'momentum': 0.99, 'nesterov': False, 'weight_decay': 0}])
│ └ SGD (
Parameter Group 0
dampening: 0
lr: 0.1
momentum: 0.99
nesterov: False
weight_decay: 0
)
└ <weakproxy at 0x2b13590a5650 to Trainer at 0x2b135b65f490>
File ".../site-packages/pytorch_lightning/accelerators/accelerator.py", line 278, in optimizer_step
self.run_optimizer_step(optimizer, opt_idx, lambda_closure, **kwargs)
│ │ │ │ └ {}
│ │ │ └ <function TrainLoop.run_training_batch.<locals>.train_step_and_backward_closure at 0x2b1c8ac7a050>
│ │ └ 0
│ └ SGD (
Parameter Group 0
dampening: 0
lr: 0.1
momentum: 0.99
nesterov: False
weight_decay: 0
)
└ <pytorch_lightning.accelerators.cpu.CPUAccelerator object at 0x2b1be7f5add0>
File ".../site-packages/pytorch_lightning/accelerators/accelerator.py", line 283, in run_optimizer_step
self.training_type_plugin.optimizer_step(optimizer, lambda_closure=lambda_closure, **kwargs)
│ │ │ └ {}
│ │ └ <function TrainLoop.run_training_batch.<locals>.train_step_and_backward_closure at 0x2b1c8ac7a050>
│ └ SGD (
Parameter Group 0
dampening: 0
lr: 0.1
momentum: 0.99
nesterov: False
weight_decay: 0
)
└ <pytorch_lightning.accelerators.cpu.CPUAccelerator object at 0x2b1be7f5add0>
File ".../site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py", line 160, in optimizer_step
optimizer.step(closure=lambda_closure, **kwargs)
│ │ └ {}
│ └ <function TrainLoop.run_training_batch.<locals>.train_step_and_backward_closure at 0x2b1c8ac7a050>
└ SGD (
Parameter Group 0
dampening: 0
lr: 0.1
momentum: 0.99
nesterov: False
weight_decay: 0
)
File ".../site-packages/torch/autograd/grad_mode.py", line 15, in decorate_context
return func(*args, **kwargs)
│ │ └ {'closure': <function TrainLoop.run_training_batch.<locals>.train_step_and_backward_closure at 0x2b1c8ac7a050>}
│ └ (SGD (
Parameter Group 0
dampening: 0
lr: 0.1
momentum: 0.99
nesterov: False
weight_decay: 0
),)
└ <function SGD.step at 0x2b1be719ca70>
File ".../site-packages/torch/optim/sgd.py", line 86, in step
loss = closure()
│ └ <function TrainLoop.run_training_batch.<locals>.train_step_and_backward_closure at 0x2b1c8ac7a050>
└ None
File ".../site-packages/pytorch_lightning/trainer/training_loop.py", line 645, in train_step_and_backward_closure
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
│ │ │ │ └ <pytorch_lightning.trainer.training_loop.TrainLoop object at 0x2b1be7f73f50>
│ │ │ └ SGD (
Parameter Group 0
dampening: 0
lr: 0.1
momentum: 0.99
nesterov: False
weight_decay: 0
)
│ │ └ 0
│ └ 0
└ [tensor([[[[0.7765, 0.7137, 0.6314, ..., 0.6431, 0.5804, 0.5020],
[0.8706, 0.8549, 0.7569, ..., 0.5373, 0.4745, 0.43...
File ".../site-packages/pytorch_lightning/trainer/training_loop.py", line 745, in training_step_and_backward
self.backward(result, optimizer, opt_idx)
│ │ │ └ 0
│ │ └ SGD (
Parameter Group 0
dampening: 0
lr: 0.1
momentum: 0.99
nesterov: False
weight_decay: 0
)
│ └ "closure_loss": 0.3339143395423889
"hiddens": None
"loss": ...
└ <pytorch_lightning.trainer.training_loop.TrainLoop object at 0x2b1be7f73f50>
File ".../site-packages/pytorch_lightning/trainer/training_loop.py", line 775, in backward
result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
File ".../site-packages/pytorch_lightning/accelerators/accelerator.py", line 258, in backward
self.lightning_module, closure_loss, optimizer, optimizer_idx, should_accumulate, *args, **kwargs
File ".../site-packages/pytorch_lightning/plugins/precision/precision_plugin.py", line 71, in backward
model.backward(closure_loss, optimizer, opt_idx)
│ │ │ └ 0
│ │ └ SGD (
Parameter Group 0
dampening: 0
lr: 0.1
momentum: 0.99
nesterov: False
weight_decay: 0
)
│ └ tensor(0.3339)
└ UNetModel(
(loss/train/f1): F1()
(loss/val/f1): F1()
(unet): UNet(
(conv1): Sequential(
(0): Conv2d(3, 16, kerne...
File ".../site-packages/pytorch_lightning/core/lightning.py", line 1245, in backward
loss.backward(*args, **kwargs)
│ │ └ {}
│ └ ()
└ tensor(0.3339)
File ".../site-packages/torch/tensor.py", line 185, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
│ │ │ │ └ False
│ │ │ └ None
│ │ └ None
│ └ tensor(0.3339)
└ <module 'torch' from '.../site-packa...
File ".../site-packages/torch/autograd/__init__.py", line 127, in backward
allow_unreachable=True) # allow_unreachable flag
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
The same "normal" error (without better-exceptions):
.../site-packages/pytorch_lightning/utilities/distributed.py:50: RuntimeWarning: You are using `LearningRateMonitor` callback with models that have no learning rate schedulers. Please see documentation for `configure_optimizers` method.
warnings.warn(*args, **kwargs)
Epoch 0: 0%| | 0/24 [00:01<?, ?it/s]
Traceback (most recent call last):
File ".../utils/models.py", line 106, in train_model
return trainer.fit(model_instance, datamodule=data)
File ".../site-packages/pytorch_lightning/trainer/trainer.py", line 513, in fit
self.dispatch()
File ".../site-packages/pytorch_lightning/trainer/trainer.py", line 553, in dispatch
self.accelerator.start_training(self)
File ".../site-packages/pytorch_lightning/accelerators/accelerator.py", line 74, in start_training
self.training_type_plugin.start_training(trainer)
File ".../site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py", line 111, in start_training
self._results = trainer.run_train()
File ".../site-packages/pytorch_lightning/trainer/trainer.py", line 644, in run_train
self.train_loop.run_training_epoch()
File ".../site-packages/pytorch_lightning/trainer/training_loop.py", line 492, in run_training_epoch
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
File ".../site-packages/pytorch_lightning/trainer/training_loop.py", line 650, in run_training_batch
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
File ".../site-packages/pytorch_lightning/trainer/training_loop.py", line 434, in optimizer_step
using_lbfgs=is_lbfgs,
File ".../site-packages/pytorch_lightning/core/lightning.py", line 1384, in optimizer_step
optimizer.step(closure=optimizer_closure)
File ".../site-packages/pytorch_lightning/core/optimizer.py", line 219, in step
self.__optimizer_step(*args, closure=closure, profiler_name=profiler_name, **kwargs)
File ".../site-packages/pytorch_lightning/core/optimizer.py", line 135, in __optimizer_step
trainer.accelerator.optimizer_step(optimizer, self._optimizer_idx, lambda_closure=closure, **kwargs)
File ".../site-packages/pytorch_lightning/accelerators/accelerator.py", line 278, in optimizer_step
self.run_optimizer_step(optimizer, opt_idx, lambda_closure, **kwargs)
File ".../site-packages/pytorch_lightning/accelerators/accelerator.py", line 283, in run_optimizer_step
self.training_type_plugin.optimizer_step(optimizer, lambda_closure=lambda_closure, **kwargs)
File ".../site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py", line 160, in optimizer_step
optimizer.step(closure=lambda_closure, **kwargs)
File ".../site-packages/torch/optim/sgd.py", line 86, in step
loss = closure()
File ".../site-packages/pytorch_lightning/trainer/training_loop.py", line 645, in train_step_and_backward_closure
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
File ".../site-packages/pytorch_lightning/trainer/training_loop.py", line 775, in backward
result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
File ".../site-packages/pytorch_lightning/accelerators/accelerator.py", line 258, in backward
self.lightning_module, closure_loss, optimizer, optimizer_idx, should_accumulate, *args, **kwargs
File ".../site-packages/pytorch_lightning/plugins/precision/precision_plugin.py", line 71, in backward
model.backward(closure_loss, optimizer, opt_idx)
File ".../site-packages/torch/autograd/__init__.py", line 127, in backward
allow_unreachable=True) # allow_unreachable flag
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
When I use pytorch_lightning.metrics.regression.MeanSquaredError with the same model, I get no error.
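For comparison, here is a small standalone check of the difference between the two metrics (illustrative only; the shapes and num_classes are assumptions). MeanSquaredError is computed with differentiable tensor operations, so its output keeps a grad_fn, whereas the classification F1 is derived from thresholded counts, so a loss built from it would not have one, which matches the RuntimeError above.

import torch
from pytorch_lightning.metrics.classification import F1
from pytorch_lightning.metrics.regression import MeanSquaredError

preds = torch.rand(16, requires_grad=True)   # predicted probabilities
target = torch.randint(0, 2, (16,))          # binary ground truth

mse = MeanSquaredError()(preds, target.float())
print(mse.requires_grad)   # expected True: backward() works, so training proceeds

f1 = F1(num_classes=1)(preds, target)
print(f1.requires_grad)    # expected False: backward() raises the RuntimeError above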