How to solve NotImplementedError

Hi, I am trying to use PyTorch Lightning to detect objects in my own dataset. The model I am using is Faster R-CNN, not a model I wrote myself. After running, it raises a NotImplementedError, shown in the log below. Because the error does not show where I might have made a mistake, I want to ask for some suggestions on how to solve the problem. Thank you!

This is my code

from typing import Any, Dict, Iterable, List, Optional, Tuple, TypedDict, Union
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning import seed_everything, LightningModule, Trainer
import vision.transforms as Transforms
from torch.utils.data import DataLoader
import torch.utils
import vision.utils as utils
import torchvision
import torch.nn.functional as F
from torch import save
from vision.paper_dataset import PaperDataset

class LitDocumentReader(LightningModule):

    def __init__(self, pretrained: bool = False, backbone: str = None, progress: bool = True,
                 pretrained_backbone: bool = True, num_classes: int = 7):

        super(LitDocumentReader, self).__init__()
        self.num_classes = num_classes
        self.backbone = backbone
        self.lr = 0.0003
        if backbone is None:
            self.model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
                pretrained=pretrained, progress=progress,
                num_classes=num_classes, pretrained_backbone=pretrained_backbone)

    def get_transform(self, train):
        transforms = []
        transforms.append(Transforms.ToTensor())
        if train:
            transforms.append(Transforms.RandomHorizontalFlip(0.5))

        return Transforms.Compose(transforms)

    def configure_optimizers(self):
        params = [p for p in self.parameters() if p.requires_grad]
        return torch.optim.SGD(params, lr=0.0003, momentum=0.9, weight_decay=0.0005)

    def train_dataloader(self) -> DataLoader:
        root = r""
        dataset = PaperDataset(root, self.get_transform(train=True))
        indices = torch.randperm(len(dataset)).tolist()
        dataset = torch.utils.data.Subset(dataset, indices[:38])
        return DataLoader(dataset, batch_size=2, shuffle=True, collate_fn=utils.collate_fn)

    def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
        root = r""
        dataset_test = PaperDataset(root, self.get_transform(train=False))
        indices = torch.randperm(len(dataset_test)).tolist()
        dataset = torch.utils.data.Subset(dataset_test, indices[:38])
        return DataLoader(dataset, batch_size=2, shuffle=False, collate_fn=utils.collate_fn)

    def training_step(self, batch, batch_nb):
        x, y = batch
        loss = F.binary_cross_entropy(self(x), y)
        return {'loss': loss, 'log': {'train_loss': loss}}

if __name__ == '__main__':
    seed_everything(42)
    device = 'cpu'
    num_classes = 7
    early_stop_callback = EarlyStopping(monitor='val_loss', min_delta=0.00, patience=5, verbose=True, mode='auto')

    model = LitDocumentReader()

    trainer = Trainer(max_epochs=10, min_epochs=1, auto_lr_find=True, auto_scale_batch_size=False,
                  progress_bar_refresh_rate=10, callbacks=[early_stop_callback])
    # trainer.tune(model)
    trainer.fit(model)
    save(model.state_dict(), 'Location of our saved model')

Here is my log:

EarlyStopping mode set to min for monitoring val_loss.
GPU available: False, used: False
TPU available: None, using: 0 TPU cores

| Name | Type | Params
0 | model | FasterRCNN | 41.3 M

41.1 M Trainable params
222 K Non-trainable params
41.3 M Total params
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/utilities/distributed.py:49: UserWarning: The dataloader, train dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the num_workers argument (try 8 which is the number of cpus on this machine) in the DataLoader init to improve performance.
warnings.warn(*args, **kwargs)
Epoch 0: 0%| | 0/19 [00:00<?, ?it/s]
Traceback (most recent call last):
File "/Users/Documents/GitHub/doculist/vision/pdf_reader_v2.1.py", line 68, in <module>
trainer.fit(model)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 470, in fit
results = self.accelerator_backend.train()
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/accelerators/cpu_accelerator.py", line 62, in train
results = self.train_or_test()
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 69, in train_or_test
results = self.trainer.train()
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 521, in train
self.train_loop.run_training_epoch()
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 560, in run_training_epoch
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 718, in run_training_batch
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 493, in optimizer_step
model_ref.optimizer_step(
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py", line 1258, in optimizer_step
optimizer.step(closure=optimizer_closure)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py", line 278, in step
self.__optimizer_step(*args, closure=closure, profiler_name=profiler_name, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py", line 136, in __optimizer_step
optimizer.step(closure=closure, *args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 26, in decorate_context
return func(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/optim/sgd.py", line 86, in step
loss = closure()
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 708, in train_step_and_backward_closure
result = self.training_step_and_backward(
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 806, in training_step_and_backward
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 330, in training_step
training_step_output = self.trainer.accelerator_backend.training_step(args)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/accelerators/cpu_accelerator.py", line 74, in training_step
return self._step(self.trainer.model.training_step, args)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/accelerators/cpu_accelerator.py", line 70, in _step
output = model_step(*args)
File "/Users/tianyini/Documents/GitHub/doculist/vision/pdf_reader_v2.1.py", line 54, in training_step
loss = F.binary_cross_entropy(self(x), y)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py", line 439, in forward
return super().forward(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/torch/nn/modules/module.py", line 175, in _forward_unimplemented
raise NotImplementedError
NotImplementedError

You need to implement a forward method if you are calling self(x), or instead replace self(x) with self.model(x).

Something related: https://pytorch-lightning.readthedocs.io/en/latest/style_guide.html#forward-vs-training-step
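For the second option, here is a minimal sketch of a corrected training_step (assuming your PaperDataset yields (images, targets) pairs in the format torchvision's detection models expect; in training mode these models take the targets as a second argument and return a dict of losses, so you would sum those instead of computing binary_cross_entropy yourself):

    def training_step(self, batch, batch_nb):
        images, targets = batch
        # in training mode, torchvision detection models take (images, targets)
        # and return a dict of the individual losses rather than predictions
        loss_dict = self.model(list(images), list(targets))
        loss = sum(loss_dict.values())
        return {'loss': loss, 'log': {'train_loss': loss}}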

In Python, NotImplementedError is an exception that is raised when a method or function is not implemented, or when a subclass does not implement an abstract method from its parent class. This exception can be raised intentionally to signal that the functionality is not yet implemented, or it can be raised accidentally due to a coding mistake.

Check whether the method or function is intentionally left unimplemented. If the NotImplementedError is raised intentionally, you can leave the method or function as is, or provide a placeholder implementation that raises the NotImplementedError. However, if the NotImplementedError is raised accidentally, you need to provide a proper implementation.
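For example, an intentional placeholder might look like this (Exporter is a made-up name, just for illustration):

    class Exporter:
        def export(self, path):
            # deliberate placeholder: signals that a real implementation
            # is still missing and subclasses must override this
            raise NotImplementedError("export() is not implemented yet")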

If you are dealing with a Python abstract method, you can either provide a concrete implementation for the method in the subclass, or mark the subclass as abstract and force its children to implement the method.
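Both options in a short sketch (Reader and PdfReader are hypothetical names for illustration):

    from abc import ABC, abstractmethod

    class Reader(ABC):
        @abstractmethod
        def read(self, path):
            ...

    # option 1: give the subclass a concrete implementation
    class PdfReader(Reader):
        def read(self, path):
            with open(path, 'rb') as f:
                return f.read()

    # option 2: leave the method abstract; Python then refuses to instantiate
    # the subclass (with a TypeError) until a child implements read()
    class StubReader(Reader):
        pass  # StubReader() raises TypeError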