RuntimeError: BroadcastBackward in-place modification of view

I’m implementing a relatively simple conditional GAN with PyTorch Lightning and repeatedly getting errors like this:

RuntimeError: Output 6 of BroadcastBackward is a view and its base or another view of its base has been modified inplace. This view is the output of a function that returns multiple views. Such functions do not allow the output views to be modified inplace. You should replace the inplace operation by an out-of-place one.
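
From reading the message, this error seems to fire when a tensor produced by a function that returns multiple views (chunk/split/unbind, or the parameter broadcast behind BroadcastBackward) is used after its base was changed in place. A minimal illustration of that pattern as I understand it (not my model code, just the shape of the problem):

import torch

x = torch.randn(6, requires_grad=True).clone()  # non-leaf, so in-place ops are allowed
a, b = x.chunk(2)   # chunk returns multiple views of x
x.add_(1.0)         # in-place modification of the views' base
loss = a.sum()      # using the view now triggers the same kind of RuntimeError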

I’m having trouble isolating the issue in my own code, though. Apparently a view (or its base) is being modified in place somewhere? It also seems autograd-related, since the failing node is the backward of a broadcast. Can anybody spot the error? Here’s the source code of my LightningModule:

import torch
import torch.nn as nn
import torchvision
import pytorch_lightning as pl


class CovidCTGAN(pl.LightningModule):
    def __init__(self, generator, discriminator, config):
        super().__init__()
        self.generator = generator
        self.discriminator = discriminator
        self.noise_dim = config.generator.in_dim
        self.g_optim_cfg = config.generator.optimizer
        self.d_optim_cfg = config.discriminator.optimizer
        self.automatic_optimization = False
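        # Fixed noise and one-hot labels (five per class) for end-of-epoch sample grids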
        self.validation_z = torch.randn(10, self.noise_dim)
        self.validation_l = torch.cat(
            [torch.tensor([[1, 0]] * 5), torch.tensor([[0, 1]] * 5)]
        )
        self.loss_fn = nn.BCELoss(reduction="mean")

    def forward(self, z, l):
        return self.generator(z, l)

    def training_step(self, batch, batch_idx):

        g_opt, d_opt = self.optimizers()

        imgs, labels = batch
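        # Turn the scalar 0/1 class labels into two-column one-hot vectors [p, 1 - p]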
        labels = labels.view(-1, 1)
        labelsp = torch.abs(torch.ones(imgs.shape[0], 1).to(self.device) - labels)
        labels = torch.cat([labels, labelsp], 1)

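        # Latent noise plus random one-hot condition labels for the generator;
        # gt_l / fake_l are the BCE targets for real and fake samples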
        z = torch.randn(imgs.shape[0], self.noise_dim)
        z = z.type_as(imgs)
        z_l = torch.randint(low=0, high=2, size=(imgs.shape[0], 1))
        z_lp = torch.abs(torch.ones(imgs.shape[0], 1) - z_l)
        z_l = torch.cat([z_l, z_lp], dim=1).to(self.device)
        gt_l = torch.ones(imgs.shape[0], 1).to(self.device)
        fake_l = torch.zeros(imgs.shape[0], 1).to(self.device)

        # Sample the generator once; reused for both the D and G updates below
        gen_imgs = self(z, z_l)

        # Train Discriminator (gen_imgs detached so no gradient reaches G here)
        real_loss = self.loss_fn(self.discriminator(imgs, labels), gt_l)
        fake_loss = self.loss_fn(self.discriminator(gen_imgs.detach(), z_l), fake_l)
        d_loss = fake_loss + real_loss
        d_opt.zero_grad()
        self.manual_backward(d_loss)
        d_opt.step()

        # Train Generator (undetached gen_imgs so gradients flow back through G)
        g_loss = self.loss_fn(self.discriminator(gen_imgs, z_l), gt_l)
        g_opt.zero_grad()
        self.manual_backward(g_loss)
        g_opt.step()

        self.log_dict(
            {"discriminator_loss": d_loss, "generator_loss": g_loss},
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )

    def on_epoch_end(self):
        # Move the fixed validation inputs onto the model's device before sampling
        z = self.validation_z.to(self.device)
        l = self.validation_l.to(self.device)
        sample_imgs = self(z, l)
        grid = torchvision.utils.make_grid(sample_imgs)
        self.logger.experiment.add_image("generated_images", grid, self.current_epoch)

    def configure_optimizers(self):
        g_lr = self.g_optim_cfg.lr
        g_l2 = self.g_optim_cfg.weight_decay
        g_betas = self.g_optim_cfg.betas
        d_lr = self.d_optim_cfg.lr
        d_l2 = self.d_optim_cfg.weight_decay
        d_betas = self.d_optim_cfg.betas
        opt_g = torch.optim.Adam(
            self.generator.parameters(), lr=g_lr, betas=g_betas, weight_decay=g_l2
        )
        opt_d = torch.optim.Adam(
            self.discriminator.parameters(), lr=d_lr, betas=d_betas, weight_decay=d_l2
        )
        return [opt_g, opt_d], []
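
In case it helps to reproduce, the module is driven by a standard Lightning loop, roughly like this (a sketch; generator, discriminator, config, and train_loader are placeholders built elsewhere in my code):

model = CovidCTGAN(generator, discriminator, config)
trainer = pl.Trainer(max_epochs=100)  # accelerator/strategy flags omitted
trainer.fit(model, train_loader)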