Why the following code executes 3x slower then plain PyTorch training?
class NeuralNetwork(nn.Module):
def __init__(self):
super().__init__()
self.linear_relu_stack = nn.Sequential(
nn.Linear(331, 512),
nn.ReLU(),
nn.Linear(512, 1)
)
def forward(self, x):
log_val = self.linear_relu_stack(x)
return log_val
train_dataloader = DataLoader(TensorDataset(train_features, train_target), batch_size=1, shuffle=True)
test_dataloader = DataLoader(TensorDataset(test_features, test_target), batch_size=1, shuffle=False)
class HousePriceModule(pl.LightningModule):
def __init__(self, model):
super().__init__()
self.model = model
self.loss = nn.MSELoss()
def training_step(self, batch, batch_idx):
x, y = batch
loss = self.loss(self.model(x), y)
#self.log("train_loss", loss)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
loss = self.loss(self.model(x), y)
#self.log("val_loss", loss)
return loss
def configure_optimizers(self):
#optimizer = optim.Adam(self.parameters(), lr=1e-3)
optimizer = optim.SGD(self.parameters(), lr=1e-3)
return optimizer
housePriceModule = HousePriceModule(NeuralNetwork())
trainer = pl.Trainer(max_epochs=10, log_every_n_steps=100, enable_progress_bar=False)
trainer.fit(model=housePriceModule, train_dataloaders=train_dataloader, val_dataloaders=test_dataloader)