Logger doesn't work as expected on test losses and accuracy

I am currently building a NN with PyTorch Lightning, and I am a beginner. This is the metrics file generated by the logger:

Sorry for the pictures; the file was too long, so I had to cut it in two.
Since I am doing a hold-out cross-validation, it is okay that the validation_loss column is not filled until the last row, because the model is only validated on one portion of the data.
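
For reference, the hold-out split is built roughly like this (the dataset name and split sizes are just placeholders):

from torch.utils.data import DataLoader, random_split

n_val = int(0.2 * len(dataset))  # hold out 20% of the data for validation
train_set, val_set = random_split(dataset, [len(dataset) - n_val, n_val])
train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
val_loader = DataLoader(val_set, batch_size=32)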

But I can’t understand why the test columns are empty. I printed the loss and accuracy inside the test_step method, and it seems that the final logged value is their mean. Is there a way to also display test_loss and test_accuracy for each epoch?
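
For context, I run training and testing roughly like this (the logger path, epoch count, and loader names are just placeholders):

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import CSVLogger

trainer = Trainer(max_epochs=100, logger=CSVLogger("logs"))
trainer.fit(model, train_loader, val_loader)    # runs every epoch, fills the train/validation columns
trainer.test(model, dataloaders=test_loader)    # runs once, after training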

The code is this:

import torch
import torch.nn as nn
import pytorch_lightning as pl
from torchmetrics import Accuracy


class ClassificationMonk(pl.LightningModule):
    def __init__(self, input_dim, output_dim, hidden1, learning_rate):
        super(ClassificationMonk, self).__init__()
        # First hidden layer
        self.linear1 = nn.Linear(input_dim, hidden1)
        self.act1 = nn.ReLU()
        # Second hidden layer
        self.linear2 = nn.Linear(hidden1, hidden1)
        self.act2 = nn.ReLU()
        # Output layer
        self.linear3 = nn.Linear(hidden1, output_dim)
        self.act3 = nn.Sigmoid()
        self.loss_fun = nn.MSELoss()
        self.dicts = []
        self.learning_rate = learning_rate
        self.apply(self.init_weights)

    def init_weights(self, m):
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight)
            m.bias.data.fill_(0.01)

    def forward(self, X):
        # Input to the first hidden layer
        X = self.linear1(X)
        X = self.act1(X)
        # Second hidden layer
        X = self.linear2(X)
        X = self.act2(X)
        # Output layer
        X = self.linear3(X)
        X = self.act3(X)
        return X

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
        return optimizer

    def training_step(self, train_batch, batch_idx):
        X, y = train_batch
        y_copy = y  # integer targets for the accuracy
        X = X.type(torch.float32)
        y = y.type(torch.float32)
        # forward pass
        y_pred = self.forward(X).squeeze()
        # accuracy
        accuracy = Accuracy()
        acc = accuracy(y_pred, y_copy)
        # compute loss
        loss = self.loss_fun(y_pred, y)
        self.log_dict({'train_loss': loss, 'train_accuracy': acc}, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        return loss

    def validation_step(self, validation_batch, batch_idx):
        X, y = validation_batch
        X = X.type(torch.float32)
        # forward pass
        y_pred = self.forward(X).squeeze()
        # compute metrics
        accuracy = Accuracy()
        acc = accuracy(y_pred, y)
        loss = self.loss_fun(y_pred, y)
        self.log_dict({'validation_loss': loss, 'validation_accuracy': acc}, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        return loss

    def test_step(self, test_batch, batch_idx):
        X, y = test_batch
        X = X.type(torch.float32)
        # forward pass
        y_pred = self.forward(X).squeeze()
        # compute metrics
        accuracy = Accuracy()
        acc = accuracy(y_pred, y)
        loss = self.loss_fun(y_pred, y)
        self.log_dict({'test_loss': loss, 'test_accuracy': acc}, on_epoch=True, prog_bar=True, logger=True)
        return loss