Astrophysica: a Python-based library for black hole and nebula travelling

import pennylane as qml
import numpy as np
import torch
from torchvision import datasets, transforms
import matplotlib.pyplot as plt

Set up the PennyLane device

n_qubits = 4
dev = qml.device("default.qubit", wires=n_qubits)

Define the quantum circuit using PennyLane

@qml.qnode(dev)
def quantum_circuit(inputs, weights):
    # Angle-encode one classical feature per qubit
    for i in range(n_qubits):
        qml.RX(inputs[..., i], wires=i)
    # Trainable entangling block; weights has shape (n_layers, n_qubits, 3)
    qml.templates.StronglyEntanglingLayers(weights, wires=range(n_qubits))
    # One Pauli-Z expectation value per qubit
    return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)]
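
As an optional sanity check (illustrative, not part of the training script), the weight shape expected by StronglyEntanglingLayers can be queried and the circuit drawn; the inputs below are made-up values:

# Illustrative only: StronglyEntanglingLayers expects weights of shape (n_layers, n_wires, 3)
shape = qml.templates.StronglyEntanglingLayers.shape(n_layers=2, n_wires=n_qubits)
print(shape)  # (2, 4, 3)
example_weights = np.random.random(shape)
example_inputs = np.array([0.1, 0.2, 0.3, 0.4])  # hypothetical feature vector
print(qml.draw(quantum_circuit)(example_inputs, example_weights))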

Hybrid Quantum Convolutional Neural Network

class HybridQCNN(torch.nn.Module):
    def __init__(self):
        super(HybridQCNN, self).__init__()
        self.conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3)
        # Pool each of the 4 feature maps down to a single value so the flattened
        # output matches the 4 qubits of the quantum circuit
        self.pool = torch.nn.AdaptiveAvgPool2d(1)
        # Wrap the QNode as a Torch layer so the quantum weights become trainable
        # torch parameters (instead of fixed NumPy weights); 2 quantum layers
        weight_shapes = {"weights": (2, n_qubits, 3)}
        self.quantum_layer = qml.qnn.TorchLayer(quantum_circuit, weight_shapes)
        self.fc_layer = torch.nn.Linear(n_qubits, 10)  # 10 classes

    def forward(self, x):
        x = self.conv_layer(x)
        x = self.pool(x)
        x = x.view(x.size(0), -1)    # (batch, 4)
        x = self.quantum_layer(x)    # (batch, 4) expectation values
        x = self.fc_layer(x)
        return x

Load the MNIST dataset

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root="./data", train=True, transform=transform, download=True),
    batch_size=32,
    shuffle=True,
)
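
A quick look at one batch (purely illustrative) shows the tensor shapes the model receives:

# Illustrative: each batch holds 32 grayscale 28x28 images and their class labels
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([32, 1, 28, 28]) torch.Size([32])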

Instantiate the Hybrid Quantum Convolutional Neural Network model

model = HybridQCNN()
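
A dummy forward pass (an illustrative check with random data, assuming the layer sizes above) confirms the model maps MNIST-sized images to 10 class scores:

# Illustrative shape check with random data
dummy = torch.randn(8, 1, 28, 28)
print(model(dummy).shape)  # torch.Size([8, 10])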

Define loss function and optimizer

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)  # Adam over both classical and quantum parameters

Training loop

num_epochs = 5
losses = []

for epoch in range(num_epochs):
    epoch_loss = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        outputs = model(data)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        epoch_loss += loss.item()
        print(f"Epoch [{epoch+1}/{num_epochs}], Batch [{batch_idx+1}/{len(train_loader)}], Loss: {loss.item():.4f}")

    print(f"Epoch [{epoch+1}/{num_epochs}] - Average Loss: {epoch_loss / (batch_idx+1):.4f}")

print("Training finished")
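
An evaluation pass over the MNIST test split (an optional sketch, not part of the original script; it reuses the transform defined above) gives a rough accuracy figure:

# Optional sketch: accuracy on the MNIST test split
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root="./data", train=False, transform=transform, download=True),
    batch_size=32,
)
model.eval()
correct, total = 0, 0
with torch.no_grad():
    for data, target in test_loader:
        predictions = model(data).argmax(dim=1)
        correct += (predictions == target).sum().item()
        total += target.size(0)
print(f"Test accuracy: {correct / total:.4f}")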

Plot the loss vs. iterations graph

plt.plot(range(len(losses)), losses, marker='o')
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.title('Loss vs. Iterations')
plt.grid(True)
plt.show()