PennyLaneAI / pennylane

PennyLane is a cross-platform Python library for quantum computing, quantum machine learning, and quantum chemistry. Train a quantum computer the same way as a neural network.
https://pennylane.ai
Apache License 2.0

Issues with training of a QNN for the binary classification of images #4233

Closed BoltzmannEntropy closed 1 year ago

BoltzmannEntropy commented 1 year ago

Feature details

Dear team, this is a follow-up to this discussion: https://discuss.pennylane.ai/t/expectation-values-and-tensors-in-qnn/3024/7

I was unable to find an example that specifically addresses my requirements: processing RGB images, encoding them, and training the parameters of a PQC using a quantum circuit alone. Most existing examples combine a classical feature extractor with training of only a quantum fully connected layer, which is not what I am looking for.

I have written a self-contained example for binary classification on the bees/ants dataset, but I am facing an issue where the validation and training accuracy do not change. I believe this is not overfitting, but rather a problem in my code that I am unable to identify.

I would appreciate it if you could take a look at my code and provide guidance on this.

Implementation

Here is the code to reproduce the problem:

# Install necessary packages
# !pip install torch==1.12.1 torchvision==0.13.1 pennylane==0.30.0 efficientnet_pytorch

# Import required libraries
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import pennylane as qml
from pennylane import numpy as np
import os
from tqdm import tqdm
import sys 
from pennylane.operation import Tensor
import psutil
import matplotlib.pyplot as plt

# from qblocks.qgates import *

print("[Python version]:", sys.version)
print("[Deep Learning framework, Pytorch (Facebook) version]:", torch.__version__)
print("[Quantum Machine Learning framework (Pennylane) version]:", qml.__version__)

# Set device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Set parameters
patch_size = 2
img_size_single = 128
img_size_flat = img_size_single ** 2
RGB_C = 3  # Number of channels in the image (RGB)
batch_size = 32
num_epochs = 60
n_qubits = patch_size ** 2 * RGB_C
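# patch_size**2 * RGB_C = 2*2*3 = 12: one qubit per pixel-channel value in a 2x2 RGB patch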

n_layers = 6
num_classes = None

# Note if you forget to wrap your circuit with dev: AttributeError: 'tuple' object has no attribute 'float' 
dev_train = qml.device('default.qubit', wires=n_qubits)   

# Define the quantum circuit
# @qml.qnode(dev_train, interface="torch")

def Q_Plot(cirq_0, q_b,q_d):
    # print("Plot Q:", q_b)
    fig, ax = qml.draw_mpl(cirq_0,expansion_strategy='device')(torch.zeros(q_b), torch.zeros(q_d))        
    # print (qml.draw(cirq_0,expansion_strategy='device')(torch.zeros(q_b), torch.zeros(q_d)))
    # plt.figure(figsize=(5,3))
    # from pylab import rcParams
    # rcParams['figure.figsize'] = 3, 6
    # fig.set_size_inches(12,6)
    plt.show()
    fig.show()

def Q_count_parameters(qnn):
    print(dict(qnn.named_parameters()))
    for name, param in qnn.named_parameters():
        param.requires_grad=True
        # print (name, param.data)
    return sum(p.numel() for p in qnn.parameters() if p.requires_grad)

import random

def circuit(inputs, weights):
    # print('inputs / weights {}/{}'.format(inputs.shape, weights.shape))
    for qub in range(n_qubits):
        qml.Hadamard(wires=qub)
        qml.RY(inputs[qub], wires=qub)

    for l in range(n_layers):
        qubit_indices = list(range(n_qubits))
        random.shuffle(qubit_indices)  # Shuffle the qubit indices to create random pairs

        for i in range(0, n_qubits, 2):
            control_qubit = qubit_indices[i]
            target_qubit = qubit_indices[(i + 1) % n_qubits]

            # Apply CRZ gate with conditional RY gate
            random_num = random.uniform(0, 1)
            qml.CRZ(weights[control_qubit], wires=[control_qubit, target_qubit])
            qml.RY(random_num * weights[control_qubit], wires=control_qubit)
            qml.CNOT(wires=[control_qubit, target_qubit])
            qml.CZ(wires=[control_qubit, (control_qubit + 2) % n_qubits])  # Additional CZ gate for entanglement

    return qml.expval(Tensor(*[qml.PauliZ(i) for i in range(n_qubits)]))

# Define the Quanvolutional Neural Network
class QuanvolutionalNeuralNetwork(nn.Module):
    def __init__(self, num_classes):
        super().__init__()
        self.fc1 = nn.Linear(n_qubits, num_classes)
        self.q_params = nn.Parameter(torch.Tensor(n_qubits, n_qubits))
        self.lr1 = nn.LeakyReLU(0.1)
        nn.init.xavier_uniform_(self.q_params)
        self.pqc = qml.QNode(circuit, dev_train, interface = 'torch')
        Q_Plot(circuit,n_qubits,n_qubits)

    def extract_patches(self, x):
        patches = []
        bs, c, w, h = x.size()
        for i in range(w - patch_size + 1):
            for j in range(h - patch_size + 1):
                patch = x[:, :, i:i+patch_size, j:j+patch_size]
                patches.append(patch)
        patches = torch.stack(patches, dim=1).view(bs, -1, c * patch_size * patch_size)
        return patches

    def forward(self, x):
        assert len(x.shape) == 4  # (bs, c, w, h)
        bs = x.shape[0]  # batch_size = x.size(0)
        c = x.shape[1]  # RGB
        x = x.view(bs, c, img_size_single, img_size_single)
        q_in = self.extract_patches(x)
        q_in = q_in.to(device)
        # print (q_in.shape)
        # q_out = torch.Tensor(0, n_qubits)
        q_out = torch.Tensor(0, n_qubits)

        q_out = q_out.to(device)
        for elem in q_in:
            # print (elem.shape)
            # print (self.q_params.shape)
            q_out_elem = self.pqc(elem, self.q_params).float().unsqueeze(0)
            q_out = torch.cat((q_out, q_out_elem))        
        x = self.lr1(q_out.view(-1, n_qubits))
        x = self.fc1(x)

        return x

# Set the data directory and transformations
data_dir = 'datasets/hymenoptera/'
# import splitfolders as sf
# sf.ratio('datasets/mri/train', 'output', ratio=(0.65, 0.05, 0.3), seed=42)
data_transforms = {
    'train': transforms.Compose([
        # transforms.Resize(256),
        transforms.CenterCrop(img_size_single),
        transforms.Grayscale() if RGB_C == 1 else transforms.Lambda(lambda x: x),
        transforms.ToTensor(),
    ]),
    'val': transforms.Compose([
        # transforms.Resize(256),
        transforms.CenterCrop(img_size_single),
        transforms.Grayscale() if RGB_C == 1 else transforms.Lambda(lambda x: x),
        transforms.ToTensor(),
    ]),
}

# Load the image datasets
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
num_classes=len(class_names)

dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True) for x in ['train', 'val']}

# Initialize the Quanvolutional Neural Network
qnn = QuanvolutionalNeuralNetwork(num_classes=num_classes)
qnn = qnn.to(device)
# print ("Total trainable params:",Q_count_parameters(qnn))

# Set the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(qnn.parameters(), lr=0.001)

# Train the model
for epoch in tqdm(range(num_epochs)):
    # print("Epoch {}/{}, Qubits:{}".format(epoch, num_epochs, n_qubits))
    cpu_percent = psutil.cpu_percent()
    mem_usage= psutil.virtual_memory().total / (1024 ** 3)
    used_ram_gb = psutil.virtual_memory().used / (1024 ** 3)    
    # print(f"Epoch{epoch+1}/{num_epochs}, Dataset:{data_dir,dataset_sizes}, Qubits:{n_qubits}, RGB:{RGB_C}, IMG:{img_size_single} Layers:{n_layers},QNN Params:{[sum(p.numel() for p in qnn.parameters())]},CPU:{cpu_percent},RAM(GB):{used_ram_gb}'/'{mem_usage}")
    print(f"Epoch:[{epoch+1}/{num_epochs}], Dataset:{data_dir,dataset_sizes}, Qubits:{n_qubits}, RGB:{RGB_C},IMG:{img_size_single} Layers:{n_layers},QNN Params:{[sum(p.numel() for p in qnn.parameters())]} CPU:{cpu_percent}, RAM(GB):{used_ram_gb}/{mem_usage}")

    qnn.train()
    running_loss = 0
    running_corrects = 0
    total_samples = 0

    for batch_idx, (data, target) in tqdm(enumerate(dataloaders['train'])):
        data = data.to(device)
        target = target.view(-1).to(device)
        batch_size = data.size(0)  # Get the actual batch size

        optimizer.zero_grad()
        output = qnn(data)

        # Adjust the output tensor size if necessary
        if output.size(0) > batch_size:
            output = output[:batch_size]

        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        _, predicted = torch.max(output, 1)
        running_corrects += torch.sum(predicted == target.data)
        total_samples += batch_size

    batch_loss = running_loss / len(dataloaders['train'])
    batch_acc = running_corrects / total_samples
    print(f'[{epoch + 1}] Training Loss: {batch_loss:.3f}, Training Accuracy: {batch_acc:.3f}')

    running_loss = 0.0
    running_corrects = 0
    total_samples = 0

    val_correct = 0
    val_total = 0
    with torch.no_grad():
        for val_data, val_target in dataloaders['val']:
            val_data = val_data.to(device)
            val_target = val_target.to(device)
            batch_size = val_data.size(0)  # Get the actual batch size

            val_output = qnn(val_data)
            _, val_predicted = torch.max(val_output.data, 1)

            # Adjust the output and target tensors if necessary
            if val_predicted.size(0) > batch_size:
                val_predicted = val_predicted.narrow(0, 0, batch_size)
                val_target = val_target.narrow(0, 0, batch_size)

            val_total += batch_size
            val_correct += (val_predicted == val_target).sum().item()

    val_accuracy = 100 * val_correct / val_total
    print(f"[{epoch + 1}] Validation Accuracy: {val_accuracy:.2f}%")

How important would you say this feature is?

2: Somewhat important. Needed this quarter.

Additional information

Here is the log:

[1] Training Loss: 0.705, Training Accuracy: 0.507
  2%|▏         | 1/60 [01:06<1:05:21, 66.46s/it]
[1] Validation Accuracy: 45.00%
Epoch:[2/60], Dataset:('datasets/hymenoptera/', {'train': 225, 'val': 140}), Qubits:12, RGB:3,IMG:128 Layers:6,QNN Params:[170] CPU:29.8, RAM(GB):15.030136108398438/32.0
8it [00:55,  6.93s/it]
[2] Training Loss: 0.688, Training Accuracy: 0.507
  3%|▎         | 2/60 [02:11<1:03:33, 65.75s/it]
[2] Validation Accuracy: 45.00%
Epoch:[3/60], Dataset:('datasets/hymenoptera/', {'train': 225, 'val': 140}), Qubits:12, RGB:3,IMG:128 Layers:6,QNN Params:[170] CPU:32.5, RAM(GB):14.813690185546875/32.0
8it [00:55,  6.91s/it]
[3] Training Loss: 0.686, Training Accuracy: 0.507
  5%|▌         | 3/60 [03:17<1:02:31, 65.81s/it]
[3] Validation Accuracy: 45.00%
Epoch:[4/60], Dataset:('datasets/hymenoptera/', {'train': 225, 'val': 140}), Qubits:12, RGB:3,IMG:128 Layers:6,QNN Params:[170] CPU:29.7, RAM(GB):15.015106201171875/32.0
8it [00:58,  7.37s/it]
[4] Training Loss: 0.686, Training Accuracy: 0.507
  7%|▋         | 4/60 [04:27<1:03:00, 67.51s/it]
[4] Validation Accuracy: 45.00%
rmoyard commented 1 year ago

Hi @BoltzmannEntropy!

First thing: you should not use Tensor in the expectation value. Instead of return qml.expval(Tensor(*[qml.PauliZ(i) for i in range(n_qubits)])), simply return qml.expval([qml.PauliZ(i) for i in range(n_qubits)]); then you need to use hstack on the QNode results.

Could you come up with a minimal non-working example (a few lines)? Remove all unnecessary steps and highlight a specific issue. It is possible that your model not training is not directly related to PennyLane.

Thanks!

BoltzmannEntropy commented 1 year ago

Hi there, thanks for taking a look.

The code snippet I shared is meant as a "minimal non-working example." It does run, but it appears to have a conceptual bug: both the accuracy and the loss remain unchanged throughout training. This behavior could be connected to PennyLane, although it is uncertain whether the problem is specific to that library.

Previously, I had ported the code from Paddle-Quantum, where this particular issue did not occur and the accuracy reached almost 78%, but I cannot really compare the two experiments since the setups are quite different.
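As a quick sanity check, a minimal sketch along the following lines (reusing the qnn, criterion, device, and dataloaders defined in the code above) can show whether any gradient reaches the quantum weights at all:

# Sketch: run one training batch and inspect the gradients on the quantum weights.
data, target = next(iter(dataloaders['train']))
data, target = data.to(device), target.view(-1).to(device)

output = qnn(data)
if output.size(0) > data.size(0):      # same trimming as in the training loop
    output = output[:data.size(0)]

loss = criterion(output, target)
loss.backward()

q_grad = qnn.q_params.grad
print("q_params grad norm:", None if q_grad is None else q_grad.norm().item())
print("fc1 weight grad norm:", qnn.fc1.weight.grad.norm().item())

If the q_params gradient is missing or essentially zero while the classical fc1 layer receives a gradient, the quantum part of the model is effectively not being trained, which would explain the flat accuracy.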

BoltzmannEntropy commented 1 year ago

> Hi @BoltzmannEntropy!
>
> First thing: you should not use Tensor in the expectation value. Instead of return qml.expval(Tensor(*[qml.PauliZ(i) for i in range(n_qubits)])), simply return qml.expval([qml.PauliZ(i) for i in range(n_qubits)]); then you need to use hstack on the QNode results.

Regarding the expectation value issue, here is a minimal example that reproduces it:

# Install necessary packages
# !pip install torch==1.12.1 torchvision==0.13.1 pennylane==0.29.0 efficientnet_pytorch

# Import required libraries
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import pennylane as qml
from pennylane import numpy as np
import os
from tqdm import tqdm
import sys 
from pennylane.operation import Tensor
import psutil
import matplotlib.pyplot as plt

import random

# Set device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Set parameters
patch_size = 2
img_size_single = 128
img_size_flat = img_size_single ** 2
RGB_C = 3  # Number of channels in the image (RGB)
batch_size = 32
num_epochs = 60
n_qubits = patch_size ** 2 * RGB_C

n_layers = 1
num_classes = 2

# Note if you forget to wrap your circuit with dev: AttributeError: 'tuple' object has no attribute 'float' 
dev_train = qml.device('default.qubit', wires=n_qubits)   

def Q_encoding_block(q_input_features, n_qubits):
    for qub in range(n_qubits):
        qml.Hadamard(wires=qub)        
        qml.RY(q_input_features[qub], wires=qub)   

def Q_quanvol_block_A(q_weights, n_qubits, q_depth):
    for layer in range(q_depth):
        for i in range(n_qubits):
            if (n_qubits - i - 1) != i:
                if i % 2 != 0:
                    qml.CNOT(wires=[n_qubits - i - 1, i])
                else:
                    qml.CNOT(wires=[i, n_qubits - i - 1])
                if (i < n_qubits - 1) and ((n_qubits - i - 1) != i):
                    qml.CRZ(q_weights[layer], wires=[i, (i + 1) % n_qubits])
                    # Prevent WireError: Wires must be unique; got [0, 0].
                    if i % 2 == 0:
                        qml.CNOT(wires=[n_qubits - i - 1, i])
                    else:
                        qml.CNOT(wires=[i, n_qubits - i - 1])
                        qml.Hadamard(n_qubits - i - 1)
        qml.RY(q_weights[layer], wires=i)

def Q_encoding_circuit_A(q_input_features, q_weights, n_qubits, q_depth):
    # q_input_features = q_input_features.astype(np.float64)
    # print('inputs / weights {}/{}'.format(inputs.shape, weights.shape))
    Q_encoding_block(q_input_features, n_qubits)

    Q_quanvol_block_A(q_weights, n_qubits, q_depth)
    # exp_vals = [qml.expval(qml.PauliZ(position)) for position in range(n_qubits)]  
    # exp_vals=qml.expval(Tensor(*[qml.PauliZ(i) for i in range(n_qubits)]))  
    return qml.expval([qml.PauliZ(i) for i in range(n_qubits)])
    # return exp_vals

def Q_Plot(cirq_0, q_b,q_d):

    print("Plot Q/D:{}/{}".format (q_b,q_d))
    # shapes:torch.Size([16129, 12]),torch.Size([12, 12]),12,2
    fig, ax = qml.draw_mpl(cirq_0,expansion_strategy='device')(torch.zeros(q_b), torch.zeros(q_d),q_b,q_d)     
    # print (qml.draw(cirq_0,expansion_strategy='device')(torch.zeros(q_b), torch.zeros(q_d)))
    # plt.figure(figsize=(5,3))
    # from pylab import rcParams
    # rcParams['figure.figsize'] = 3, 6
    # fig.set_size_inches(12,6)
    plt.show()
    fig.show()

# Define the Quanvolutional Neural Network
class QuanvolutionalNeuralNetwork(nn.Module):
    def __init__(self, n_qubits, n_layers, circuit, dev_train, gpu_device, patch_size, img_size_single, num_classes):
        super().__init__()
        self.n_qubits=n_qubits
        self.patch_size=patch_size
        self.img_size_single=img_size_single
        self.n_layers=n_layers
        self.device=gpu_device 
        self.circuit=circuit 
        self.num_classes=num_classes
        self.dev_train=dev_train
        weight_shapes = {"weights": (n_layers, 2 * n_qubits)}

        self.fc1 = nn.Linear(self.n_qubits, self.num_classes)
        self.q_params = nn.Parameter(torch.Tensor(self.n_qubits, self.n_qubits))
        self.lr1 = nn.LeakyReLU(0.1)
        nn.init.xavier_uniform_(self.q_params)
        # qnode = qml.QNode(circuit, self.dev_train, interface = 'torch')
        self.pqc = qml.QNode(circuit, self.dev_train, interface='torch')
        # self.ql1 = qml.qnn.TorchLayer(qnode, weight_shapes)
        Q_Plot(self.pqc,self.n_qubits,self.n_layers)

    def extract_patches(self, x):
        patches = []
        bs, c, w, h = x.size()
        for i in range(w - self.patch_size + 1):
            for j in range(h - self.patch_size + 1):
                patch = x[:, :, i:i+self.patch_size, j:j+self.patch_size]
                patches.append(patch)
        patches = torch.stack(patches, dim=1).view(bs, -1, c * self.patch_size * self.patch_size)
        return patches

    def forward(self, x):
        assert len(x.shape) == 4  # (bs, c, w, h)
        bs = x.shape[0]  # batch_size = x.size(0)
        c = x.shape[1]  # RGB or mono 
        x = x.view(bs, c, self.img_size_single, self.img_size_single)
        q_in = self.extract_patches(x)
        q_in = q_in.to(self.device)
        # print (q_in.shape)
        # q_out = torch.Tensor(0, n_qubits)
        q_out = torch.Tensor(0, self.n_qubits)

        XL=[]
        q_out = q_out.to(self.device)        
        for elem in q_in:            
            # print ('shapes:{},{},{},{}'.format(elem.shape, self.q_params.shape,self.n_qubits, self.n_layers))            
            # output = torch.stack([torch.hstack(circuit(x, params)) for x in input])
            q_out_elem = self.pqc(elem, self.q_params,self.n_qubits, self.n_layers).float().unsqueeze(0)
            # q_out=torch.hstack(q_out_elem)
            XL.append(q_out_elem)
            # q_out = torch.cat((q_out, q_out_elem))
        X = torch.stack(XL, dim=0)
        # Reshape X to match the subsequent layer's input requirements

        x = self.lr1(X.view(-1, self.n_qubits))
        x = self.fc1(x)

        return x

if __name__ == '__main__':
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print (device)        
    # Initialize the Quanvolutional Neural Network
    qnn= QuanvolutionalNeuralNetwork(n_qubits, n_layers, Q_encoding_circuit_A, dev_train, device, patch_size, img_size_single, num_classes)
    qnn = qnn.to(device)

The resulting error:

Plot Q/D:12/1
Traceback (most recent call last):
  File "qc-cnn.py", line 153, in <module>
    qnn = QuanvolutionalNeuralNetwork(n_qubits, n_layers, Q_encoding_circuit_A, dev_train, device, patch_size, img_size_single, num_classes)
  File "qc-cnn.py", line 108, in __init__
    Q_Plot(self.pqc, self.n_qubits, self.n_layers)
  File "qc-cnn.py", line 77, in Q_Plot
    fig, ax = qml.draw_mpl(cirq_0, expansion_strategy='device')(torch.zeros(q_b), torch.zeros(q_d), q_b, q_d)     
  File "pennylane/drawer/draw.py", line 536, in wrapper
    qnode.construct(args, kwargs_qnode)
  File "pennylane/qnode.py", line 751, in construct
    self._tape = make_qscript(self.func)(*args, **kwargs)
  File "pennylane/tape/qscript.py", line 1371, in wrapper
    result = fn(*args, **kwargs)
  File "qc-cnn.py", line 70, in Q_encoding_circuit_A
    return qml.expval([qml.PauliZ(i) for i in range(n_qubits)])
  File "pennylane/measurements/expval.py", line 55, in expval
    if not op.is_hermitian:
AttributeError: 'list' object has no attribute 'is_hermitian'
rmoyard commented 1 year ago

Hi @BoltzmannEntropy,

Sorry for the misunderstanding. You can use either return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)] with hstack as post-processing, or, if you want the product observable, return qml.expval(qml.prod(*[qml.PauliZ(wires=i) for i in range(n_qubits)])).

Let me know if that works
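For concreteness, a minimal standalone sketch of both options with the Torch interface (a toy circuit, not the model from this issue; per_wire_circuit and product_circuit are just illustrative names, and qml.prod assumes a recent PennyLane release such as 0.30):

import torch
import pennylane as qml

n_qubits = 4
dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev, interface="torch")
def per_wire_circuit(inputs, weights):
    for q in range(n_qubits):
        qml.RY(inputs[q], wires=q)
        qml.RZ(weights[q], wires=q)
    # One expectation value per wire; the QNode returns a tuple of tensors.
    return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)]

@qml.qnode(dev, interface="torch")
def product_circuit(inputs, weights):
    for q in range(n_qubits):
        qml.RY(inputs[q], wires=q)
        qml.RZ(weights[q], wires=q)
    # Single expectation value of the product observable Z @ Z @ ... @ Z.
    return qml.expval(qml.prod(*[qml.PauliZ(wires=i) for i in range(n_qubits)]))

inputs = torch.zeros(n_qubits)
weights = torch.zeros(n_qubits, requires_grad=True)

per_wire = torch.hstack(per_wire_circuit(inputs, weights))  # shape (n_qubits,)
single = product_circuit(inputs, weights)                   # scalar tensor

The first form gives one value per wire, which is usually what you want as input features for a subsequent classical layer; the second collapses everything into a single scalar per forward pass.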

frederikwilde commented 1 year ago

Hi @BoltzmannEntropy, I just wanted to leave this video here: 5 Tips for Making Great Forum Posts. These tips are also very helpful for reporting bugs on GitHub. I hope this helps.