data61 / MP-SPDZ

Versatile framework for multi-party computation
Other
944 stars 280 forks source link

Error when I tried to implement ResNet50 with PyTorch in Compiler/ml.py #1477

Closed DuanYuFi closed 3 months ago

DuanYuFi commented 3 months ago

Hi, I am trying to implement my own ResNet50 training and inference code with PyTorch, but an error occurs in ml.py.

Here is my code:

# torch_cifar_resnet.mpc

from Compiler.types import *

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import numpy as np

data_root = '/root/codes/MachineLearning/ResNet50/data'

num_epochs = 1
learning_rate = 0.001
batch_size = 128

# Standard CIFAR-10 per-channel mean/std normalization; crop/flip
# augmentation is applied to the training split only.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset = torchvision.datasets.CIFAR10(
    root=data_root, train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=128, shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(
    root=data_root, train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
    testset, batch_size=100, shuffle=False, num_workers=2)

# BUG FIX: the original loops re-assigned the target variables on every
# iteration, so only the *last* batch of each loader survived, even though
# optimizer.fit below is clearly meant to train on the full dataset.
# Collect every batch and concatenate along the batch dimension instead.
_train_x, _train_y = zip(*((x.numpy(), y.numpy()) for x, y in trainloader))
training_samples = np.concatenate(_train_x)
training_labels = np.concatenate(_train_y)

_test_x, _test_y = zip(*((x.numpy(), y.numpy()) for x, y in testloader))
test_samples = np.concatenate(_test_x)
test_labels = np.concatenate(_test_y)

# Secret-share the data as fixed-point tensors, all input by party 0.
training_samples = sfix.input_tensor_via(0, training_samples, binary=True)
training_labels = sfix.input_tensor_via(0, training_labels, binary=True)
test_samples = sfix.input_tensor_via(0, test_samples, binary=True)
test_labels = sfix.input_tensor_via(0, test_labels, binary=True)

classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')

class BasicBlock(nn.Module):
    """Two-3x3-conv residual block (ResNet-18/34 style).

    The shortcut is the identity unless the block changes the spatial
    size (stride != 1) or the channel count, in which case a 1x1
    convolution + batch norm projects the input to the output shape.
    """

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Project the input only when shapes would otherwise mismatch.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)

class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50/101/152 style).

    The final 1x1 convolution expands the channel count by ``expansion``;
    the shortcut is the identity unless the spatial size or channel count
    changes, in which case a 1x1 conv + batch norm projects the input.
    """

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        out_planes = self.expansion * planes
        # Reduce, convolve at the (possibly strided) 3x3, then expand.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        if stride == 1 and in_planes == out_planes:
            # Shapes already match: identity shortcut.
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        skip = self.shortcut(x)
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        return F.relu(h + skip)

class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, four residual stages, 4x4 average
    pool, then a linear classification head.

    ``block`` is the residual block class (e.g. BasicBlock/Bottleneck)
    and ``num_blocks`` gives the number of blocks per stage.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one stage; only its first block may downsample."""
        blocks = []
        for i in range(num_blocks):
            blocks.append(block(self.in_planes, planes,
                                stride if i == 0 else 1))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)

model = ResNet(Bottleneck, [3, 4, 6, 3])
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

from Compiler import ml

# Optional second command-line argument selects the MPC thread count;
# fall back to the default when it is absent or not an integer.
# (The original bare `except:` would also have hidden unrelated errors
# such as a missing `program` object.)
try:
    ml.set_n_threads(int(program.args[2]))
except (IndexError, ValueError):
    pass

# NOTE(review): `criterion` is never referenced again in this script --
# kept for parity with the plaintext training code, but a candidate
# for removal.
criterion = nn.CrossEntropyLoss()

# Sanity-check the network on one plaintext batch before converting it.
ds = torchvision.datasets.CIFAR10(
    root=data_root, transform=torchvision.transforms.ToTensor())
inputs = next(iter(torch.utils.data.DataLoader(ds)))[0]
print(inputs.shape)
outputs = model(inputs)

# Convert the torch model into MP-SPDZ layers and train under MPC.
layers = ml.layers_from_torch(model, training_samples.shape, batch_size)
optimizer = ml.Adam(layers)

optimizer.fit(
    training_samples,
    training_labels,
    epochs=num_epochs,
    batch_size=batch_size,
    validation_data=(test_samples, test_labels),
    program=program
)

And here is the output:

Traceback (most recent call last):
  File "/root/MP_SPDZ/./compile.py", line 41, in <module>
    main(compiler)
  File "/root/MP_SPDZ/./compile.py", line 36, in main
    compilation(compiler)
  File "/root/MP_SPDZ/./compile.py", line 19, in compilation
    prog = compiler.compile_file()
           ^^^^^^^^^^^^^^^^^^^^^^^
  File "/root/MP_SPDZ/Compiler/compilerLib.py", line 479, in compile_file
    exec(compile(infile.read(), infile.name, "exec"), self.VARS)
  File "Programs/Source/torch_cifar_resnet.mpc", line 171, in <module>
    layers = ml.layers_from_torch(model, training_samples.shape, batch_size)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/root/MP_SPDZ/Compiler/ml.py", line 3388, in layers_from_torch
    raise e
  File "/root/MP_SPDZ/Compiler/ml.py", line 3384, in layers_from_torch
    inputs = [named_layers[x] for x in args]
              ~~~~~~~~~~~~^^^
KeyError: 4

Compile command: ./compile.py torch_cifar_resnet

mkskeller commented 3 months ago

b6aa32f26d4f037d829fc227391a8568d26dd268 should fix this particular issue, but you will encounter further issues.

DuanYuFi commented 3 months ago

Thanks a lot! I will update the codes :)