Closed: lith0613 closed this issue 5 years ago
I have encountered a new problem when I tried to run the following code:
```python
import torch.nn as nn
from pytorch2keras.converter import pytorch_to_keras
import numpy as np
import torch
from torch.autograd import Variable

class CifarNet(nn.Module):
    def __init__(self):
        super(CifarNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
        self.conv4 = nn.Conv2d(128, 128, kernel_size=3)
        self.pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU(inplace=True)
        self.fc1 = nn.Linear(3200, 256)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        x = self.relu(self.conv1(x))
        x = self.relu(self.conv2(x))
        x = self.pool(x)
        x = self.relu(self.conv3(x))
        x = self.relu(self.conv4(x))
        x = self.pool(x)
        x = x.view(-1, 3200)
        x = self.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# Load the trained model from file
trained_model = CifarNet()
trained_model.load_state_dict(torch.load('results/cifar_best.pth'))

input_np = np.random.uniform(0, 1, (1, 3, 32, 32))
input_var = Variable(torch.FloatTensor(input_np))
k_model = pytorch_to_keras(trained_model, input_var, [(3, 32, 32,)], verbose=True)
```
which reported the following error:
```
Traceback (most recent call last):
  File "pytorch4keras.py", line 40, in <module>
    k_model = pytorch_to_keras(trained_model, input_var, [(3, 32, 32,)], verbose=True)
  File "/root/anaconda3/envs/python3_lth2/lib/python3.6/site-packages/pytorch2keras/converter.py", line 332, in pytorch_to_keras
    names
  File "/root/anaconda3/envs/python3_lth2/lib/python3.6/site-packages/pytorch2keras/linear_layers.py", line 34, in convert_gemm
    W = weights[weights_name].numpy().transpose()
KeyError: 'dropout.weight'
```
Is the dropout layer not included in the converter framework?
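As far as I can tell, `nn.Dropout` has no learnable parameters, so there is never a `'dropout.weight'` entry in the model's `state_dict` for `convert_gemm` to look up. A quick check with the `trained_model` defined above:

```python
# Dropout (like MaxPool2d and ReLU) contributes no parameters, so no
# 'dropout.*' keys appear here; only the conv/fc weights and biases do.
print(list(trained_model.state_dict().keys()))
```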
I changed PyTorch from 1.0 to 0.4.0, and then it works; I don't know why that happens.
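If staying on PyTorch 1.0, a possible workaround (just a sketch, not verified against every pytorch2keras version) is to swap the Dropout module for a pass-through module before conversion, since dropout is a no-op at inference time anyway. The `Identity` class below is my own helper, not part of either library:

```python
import torch.nn as nn

class Identity(nn.Module):
    """Pass-through replacement for layers with no effect at inference time."""
    def forward(self, x):
        return x

# Replace the Dropout module so the tracer never emits a dropout node,
# then convert as before.
trained_model.dropout = Identity()
trained_model.eval()
k_model = pytorch_to_keras(trained_model, input_var, [(3, 32, 32,)], verbose=True)
```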