hanhandian opened 9 months ago
```python
import numpy as np
import torch
import torch.utils.data as Data
from torch.autograd import Variable

def split_data(x, y, split_ratio):
    train_size = int(len(y) * split_ratio)
    test_size = len(y) - train_size

    # `device` is a global defined elsewhere in the script
    x_data = Variable(torch.Tensor(np.array(x))).to(device)
    y_data = Variable(torch.Tensor(np.array(y))).to(device)
    x_train = Variable(torch.Tensor(np.array(x[0:train_size]))).to(device)
    y_train = Variable(torch.Tensor(np.array(y[0:train_size]))).to(device)
    x_test = Variable(torch.Tensor(np.array(x[train_size:len(x)]))).to(device)
    y_test = Variable(torch.Tensor(np.array(y[train_size:len(y)]))).to(device)

    print('x_data.shape, y_data.shape, x_train.shape, y_train.shape, x_test.shape, y_test.shape:\n{} {} {} {} {} {}'
          .format(x_data.shape, y_data.shape, x_train.shape, y_train.shape, x_test.shape, y_test.shape))
    return x_data, y_data, x_train, y_train, x_test, y_test

def data_generator(x_train, y_train, x_test, y_test, batch_size):
    train_dataset = Data.TensorDataset(x_train, y_train)
    test_dataset = Data.TensorDataset(x_test, y_test)
    # Load the datasets and make them iterable
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size,
                                               shuffle=False, drop_last=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size,
                                              shuffle=False, drop_last=True)
    return train_loader, test_loader
```
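For context, these helpers are called roughly like this (the split ratio and batch size here are illustrative, not necessarily my exact values):

```python
# Sketch of how the helpers are used; x is a list of input windows, y the targets.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

x_data, y_data, x_train, y_train, x_test, y_test = split_data(x, y, split_ratio=0.8)
train_loader, test_loader = data_generator(x_train, y_train, x_test, y_test, batch_size=128)
```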
```python
import torch.nn as nn

class LSTM(nn.Module):
    """
    Parameters:
        num_layers: layers of LSTM to stack
    """
    def __init__(self, input_size, hidden_size, output_size, num_layers):
        super().__init__()
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.dropout = nn.Dropout(p=0.2)
        self.linear1 = nn.Linear(hidden_size, hidden_size * 2)  # fully connected layer
        self.linear2 = nn.Linear(hidden_size * 2, output_size)  # fully connected layer
        self.num_directions = 1

    def forward(self, _x):
        # h_0 = torch.randn(self.num_directions * num_layers, batch_size, self.hidden_size).to(device)
        # c_0 = torch.randn(self.num_directions * num_layers, batch_size, self.hidden_size).to(device)
        # _x = _x.reshape((-1, 5, 7))
        x, _ = self.lstm(_x)  # _x is input, size (batch, seq_len, input_size) since batch_first=True
        x = self.dropout(x)
        x = self.linear1(x)
        x = self.linear2(x)
        x = x[:, -1, :]  # keep only the last time step
        return x
```
This is the code for the LSTM model.
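For completeness, Captum is set up roughly like this (a sketch from memory; the hyperparameters below are placeholders, except `input_size=7`, which matches the commented reshape above):

```python
from captum.attr import LayerIntegratedGradients

# Illustrative hyperparameters, not necessarily my exact values.
model = LSTM(input_size=7, hidden_size=64, output_size=1, num_layers=2).to(device)
lig = LayerIntegratedGradients(model, model.lstm)  # attribute with respect to the LSTM layer
```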
When I explain the LSTM with Captum, the following error occurs:

```
Traceback (most recent call last):
  File "D:\论文机器学习模型\shiyan.py", line 210, in <module>
    attributions_ig, delta_ig = lig.attribute(x_test[0:128], target=0, return_convergence_delta=True)
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\log\__init__.py", line 42, in wrapper
    return func(*args, **kwargs)
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\attr\_core\layer\layer_integrated_gradients.py", line 371, in attribute
    inputs_layer = _forward_layer_eval(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\gradient.py", line 182, in _forward_layer_eval
    return _forward_layer_eval_with_neuron_grads(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\gradient.py", line 445, in _forward_layer_eval_with_neuron_grads
    saved_layer = _forward_layer_distributed_eval(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\gradient.py", line 294, in _forward_layer_distributed_eval
    output = _run_forward(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\common.py", line 531, in _run_forward
    output = forward_func(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\论文机器学习模型\shiyan.py", line 111, in forward
    x, _ = self.lstm(_x)  # _x is input, size (seq_len, batch, input_size)
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\torch\nn\modules\module.py", line 1547, in _call_impl
    hook_result = hook(self, args, result)
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\gradient.py", line 277, in forward_hook
    saved_layer[original_module][eval_tsrs[0].device] = tuple(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\gradient.py", line 278, in <genexpr>
    eval_tsr.clone() for eval_tsr in eval_tsrs
AttributeError: 'tuple' object has no attribute 'clone'
```
Has anyone encountered the same problem?
Help, please!
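Update: my current guess is that the hook fails because `nn.LSTM.forward` returns a tuple `(output, (h_n, c_n))`, so when Captum's `forward_hook` tries to `.clone()` each saved element, the nested `(h_n, c_n)` tuple has no `.clone()`. An untested sketch of a workaround I am considering is to wrap the LSTM so the hooked module returns only the output tensor:

```python
import torch.nn as nn

class LSTMOutputOnly(nn.Module):
    """Hypothetical wrapper: hooks attached to this module see only the
    output Tensor of nn.LSTM, not the (output, (h_n, c_n)) tuple."""
    def __init__(self, lstm):
        super().__init__()
        self.lstm = lstm

    def forward(self, x):
        output, _ = self.lstm(x)  # discard the (h_n, c_n) state tuple
        return output
```

The model's `forward` would then call this wrapper instead of `self.lstm` directly, and `lig` would target the wrapper. Alternatively, attributing a layer whose output is already a plain tensor (e.g. `model.linear1`), or using plain `IntegratedGradients(model)` on the inputs, should avoid the tuple entirely. Does this look right, or is there a cleaner fix?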