AttributeError: 'tuple' object has no attribute 'clone' #1220

Open
hanhandian opened this issue Dec 9, 2023 · 1 comment

Comments

@hanhandian

When I used Captum to explain an LSTM model, the following error occurred:
Traceback (most recent call last):
  File "D:\论文机器学习模型\shiyan.py", line 210, in <module>
    attributions_ig, delta_ig = lig.attribute(x_test[0:128], target=0, return_convergence_delta=True)
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\log\__init__.py", line 42, in wrapper
    return func(*args, **kwargs)
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\attr\_core\layer\layer_integrated_gradients.py", line 371, in attribute
    inputs_layer = _forward_layer_eval(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\gradient.py", line 182, in _forward_layer_eval
    return _forward_layer_eval_with_neuron_grads(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\gradient.py", line 445, in _forward_layer_eval_with_neuron_grads
    saved_layer = _forward_layer_distributed_eval(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\gradient.py", line 294, in _forward_layer_distributed_eval
    output = _run_forward(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\common.py", line 531, in _run_forward
    output = forward_func(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\论文机器学习模型\shiyan.py", line 111, in forward
    x, _ = self.lstm(_x) # _x is input, size (seq_len, batch, input_size)
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\torch\nn\modules\module.py", line 1547, in _call_impl
    hook_result = hook(self, args, result)
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\gradient.py", line 277, in forward_hook
    saved_layer[original_module][eval_tsrs[0].device] = tuple(
  File "D:\APP\Anaconda\envs\pytorch\lib\site-packages\captum\_utils\gradient.py", line 278, in <genexpr>
    eval_tsr.clone() for eval_tsr in eval_tsrs
AttributeError: 'tuple' object has no attribute 'clone'
Has anyone encountered the same problem? Help, please!
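
For context, this appears to happen because nn.LSTM's forward returns a tuple (output, (h_n, c_n)): Captum's forward hook on the layer iterates over the layer's output and calls .clone() on each element, and the inner (h_n, c_n) pair is itself a tuple with no .clone(). A minimal sketch of the failure mode (the sizes here are illustrative, not taken from the issue):

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=7, hidden_size=16, batch_first=True)
out = lstm(torch.randn(2, 5, 7))  # out == (output, (h_n, c_n))

# Captum's forward_hook effectively runs `eval_tsr.clone() for eval_tsr in out`;
# out[1] is itself a tuple, which reproduces the reported error:
out[1].clone()  # AttributeError: 'tuple' object has no attribute 'clone'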

@hanhandian (Author)

Data splitting

import numpy as np
import torch
from torch.autograd import Variable  # legacy wrapper; a no-op in modern PyTorch

def split_data(x, y, split_ratio):
    # device is assumed to be defined elsewhere, e.g. torch.device('cuda')
    train_size = int(len(y) * split_ratio)
    test_size = len(y) - train_size

    x_data = Variable(torch.Tensor(np.array(x))).to(device)
    y_data = Variable(torch.Tensor(np.array(y))).to(device)

    x_train = Variable(torch.Tensor(np.array(x[0:train_size]))).to(device)
    y_train = Variable(torch.Tensor(np.array(y[0:train_size]))).to(device)
    y_test = Variable(torch.Tensor(np.array(y[train_size:len(y)]))).to(device)
    x_test = Variable(torch.Tensor(np.array(x[train_size:len(x)]))).to(device)

    print('x_data.shape, y_data.shape, x_train.shape, y_train.shape, x_test.shape, y_test.shape:\n{} {} {} {} {} {}'
          .format(x_data.shape, y_data.shape, x_train.shape, y_train.shape, x_test.shape, y_test.shape))

    return x_data, y_data, x_train, y_train, x_test, y_test

Data loading

import torch.utils.data as Data

def data_generator(x_train, y_train, x_test, y_test, batch_size):
    train_dataset = Data.TensorDataset(x_train, y_train)
    test_dataset = Data.TensorDataset(x_test, y_test)
    # Wrap the datasets in DataLoaders to make them iterable in fixed-size batches
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=False,
                                               drop_last=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False,
                                              drop_last=True)

    return train_loader, test_loader
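
For reference, these helpers would presumably be chained like this (x and y stand for the raw arrays; the split_ratio and batch_size values are illustrative, not taken from the issue):

x_data, y_data, x_train, y_train, x_test, y_test = split_data(x, y, split_ratio=0.8)
train_loader, test_loader = data_generator(x_train, y_train, x_test, y_test, batch_size=128)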

Define the LSTM neural network

import torch.nn as nn

class LSTM(nn.Module):
    """
    Parameters:
    - input_size: feature size
    - hidden_size: number of hidden units
    - output_size: number of outputs
    - num_layers: number of stacked LSTM layers
    """

def __init__(self, input_size, hidden_size, output_size, num_layers):
    super().__init__()
    self.hidden_size = hidden_size
    self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
    self.dropout = nn.Dropout(p=0.2)
    self.linear1 = nn.Linear(hidden_size, hidden_size * 2)  # fully connected layer
    self.linear2 = nn.Linear(hidden_size * 2, output_size)  # fully connected layer
    self.num_directions = 1

def forward(self, _x):
    # batch_size, seq_len = _x.shape[0], _x.shape[1]
    # h_0 = torch.randn(self.num_directions * num_layers, batch_size, self.hidden_size).to(device)
    # c_0 = torch.randn(self.num_directions * num_layers, batch_size, self.hidden_size).to(device)
    # _x = _x.reshape((-1, 5, 7))
    x, _ = self.lstm(_x)  # _x is input; with batch_first=True its size is (batch, seq_len, input_size)
    x = self.dropout(x)
    x = self.linear1(x)
    x = self.linear2(x)
    x = x[:, -1, :]
    return x

This is the code for the LSTM model.
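
Not a confirmed fix, but one workaround often suggested for this class of error: make sure the layer handed to LayerIntegratedGradients returns a single tensor rather than nn.LSTM's (output, (h_n, c_n)) tuple. A sketch, using a hypothetical wrapper named LSTMOutputOnly:

import torch.nn as nn
from captum.attr import LayerIntegratedGradients

class LSTMOutputOnly(nn.Module):
    """Hypothetical wrapper: exposes only the LSTM's output tensor to hooks."""
    def __init__(self, lstm):
        super().__init__()
        self.lstm = lstm

    def forward(self, x):
        out, _ = self.lstm(x)  # drop (h_n, c_n) so forward hooks see one tensor
        return out

# In LSTM.__init__: self.lstm = LSTMOutputOnly(nn.LSTM(input_size, hidden_size, num_layers, batch_first=True))
# In LSTM.forward:  x = self.lstm(_x)  # now returns a single tensor
# lig = LayerIntegratedGradients(model, model.lstm)  # the hooked output is clonable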
