My model has an embedding layer, a convolution layer, and an LSTM layer, and I am using PyTorch. My question: when I pass pack = pack_padded_sequence(conv)
into the LSTM layer, I get the error RuntimeError: start (pack[0].size(0)) + length (1) exceeds dimension size (pack[0].size(0)).
Why do I get this error, and what does it mean? Please help.
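For reference, here is a minimal, self-contained sketch of the pattern I think I am following: pack a padded batch, then feed it to an LSTM (the shapes are made up, not my real data, and the lengths are sorted in descending order as pack_padded_sequence expects):

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence

padded = torch.randn(2, 5, 4)    # [batch, padded_len, features]
lengths = [5, 3]                 # true lengths, none larger than padded_len
packed = pack_padded_sequence(padded, lengths, batch_first=True)
lstm = nn.LSTM(input_size=4, hidden_size=8, batch_first=True)
_, (h, _) = lstm(packed)         # runs without error
print(h.size())                  # torch.Size([1, 2, 8])

My actual model, where the error occurs, is the following: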
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from typing import List


class CNNModel(nn.Module):
    def __init__(self, vocab_size: int, embed_size: int, out_size: int, filter_size: List[int],
                 hidden_size: int, layer_num: int, pad: int = 1):
        super().__init__()
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        self.pad = pad
        self.out_size = out_size
        self.filter_size = filter_size
        self.hidden_size = hidden_size
        self.layer_num = layer_num
        self.embed = nn.Embedding(
            num_embeddings=self.vocab_size,
            embedding_dim=self.embed_size,
            padding_idx=self.pad,
        )
        self.cnn = nn.Conv1d(
            in_channels=self.embed_size,
            out_channels=self.out_size,
            kernel_size=self.filter_size[0],
            bias=True,
        )
        self.lstm = nn.LSTM(
            input_size=self.out_size,
            hidden_size=self.hidden_size,
            num_layers=self.layer_num,
            bidirectional=True,
            batch_first=True,
        )

    def forward(self, batch):
        embed = self.embed(batch.word[0])  # [B(64), L(465), F(256)]
        embed = embed.transpose(1, 2)      # [B(64), F(256), L(465)] -- Conv1d wants channels first
        conv = self.cnn(embed)             # [B(64), F(64), L(465)]
        conv = conv.transpose(1, 2)        # [B(64), L(465), F(64)] -- back to batch_first for the LSTM
        encoding = pack_padded_sequence(conv,
                                        [tensor.item() for tensor in batch.word[1]],
                                        batch_first=True)
        print(f'encoding => {encoding[0].size()}')  # [8093, 64]
        print(f'encoding => {encoding[1].size()}')  # [465]
        _, (h, _) = self.lstm(encoding)    # the RuntimeError is raised here
        print(f'h => {h}')
And the resulting traceback is shown below.
File "/Users/user_name/anaconda3/lib/python3.7/site-packages/torch/nn/modules/rnn.py", line 182, in forward
self.num_layers, self.dropout, self.training, self.bidirectional)
RuntimeError: start (8093) + length (1) exceeds dimension size (8093).
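For what it's worth, I checked on a standalone toy tensor how the two sizes I print relate to the lengths (just me verifying my understanding of PackedSequence, with made-up shapes):

import torch
from torch.nn.utils.rnn import pack_padded_sequence

padded = torch.randn(3, 6, 10)     # [batch, padded_len, features]
lengths = [6, 4, 2]
packed = pack_padded_sequence(padded, lengths, batch_first=True)
print(packed.data.size())          # torch.Size([12, 10]) -> sum(lengths) rows
print(packed.batch_sizes.size())   # torch.Size([6])      -> max(lengths) steps

So I assume the 8093 in the traceback is encoding[0].size(0), the total number of packed timesteps, but I still do not understand why the LSTM complains that this dimension is exceeded.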