I'm new to PyTorch and I'm trying to build a model, but I get this error:
AttributeError                            Traceback (most recent call last)
<ipython-input-16-a0f31875b0ba> in <module>()
      1 for t in range(100):
      2     # Forward pass
----> 3     y_pred = model(X_train)
      4
      5     # Accuracy

AttributeError: 'numpy.ndarray' object has no attribute 'dim'
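If it helps, I can reproduce the same error by passing a NumPy array directly into a torch layer (a minimal sketch; the layer sizes here are arbitrary):

import numpy as np
import torch.nn as nn

layer = nn.Linear(5, 2)
# nn.Linear expects a torch.Tensor; older PyTorch versions call input.dim()
# inside F.linear, and a numpy.ndarray has no .dim() method
layer(np.zeros(5))  # AttributeError: 'numpy.ndarray' object has no attribute 'dim'

(Newer PyTorch versions raise a TypeError here instead, so the exact message may depend on the version.)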
Here is my code:
import random

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

x = np.array([2, 4, 6, 18, 20, 30, 50])
y = x * 2
print(x)
print(y)

shuffle_indices = torch.LongTensor(random.sample(range(0, len(x)), len(x)))
x = x[shuffle_indices]
y = y[shuffle_indices]
x = torch.from_numpy(x).float()
y = torch.from_numpy(y.ravel()).long()
# Split datasets
test_start_idx = int(len(x) * 0.75)
X_train = x[:test_start_idx]
y_train = y[:test_start_idx]
X_test = x[test_start_idx:]
y_test = y[test_start_idx:]
print("We have %i train samples and %i test samples." % (len(X_train), len(X_test)))
We have 5 train samples and 2 test samples.
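For reference, X_train at this point is a 1-D tensor of 5 elements:

print(X_train.shape)  # torch.Size([5])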
class MLP(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x_in, apply_softmax=False):
        a_1 = F.relu(self.fc1(x_in))
        y_pred = self.fc2(a_1)
        if apply_softmax:
            y_pred = F.softmax(y_pred, dim=1)
        return y_pred
model = MLP(input_dim=len(X_train),
            hidden_dim=100,
            output_dim=len(set(y)))
print(model.named_modules)
<bound method Module.named_modules of MLP(
  (fc1): Linear(in_features=7, out_features=100, bias=True)
  (fc2): Linear(in_features=100, out_features=7, bias=True)
)>
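Side note: print(model.named_modules) shows the bound method because named_modules is never called; print(model) gives the same layer summary directly:

print(model)
# MLP(
#   (fc1): Linear(in_features=7, out_features=100, bias=True)
#   (fc2): Linear(in_features=100, out_features=7, bias=True)
# )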
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)

def get_accuracy(y_pred, y_target):
    n_correct = torch.eq(y_pred, y_target).sum().item()
    accuracy = n_correct / len(y_pred) * 100
    return accuracy
for t in range(7):
    # Forward pass
    y_pred = model(X_train)

    # Accuracy
    _, predictions = y_pred.max(dim=1)
    accuracy = get_accuracy(y_pred=predictions.long(), y_target=y_train)

    loss = loss_fn(y_pred, y_train)
    if t % 20 == 0:
        print("epoch: {0} | loss: {1:.4f} | accuracy: {2:.1f}%".format(t, loss, accuracy))

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()