When I re-implement this Keras code in PyTorch, I get different training losses. Can anyone tell me where the problem is? Thanks a lot!
Keras code:
from keras.models import Sequential
from keras.layers import LSTM, Dense
from keras.optimizers import RMSprop

model_2d = Sequential()
model_2d.add(LSTM(
    input_shape=(SensorTrain.shape[1], SensorTrain.shape[2]),
    units=128,
))
model_2d.add(Dense(2))
model_2d.compile(optimizer=RMSprop(LR),
                 loss='mse', metrics=['acc'])
history = model_2d.fit(SensorTrain, location,
                       # validation_data=(Sensor_val, loc_val),
                       epochs=epoch, batch_size=100, verbose=1,
                       # shuffle=False,
                       )
PyTorch code:
import torch
import torch.nn as nn

class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_dim):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_dim)

    def forward(self, x):
        # Set initial hidden and cell states
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
        # Forward propagate the LSTM
        out, _ = self.lstm(x, (h0, c0))  # out: (batch_size, seq_length, hidden_size)
        # Decode the hidden state of the last time step
        out = self.fc(out[:, -1, :])
        return out

lstmmodel = LSTM(input_size, hidden_size, num_layers, output_dim).to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.RMSprop(lstmmodel.parameters(), lr=learning_rate)
total_step = len(train_loader)
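For completeness, the training loop that would correspond to the Keras fit call above is not shown in the snippet; a standard PyTorch loop for this setup would look roughly like the sketch below. Here train_loader is assumed to yield (sensors, locations) batches, and num_epochs and device are assumed to be defined elsewhere, so this is an illustration of the usual pattern rather than the exact code from the post.

# Sketch of a typical training loop (assumed, not from the original post)
for epoch_idx in range(num_epochs):
    for i, (sensors, locations) in enumerate(train_loader):
        sensors = sensors.to(device)
        locations = locations.to(device)

        # Forward pass: predict locations and compute MSE loss
        outputs = lstmmodel(sensors)
        loss = criterion(outputs, locations)

        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch_idx + 1, num_epochs, i + 1, total_step, loss.item()))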