Когда я вырезаю линию
tf.reshape(rewards_list, [-1, 25])
Я получаю сообщение об ошибке
ValueError: Cannot feed value of shape (1, 1, 25) for Tensor 'Placeholder_3:0', which has shape '(?, 25)'
Но когда я оставляю это там, я получаю сообщение об ошибке в заголовке.
ValueError: Tried to convert 'tensor' to a tensor and failed. Error: Argument must be a dense tensor: [array([[0.4758947]], dtype=float32)] - got shape [1, 1, 1], but wanted [1].
Я не понимаю, что происходит. Как может rewards_list быть обеими этими фигурами?
observations = tf.placeholder('float32', shape=[None, num_stops]) # Current game states : r[stop], r[next_stop], r[third_stop]
actions = tf.placeholder('int32',shape=[None]) # 0 - num-stops for actions taken
rewards = tf.placeholder('float32',shape=[None]) # +1, -1 with discounts
# Model
Y = tf.layers.dense(observations, 200, activation=tf.nn.relu)
Ylogits = tf.layers.dense(Y, num_stops)
# sample an action from predicted probabilities
sample_op = tf.random.categorical(logits=Ylogits, num_samples=1)
# loss
cross_entropies = tf.losses.softmax_cross_entropy(onehot_labels=tf.one_hot(actions,num_stops), logits=Ylogits)
loss = tf.reduce_sum(rewards * cross_entropies)
# training operation
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001, decay=.99)
train_op = optimizer.minimize(loss)
visited_stops = []
steps = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Start at a random stop, initialize done to false
current_stop = random.randint(0, len(r) - 1)
done = False
# reset everything
while not done: # play a game in x steps
observations_list = []
actions_list = []
rewards_list = []
# List all stops and their scores
observation = r[current_stop]
# Add the stop to a list of non-visited stops if it isn't
# already there
if current_stop not in visited_stops:
visited_stops.append(current_stop)
# decide where to go
action = sess.run(sample_op, feed_dict={observations: [observation]})
# play it, output next state, reward if we got a point, and whether the game is over
#game_state, reward, done, info = pong_sim.step(action)
new_stop = int(action)
reward = r[current_stop][action]
if len(visited_stops) == num_stops:
done = True
if steps >= BATCH_SIZE:
done = True
steps += 1
observations_list.append(observation)
actions_list.append(action)
rewards_list.append(reward)
#rewards_list = np.reshape(rewards, [-1, 25])
current_stop = new_stop
#processed_rewards = discount_rewards(rewards, args.gamma)
#processed_rewards = normalize_rewards(rewards, args.gamma)
tf.reshape(rewards_list, [-1, 25])
sess.run(train_op, feed_dict={observations: [observations_list],
actions: [actions_list],
rewards: rewards_list})