I have a simple network with several layers. I tried reducing the number of layers (and num_outputs) to speed up the model, but accuracy dropped as well. My question is how to speed up convergence while also improving accuracy. Is there something wrong in the implementation that makes the network so slow?
import numpy as np
import tensorflow as tf

learning_rate = 0.99
n_x = 50   # dimensionality of the GloVe vectors
n_y = 10   # number of score classes
lossList = []

g_tb = tf.Graph()

with g_tb.as_default():
    x = tf.placeholder(tf.float32, [None, n_x], name="x")  # GloVe vectors
    y = tf.placeholder(tf.float32, [None, n_y], name="y")  # scores
    with tf.name_scope('Neural_Nt'):
        fully_connected1 = tf.contrib.layers.fully_connected(inputs=x, num_outputs=500,
                                                             activation_fn=tf.nn.relu, scope="Fully_Conn1")
        fully_connected2 = tf.contrib.layers.fully_connected(inputs=fully_connected1, num_outputs=400,
                                                             activation_fn=tf.nn.relu, scope="Fully_Conn2")
        fully_connected3 = tf.contrib.layers.fully_connected(inputs=fully_connected2, num_outputs=300,
                                                             activation_fn=tf.nn.relu, scope="Fully_Conn3")
        fully_connected4 = tf.contrib.layers.fully_connected(inputs=fully_connected2, num_outputs=50,
                                                             activation_fn=tf.nn.relu, scope="Fully_Conn4")
        fully_connected5 = tf.contrib.layers.fully_connected(inputs=fully_connected2, num_outputs=10,
                                                             activation_fn=tf.nn.relu, scope="Fully_Conn5")
        prediction = tf.contrib.layers.fully_connected(inputs=fully_connected3, num_outputs=10,
                                                       activation_fn=tf.nn.softmax, scope="Out")
    with tf.name_scope('Cost'):
        cost = tf.losses.softmax_cross_entropy(onehot_labels=y, logits=prediction, scope="Cost_Function")

    with tf.name_scope('Accuracy'):
        correct_prediction = tf.equal(tf.argmax(prediction, 1, name="Argmax_Pred"),
                                      tf.argmax(y, 1, name="Y_Pred"),
                                      name="Correct_Pred")
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32, name="Cast_Corr_Pred"), name="Accuracy")

    with tf.name_scope('Optimization'):
        optimizer = tf.train.AdagradOptimizer(learning_rate, name="Optimizer").minimize(cost)

    init = tf.global_variables_initializer()
# Start session
with tf.Session(graph=g_tb) as sess:
    lossList = []
    sess.run(init)

    # Save the graph in the summary
    # summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

    training_epochs = 2000  # 4000
    for epoch in range(training_epochs):
        # Full-batch training: the entire dataset is fed on every epoch
        _, c, prediction1, acc = sess.run([optimizer, cost, prediction, accuracy],
                                          feed_dict={x: np.array(gloveVectors),
                                                     y: np.array(scores)})
        # For every epoch save cost and accuracy
        print(epoch, c)
        lossList.append(c)
        print(acc)
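
For example, would feeding mini-batches instead of the full dataset on every step make a difference here? A rough sketch of what I mean, reusing the graph built above (gloveVectors and scores are assumed to be NumPy arrays of shape [N, 50] and [N, 10]; batch_size is just an illustrative value):

# Rough sketch only: mini-batch version of the training loop above.
# Assumes gloveVectors has shape [N, 50] and scores has shape [N, 10].
X_data = np.array(gloveVectors)
Y_data = np.array(scores)
batch_size = 64            # illustrative value
n_samples = X_data.shape[0]

with tf.Session(graph=g_tb) as sess:
    sess.run(init)
    for epoch in range(200):
        # Shuffle once per epoch so batches differ between epochs
        perm = np.random.permutation(n_samples)
        X_shuf, Y_shuf = X_data[perm], Y_data[perm]
        epoch_cost = 0.0
        for start in range(0, n_samples, batch_size):
            end = start + batch_size
            _, c = sess.run([optimizer, cost],
                            feed_dict={x: X_shuf[start:end], y: Y_shuf[start:end]})
            epoch_cost += c
        # Evaluate accuracy on the full dataset after each epoch
        acc = sess.run(accuracy, feed_dict={x: X_data, y: Y_data})
        print(epoch, epoch_cost, acc)

Would something like this speed up convergence, or is the problem elsewhere in the graph itself?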