Here is a small TensorFlow solution:
# -*- coding: utf-8 -*-
import os

import tensorflow as tf
import numpy as np
import numpy.random as rd
import matplotlib.pyplot as plt
batch_size = 16

def generate_input(N=10000):
    # Three random source variables, mixed into the two network inputs z1, z2.
    x = rd.uniform(low=-10, high=10, size=(3, N))
    z1 = x[0] + x[1] - x[2]
    z2 = -x[0] + x[1] + x[2]
    # Each batch element is the same (2, N, 1) array.
    return [np.expand_dims(np.stack((z1, z2), axis=0), axis=2) for _ in range(batch_size)]

def target_func(data):
    # Target: (sin(z1)/z1) * (sin(z2)/z2) for every sample.
    return [(np.sin(x[0, :]) / x[0, :]) * (np.sin(x[1, :]) / x[1, :]) for x in data]
def main():
    x = tf.placeholder(tf.float32, shape=(batch_size, 2, None, 1), name='inputs')
    y = tf.placeholder(tf.float32, shape=(batch_size, None, 1), name='target')
    # Collapse the (z1, z2) pair to its sum, so the network only sees z1 + z2.
    x_sum = tf.reduce_sum(x, axis=1)
    fc1 = tf.layers.dense(inputs=x_sum, units=32, activation=tf.nn.relu, name="fc1")
    fc2 = tf.layers.dense(inputs=fc1, units=16, activation=tf.nn.relu, name="fc2")
    output = tf.layers.dense(inputs=fc2, units=1, activation=None, name="output")
    predict = output
    # Sum-of-squares regression loss.
    losses = tf.reduce_sum(tf.square(y - predict, name="loss"))
    train_step = tf.train.AdamOptimizer().minimize(losses)
    saver = tf.train.Saver()
    max_train_iter = 500
    os.makedirs('sin', exist_ok=True)  # checkpoints and plots are saved here
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(max_train_iter):
            inp = generate_input()
            target = target_func(inp)
            sess.run(train_step, feed_dict={x: inp, y: target})
            if i % (max_train_iter // 10) == 0:
                val_inp = generate_input()
                loss_val = sess.run(losses, feed_dict={x: val_inp, y: target_func(val_inp)})
                print('Step:%d, Loss:%f' % (i, loss_val))
                saver.save(sess, 'sin/model', global_step=i)
        # Evaluate on a fresh batch of 50 samples and plot the first batch element.
        test_inp = generate_input(50)
        truth = target_func(test_inp)
        pred = sess.run(predict, feed_dict={x: test_inp})
        plt.figure()
        # The x-axis is only an evenly spaced index; target and prediction are
        # compared pointwise at the same 50 random samples.
        rang = np.linspace(-10, 10, num=50)
        plt.plot(rang, truth[0], label='target')
        plt.plot(rang, pred[0], label='prediction')
        plt.legend()
        plt.savefig('sin/graph_' + str(i) + '.png')

if __name__ == '__main__':
    main()
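Note that this is written against the TensorFlow 1.x API (tf.placeholder, tf.layers, tf.Session). If you are on TensorFlow 2.x, a minimal shim, assuming your install still ships the tf.compat.v1 module, is to swap the import at the top:

# Sketch: run the 1.x-style graph code above on TF 2.x (assumes tf.compat.v1
# is available). Replace `import tensorflow as tf` with these two lines.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode, placeholders and sessions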
This does not give a good prediction; you will have to play with the architecture and the parameters (one possible direction is sketched below) to improve it. But it works. A sample test plot produced after training is shown below.
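For instance, one hypothetical tweak of mine (not a tuned result) is wider hidden layers together with an explicit Adam learning rate; this fragment would replace the corresponding lines inside main():

# Hypothetical variant, not tuned: wider layers and an explicit learning rate.
fc1 = tf.layers.dense(inputs=x_sum, units=128, activation=tf.nn.relu, name="fc1")
fc2 = tf.layers.dense(inputs=fc1, units=64, activation=tf.nn.relu, name="fc2")
train_step = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(losses)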