TensorFlow Serving MNIST - Python client with my own image
0 votes
28 August 2018

I am working on a simple TensorFlow MNIST recognition program. The goal is to load a jpg/png image that contains a handwritten digit and find out which digit it is. My friend wrote a model and exported it with a Python script:

import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
import time


iters_num = 1000
display_step = 10
batch = 100

tf.set_random_seed(0)

mnist = read_data_sets("MNISTdata", one_hot=True, reshape=False, validation_size=0)

# placeholders definition
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
Y_ = tf.placeholder(tf.float32, [None, 10])

# probability of keeping a node during dropout: 1.0 at test time (no dropout), 0.75 at training time
# NOTE: pkeep is fed in the sess.run calls below, but no tf.nn.dropout op actually uses it in this graph
pkeep = tf.placeholder(tf.float32)

# layers with size (depth) definition
layer1 = 16
layer2 = 32
layer3 = 64

# fully connected layer (number of neurons)
full_layer4 = 512

# layers definitions
W1 = tf.Variable(tf.truncated_normal([10, 10, 1, layer1], stddev=0.1))
b1 = tf.Variable(tf.truncated_normal([layer1], stddev=0.1))
W2 = tf.Variable(tf.truncated_normal([6, 6, layer1, layer2], stddev=0.1))
b2 = tf.Variable(tf.truncated_normal([layer2], stddev=0.1))
W3 = tf.Variable(tf.truncated_normal([6, 6, layer2, layer3], stddev=0.1))
b3 = tf.Variable(tf.truncated_normal([layer3], stddev=0.1))
W4 = tf.Variable(tf.truncated_normal([7 * 7 * layer3, full_layer4], stddev=0.1))
b4 = tf.Variable(tf.truncated_normal([full_layer4], stddev=0.1))
# output softmax layer (10 labels (for 10 digits))
W5 = tf.Variable(tf.truncated_normal([full_layer4, 10], stddev=0.1))
b5 = tf.Variable(tf.truncated_normal([10], stddev=0.1))

XX = tf.reshape(X, [-1, 784])  # unused in the model below


# model definition
stride = 1  # output is 28x28 (no size changes)
Y1 = tf.nn.relu(tf.nn.conv2d(X, W1, strides=[1, stride, stride, 1], padding='SAME') + b1)
k = 2 # max pool filter size
Y2 = tf.nn.relu(tf.nn.conv2d(Y1, W2, strides=[1, stride, stride, 1], padding='SAME') + b2)
Y2 = tf.nn.max_pool(Y2, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
Y3 = tf.nn.relu(tf.nn.conv2d(Y2, W3, strides=[1, stride, stride, 1], padding='SAME') + b3)
Y3 = tf.nn.max_pool(Y3, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
# reshape the output from the third convolution for the fully connected layer
YY = tf.reshape(Y3, shape=[-1, 7 * 7 * layer3])
Y4 = tf.nn.relu(tf.matmul(YY, W4) + b4)
Ylogits = tf.matmul(Y4, W5) + b5
Y = tf.nn.softmax(Ylogits)

# loss function -> cross entropy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy) * 100


# accuracy of the trained model <0,1>
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# training step definition with the Adam optimization algorithm
learning_rate = 0.003
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)

# matplotlib visualization
allweights = tf.concat([tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1]), tf.reshape(W4, [-1]), tf.reshape(W5, [-1])], 0)
allbiases = tf.concat([tf.reshape(b1, [-1]), tf.reshape(b2, [-1]), tf.reshape(b3, [-1]), tf.reshape(b4, [-1]), tf.reshape(b5, [-1])], 0)


# initializing all variables(!!)
init = tf.global_variables_initializer()

# lists for training values 
train_losses = list()
train_acc = list()
test_losses = list()
test_acc = list()

saver = tf.train.Saver()
time_start = time.clock() 

export_dir = "D:/Optinav.Testowy/trunk/HandwriteRecognition/DigitsRecognitionCNN/23"
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)

# launching computational graph
with tf.Session() as sess:
    sess.run(init)

    for i in range(iters_num + 1):
        # train on successive mini-batches
        batch_X, batch_Y = mnist.train.next_batch(batch)

        if i % display_step == 0:
            # compute training values for visualization of model steps
            acc_trn, loss_trn, w, b = sess.run([accuracy, cross_entropy, allweights, allbiases], feed_dict={X: batch_X, Y_: batch_Y, pkeep: 1.0}) 
            acc_tst, loss_tst = sess.run([accuracy, cross_entropy], feed_dict={X: mnist.test.images, Y_: mnist.test.labels, pkeep: 1.0})

            print("Step#{} Train accuracy={} , Train loss={} Test accuracy={} , Test loss={}".format(i,acc_trn,loss_trn,acc_tst,loss_tst))
            train_losses.append(loss_trn)
            train_acc.append(acc_trn)
            test_losses.append(loss_tst)
            test_acc.append(acc_tst)
        # the back-propagation training step (probability = 0.75)
        sess.run(train_step, feed_dict={X: batch_X, Y_: batch_Y, pkeep: 0.75})
        # save a checkpoint (note: this runs on every iteration)
        saver.save(sess, "D:/Optinav.Testowy/trunk/HandwriteRecognition/DigitsRecognitionCNN/model2.ckpt")

    classification_inputs = tf.saved_model.utils.build_tensor_info(X)
    # NOTE: Y_ is the labels placeholder, not a computed prediction; the client
    # below only uses the 'predict_images' signature, so this classify
    # signature is not exercised
    classification_outputs_classes = tf.saved_model.utils.build_tensor_info(Y_)
    classification_outputs_scores = tf.saved_model.utils.build_tensor_info(Y)

    classification_signature = (
        tf.saved_model.signature_def_utils.build_signature_def(
            inputs={
                tf.saved_model.signature_constants.CLASSIFY_INPUTS:
                    classification_inputs
            },

            outputs={
                tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:
                    classification_outputs_classes,
                tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:
                    classification_outputs_scores
            },
            method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME))

    tensor_info_x = tf.saved_model.utils.build_tensor_info(X)
    tensor_info_y = tf.saved_model.utils.build_tensor_info(Y)

    prediction_signature = (
        tf.saved_model.signature_def_utils.build_signature_def(
            inputs={'images': tensor_info_x},
            outputs={'scores': tensor_info_y},
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))
    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            'predict_images':
                prediction_signature,
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                classification_signature,
        },
        main_op=tf.tables_initializer(),
        strip_default_attrs=True)

    builder.save()
    print('Done exporting!')

# calculates learning time [s]
time_stop = time.clock()
time_run = time_stop - time_start
print("Learning time: %s" % time_run)

The exported model is placed in a Docker container that runs on Ubuntu in Azure.
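A typical way to serve it (a sketch; the standard tensorflow/serving image, the paths, and the model name 'ver2' used by the client below are assumptions) is to mount the parent of the numbered export directory as the model base path, since TensorFlow Serving treats the trailing /23 as the model version:

docker run -p 8500:8500 \
    --mount type=bind,source=/path/to/DigitsRecognitionCNN,target=/models/ver2 \
    -e MODEL_NAME=ver2 -t tensorflow/serving

Using the TensorFlow Serving example (GitHub) as a starting point, I wrote a Python client: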

from __future__ import print_function

import grpc
import tensorflow as tf
import scipy.ndimage
import numpy

from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc

tf.app.flags.DEFINE_string('server', '', 'PredictionService host:port')
tf.app.flags.DEFINE_string('image_filename', '', 'Name of image to test')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory. ')
FLAGS = tf.app.flags.FLAGS


class _ResultObj(object):
    def __init__(self):
        self._number = 99

    def get_number(self):
        return self._number

    def set_number(self, val):
        self._number = val


def myfunc(a):
    # crude contrast filter: saturate bright pixels, keep mid-range values, zero out dark ones
    if a > 125:
        return 255
    elif a > 70:
        return a
    else:
        return 0


def _create_rpc_callback(resultobj):

    def _callback(result_future):
        exception = result_future.exception()
        if exception:
            print(exception)
        else:
            response = numpy.array(
                result_future.result().outputs['scores'].float_val)
            prediction = numpy.argmax(response)
            resultobj.set_number(prediction)
    return _callback


def do_inference(hostport, image_filename):
    # read the image as grayscale, flatten it, and run the contrast filter over every pixel
    vfunc = numpy.vectorize(myfunc)
    test_data_set = vfunc(numpy.ndarray.flatten(scipy.ndimage.imread(image_filename, flatten=True)).astype(int))
    test_data_set = numpy.reshape(test_data_set, [28, 28, 1])
    channel = grpc.insecure_channel(hostport)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    resultObj = _ResultObj()
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'ver2'
    request.model_spec.signature_name = 'predict_images'
    request.inputs['images'].CopyFrom(
            tf.contrib.util.make_tensor_proto(test_data_set, shape=[1, test_data_set.size]))
    result_future = stub.Predict.future(request, 5.0)  # 5 seconds
    result_future.add_done_callback(
            _create_rpc_callback(resultObj))
    return resultObj.get_number()


def main(_):
    if not FLAGS.server:
        print('please specify server host:port')
        return
    print(do_inference(FLAGS.server, FLAGS.image_filename))


if __name__ == '__main__':
    tf.app.run()
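
The client is invoked like this (the script name is arbitrary; 8500 is the usual TensorFlow Serving gRPC port):

python mnist_client.py --server=<host>:8500 --image_filename=digit.png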

Problem

I am sure that my friend's model can recognize images. She told me to vectorize the image before adding it to the request, but it seems I cannot connect to the model properly. I get no error when I run my script. TensorFlow Serving does not even object when I put in a wrong model name. _callback is never called, and I only get the initial value of my _ResultObj.
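
For comparison, the synchronous variant of the same call blocks until the server answers or the 5-second deadline expires. A minimal sketch, reusing the request object built in do_inference:

result = stub.Predict(request, 5.0)  # blocks instead of returning a future
scores = numpy.array(result.outputs['scores'].float_val)
print(numpy.argmax(scores))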

Any help with this problem would be appreciated.

Thank you.
