Я дообучаю крошечную модель ALBERT (задача классификации). После обучения на 133841 шагов я делаю прогноз; точность классификации составляет 82,5%. Я добавил estimator.export_savedmodel в свой код, чтобы сохранить модель в формате .pb.
Затем я использовал эту .pb-модель в TensorFlow Serving; клиент тоже делает прогнозы. Однако точность падает на 0,7%. Не могли бы вы помочь мне найти причину?
Ниже приведён мой client.py:
from __future__ import print_function
import grpc
import requests
import tensorflow as tf
import tokenization
import numpy as np
import time
from run_classifier_albert import *
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from tensorflow.contrib.util import make_tensor_proto
# Command-line flags. --server must be a "host:port" address of the
# TensorFlow Serving gRPC endpoint.
# BUG FIX: the original default '0.0.0.0:localhost' is not a valid
# host:port pair ("localhost" is not a port number); 8500 is the
# conventional TF Serving gRPC port.
tf.app.flags.DEFINE_string('server', 'localhost:8500',
                           'PredictionService host:port')
FLAGS = tf.app.flags.FLAGS

# Empty data_dir: MyProcessor.get_test_examples presumably resolves its
# own path — TODO confirm against run_classifier_albert.
data_dir = ""
processor = MyProcessor()
label_list = processor.get_labels()
# NOTE(review): do_lower_case must match the value used during
# training/export, otherwise client-side tokenization diverges from the
# training pipeline — confirm against the training config.
tokenizer = tokenization.FullTokenizer(
    vocab_file='./vocab.txt', do_lower_case=False)
def main(_):
    """Run the test set through a served Albert model and record predictions.

    Connects to a TensorFlow Serving instance over gRPC, converts every
    test example into BERT-style features (max_seq_length=128), sends one
    Predict RPC per example, and writes
    ``predicted_label <TAB> max_probability <TAB> latency_seconds`` lines
    to ``predict_result_full_convert.txt``.

    Args:
        _: unused argv list supplied by ``tf.app.run``.
    """
    examples = processor.get_test_examples(data_dir)
    print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n", len(examples))

    channel = grpc.insecure_channel(FLAGS.server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    # A single request object is reused for all examples; each input is
    # fully overwritten every iteration via CopyFrom, so no stale data
    # leaks between examples.
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'Albert'
    request.model_spec.signature_name = 'serving_default'

    with open("predict_result_full_convert.txt", 'w', encoding="utf-8") as f1:
        for (ex_index, example) in enumerate(examples):
            feature = convert_single_example(ex_index, example, label_list,
                                             128, tokenizer)

            request.inputs["label_ids"].CopyFrom(
                make_tensor_proto(feature.label_id))
            request.inputs["input_ids"].CopyFrom(
                make_tensor_proto([feature.input_ids], shape=[1, 128]))
            request.inputs["input_mask"].CopyFrom(
                make_tensor_proto([feature.input_mask], shape=[1, 128]))
            # BUG FIX: the original code sent input_mask as the
            # "segment_ids" tensor. Feeding the attention mask into the
            # token-type-embedding input corrupts every prediction and is
            # the likely cause of the accuracy drop after export.
            request.inputs["segment_ids"].CopyFrom(
                make_tensor_proto([feature.segment_ids], shape=[1, 128]))

            start_time = time.time()
            result = stub.Predict(request, 10.0)  # 10-second RPC deadline
            cost_time = time.time() - start_time

            probabilities = result.outputs['probabilities'].float_val
            max_score = max(probabilities)
            # .index() replaces the original manual equality-scan loop.
            target_index = list(probabilities).index(max_score)
            predict_label = label_list[target_index]

            # Lightweight progress indicator.
            if ex_index % 10000 == 0:
                print("##################################\n", ex_index)

            f1.write(str(predict_label) + '\t' + str(max_score)
                     + '\t' + str(cost_time))
            f1.write('\n')
    # No explicit f1.close(): the with-statement closes the file.
if __name__ == '__main__':
    # tf.app.run parses the defined flags, then dispatches to main(argv).
    tf.app.run(main=main)