TensorFlow 2.2.0 C++: cannot load a .pb correctly
1 vote
/ June 19, 2020

I am trying to load a protobuf graph, saved with a Python script, in order to run inference through the C++ API.

The Python script:

#!/usr/bin/env python3

from __future__ import print_function
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import os, sys

tf.keras.backend.clear_session()

physical_devices = tf.config.experimental.list_physical_devices('GPU')
for dev in physical_devices:
  try:
    tf.config.experimental.set_memory_growth(dev, True)
    print(dev, "SET MEMORY GROWTH")
    #tf.config.set_logical_device_configuration(dev, [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2048)])
    print(tf.config.get_logical_device_configuration(dev))
  except:
    print("Device config error")
    sys.exit(1)


batch_size = 512
num_classes = 10
epochs = 50
data_augmentation = True

# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

# initiate the Adam optimizer
opt = tf.keras.optimizers.Adam(learning_rate=0.001)

# Train the model using Adam
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

print(model.summary())

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255.
x_test /= 255.

print(x_train.shape[0])

if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              steps_per_epoch=((x_train.shape[0] // batch_size)),
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              validation_steps=((x_test.shape[0] // batch_size)),
              shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        zca_epsilon=1e-06,  # epsilon for ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        # randomly shift images horizontally (fraction of total width)
        width_shift_range=0.1,
        # randomly shift images vertically (fraction of total height)
        height_shift_range=0.1,
        shear_range=0.,  # set range for random shear
        zoom_range=0.,  # set range for random zoom
        channel_shift_range=0.,  # set range for random channel shifts
        # set mode for filling points outside the input boundaries
        fill_mode='nearest',
        cval=0.,  # value used for fill_mode = "constant"
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False,  # randomly flip images
        # set rescaling factor (applied before any other transformation)
        rescale=None,
        # set function that will be applied on each input
        preprocessing_function=None,
        # image data format, either "channels_first" or "channels_last"
        data_format=None,
        # fraction of images reserved for validation (strictly between 0 and 1)
        validation_split=0.0)

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)

    # Fit the model on the batches generated by datagen.flow().
    model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
              steps_per_epoch=((x_train.shape[0] // batch_size)),
              epochs=epochs,
              validation_steps=((x_test.shape[0] // batch_size)),
              validation_data=(x_test, y_test),
              shuffle=True)
print("fitted")

model.save(save_format='tf', filepath='../../graphs/test0', include_optimizer=True)
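# note: save_format='tf' writes a SavedModel directory here (saved_model.pb plus a variables/ subfolder), not a single frozen GraphDef .pb file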
print("saved")

model1 = tf.keras.models.load_model('../../graphs/test0')
print(model1.summary())

print("Done")

The C++ code is:

#include <stdlib.h>

#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#include "class_name.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"

using namespace tensorflow;
using tensorflow::Flag;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::Tensor;

//Read the image file and apply the appropriate decoding depending on the image type
int TensorFromFile(string filename, const int i_height, const int i_width, std::vector<Tensor>* o_tensors) {
  tensorflow::Status status;
  auto root = tensorflow::Scope::NewRootScope();
  using namespace ::tensorflow::ops;
  std::unique_ptr<tensorflow::Session> session(tensorflow::NewSession({}));
  tensorflow::GraphDef graph;

  auto reader = tensorflow::ops::ReadFile(root.WithOpName("img_reader"), filename);
  const int channels = 1;
  tensorflow::Output imgreader;

  if (tensorflow::str_util::EndsWith(filename, ".png")) {
    imgreader = DecodePng(root.WithOpName("png_reader"), reader, DecodePng::Channels(channels));
  } else if (tensorflow::str_util::EndsWith(filename, ".gif")) {
    imgreader = DecodeGif(root.WithOpName("gif_reader"), reader);
  } else {
    imgreader = DecodeJpeg(root.WithOpName("jpeg_reader"), reader, DecodeJpeg::Channels(channels));
  }

  auto f_caster = Cast(root.WithOpName("float_caster"), imgreader, tensorflow::DT_FLOAT);
  ExpandDims(root.WithOpName("output"), f_caster, 0);

  status = root.ToGraphDef(&graph);
  if (!status.ok()) {
    LOG(ERROR) << status.ToString();
    return -1;
  }

  status = session->Create(graph);
  if (!status.ok()) {
    LOG(ERROR) << status.ToString();
    return -1;
  }

  status = session->Run({}, {"output"}, {}, o_tensors);
  if (!status.ok()) {
    LOG(ERROR) << status.ToString();
    return -1;
  }

  return 0;
}

int main(int argc, char* argv[]) {
  using namespace ::tensorflow::ops;
  tensorflow::Status status;

  std::string delimiter = ".";
  std::string ofilename;
  std::vector<Tensor> inputs;
  std::vector<Tensor> outputs;

  std::string graph_path = "../../graphs/test0";
  std::string image_path = "../../graphs/test0.png";

  std::string mdlpath(graph_path);
  std::string imgpath(image_path);
  int32 inputdim = 32;

  std::unique_ptr<tensorflow::Session> session(tensorflow::NewSession({}));
  tensorflow::GraphDef graph;

  LOG(INFO) << "OK";

  //read model file
  status = ReadBinaryProto(Env::Default(), mdlpath, &graph);
  if (!status.ok()) {
    std::cout << status.ToString() << "\n";
    return -1;
  }

  LOG(INFO) << "STATUS: " << status.ToString();
  LOG(INFO) << "OK";

  //add graph to scope
  status = session->Create(graph);
  if (!status.ok()) {
    std::cout << status.ToString() << "\n";
    return -1;
  }

  LOG(INFO) << status.ToString();
  LOG(INFO) << "OK";

  //Read the input image, assumed to be a square image
  if (TensorFromFile(imgpath, inputdim, inputdim, &inputs)) {
    LOG(ERROR) << "Image reading failed"
               << "\n";
    return -1;
  }

  LOG(INFO) << "OK L1";

  std::cout << "input dimension of the image: " << inputs[0].DebugString() << std::endl;
  std::cout << "v: " << graph.version() << std::endl;
  std::cout << "ns: " << graph.node_size() << std::endl << std::endl;
  auto shape = graph.node().Get(0).attr().at("shape").shape();
  for (int i = 0; i < shape.dim_size(); i++) {
      std::cout << shape.dim(i).size()<<std::endl;
  }

  //get the appropriate input and output layer names from the graph/model to execute
  auto inputlayer = graph.node(0).name();
  LOG(INFO) << "OK A1";
  auto outputlayer = graph.node(graph.node_size() - 1).name();
  LOG(INFO) << "OK A2";

  status = session->Run({{inputlayer, inputs[0]}}, {outputlayer}, {}, &outputs);
  if (!status.ok()) {
    LOG(ERROR) << status.ToString();
    return -1;
  }

  std::cout << "Output dimension of the image" << outputs[0].DebugString() << std::endl;

  //create filename
  ofilename.append(imgpath.substr(0, imgpath.find(delimiter)));
  ofilename.append("_mask.png");

  std::cout << "output filename: " << ofilename << std::endl;

  //Now write this to a image file
  //if (TensorToFile(ofilename, outputs, threshold)) return -1;

  session->Close();

  return 0;
}

And then I run:

LD_LIBRARY_PATH="/opt/tf_cpp/lib" ./test

With the following output:

2020-06-19 14:13:51.120484: I tensorflow/core/platform/profile_utils/cpu_utils.cc:102] CPU Frequency: 3399905000 Hz
2020-06-19 14:13:51.120742: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x561cd4f5fe20 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2020-06-19 14:13:51.120755: I tensorflow/compiler/xla/service/service.cc:176]   StreamExecutor device (0): Host, Default Version
2020-06-19 14:13:51.120974: I /home/niccolo/Documents/CodeBlocks/Prova/main.cpp:95] OK
2020-06-19 14:13:51.121005: I /home/niccolo/Documents/CodeBlocks/Prova/main.cpp:104] STATUS: OK
2020-06-19 14:13:51.121010: I /home/niccolo/Documents/CodeBlocks/Prova/main.cpp:105] OK
2020-06-19 14:13:51.121016: I /home/niccolo/Documents/CodeBlocks/Prova/main.cpp:114] OK
2020-06-19 14:13:51.121020: I /home/niccolo/Documents/CodeBlocks/Prova/main.cpp:115] OK
2020-06-19 14:13:51.123531: I /home/niccolo/Documents/CodeBlocks/Prova/main.cpp:124] OK L1
input dimension of the image: Tensor<type: float shape: [1,32,32,1] values: [[[169][131][100]]]...>
v: 0
ns: 0

[libprotobuf FATAL /opt/tpt/tf_cpp/include/src/google/protobuf/repeated_field.h:1535] CHECK failed: (index) < (current_size_): 
terminate called after throwing an instance of 'google::protobuf::FatalException'
  what():  CHECK failed: (index) < (current_size_): 
Aborted (core dumped)

I followed these tutorials and guides:

(I could not find a single good, complete guide.)

What could be causing this error?
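For completeness, here is a minimal sketch of the other loading path I am aware of, via tensorflow/cc/saved_model/loader.h, in case the directory written by model.save(save_format='tf') is a SavedModel rather than a single GraphDef .pb. This is only my assumption of how it would look (the "serve" tag and the comments about the signature map come from the SavedModel docs, not from a working build of mine):

#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/cc/saved_model/tag_constants.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/session.h"

// Sketch: load the directory produced by model.save(save_format='tf') as a
// SavedModel bundle instead of reading a GraphDef with ReadBinaryProto.
int LoadBundleSketch() {
  tensorflow::SavedModelBundle bundle;
  tensorflow::SessionOptions session_options;
  tensorflow::RunOptions run_options;

  // Pass the SavedModel directory itself, not a .pb file inside it.
  tensorflow::Status status = tensorflow::LoadSavedModel(
      session_options, run_options, "../../graphs/test0",
      {tensorflow::kSavedModelTagServe}, &bundle);
  if (!status.ok()) {
    LOG(ERROR) << status.ToString();
    return -1;
  }

  // bundle.session is the tensorflow::Session to call Run() on;
  // bundle.meta_graph_def.signature_def() holds the input/output tensor names.
  return 0;
}

Even if that is the right route for a tf.keras model, I would still like to understand why ReadBinaryProto reports OK on the directory path while the resulting GraphDef is empty (v: 0, ns: 0 in the output above), which seems to be where the CHECK failure comes from.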

Thanks
