I have a neural network (a GAN that saves an image generated from random noise) which takes a random tensor as input at inference time. My model, or GraphDef, in .pb format shows the following input and output signature when inspected with saved_model_cli:
The given SavedModel SignatureDef contains the following input(s):
inputs['dense_1_input'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 100)
name: serving_default_dense_1_input:0
The given SavedModel SignatureDef contains the following output(s):
outputs['conv2d_2'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 28, 28, 1)
name: StatefulPartitionedCall:0
Method name is: tensorflow/serving/predict
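For reference, the signature above is what I get from running saved_model_cli on the exported model directory, with something like:

saved_model_cli show --dir generator_model_final --tag_set serve --signature_def serving_default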
In my Python inference file I have a function (generate_latent_data) that creates a simple random normal vector:
def generate_latent_data(latent_dim, num_samples):
"""
Prepare latent dimensions for Generator.
It creates random gaussian values for "latent_dim" dimensions.
The number of dimensions can be changed.
:return: random latent data
"""
x_input_generator = randn(latent_dim * num_samples)
x_input_generator = x_input_generator.reshape(num_samples, latent_dim)
return x_input_generator
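For the C++ port described below, I think the equivalent of this function is simply a DT_FLOAT tensor of shape (num_samples, latent_dim) filled with standard normal values. This is the rough, untested helper I have in mind (the name MakeLatentTensor is my own; it uses std::normal_distribution instead of numpy.random.randn):

#include <random>

#include "tensorflow/core/framework/tensor.h"

tensorflow::Tensor MakeLatentTensor(int latent_dim, int num_samples) {
  // Same shape as the (-1, 100) input expected by the SavedModel signature.
  tensorflow::Tensor latent(tensorflow::DT_FLOAT,
                            tensorflow::TensorShape({num_samples, latent_dim}));
  std::mt19937 gen(std::random_device{}());
  std::normal_distribution<float> dist(0.0f, 1.0f);
  auto flat = latent.flat<float>();
  for (int i = 0; i < flat.size(); ++i) {
    flat(i) = dist(gen);  // standard normal values, like randn in numpy
  }
  return latent;
}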
Now I am trying to deploy my inference file (Python) as a C++ executable. I have already managed to build TensorFlow with bazel. At the moment I am working on the translation into a gan_loader.cc file, which will later also be built with bazel. I am including both files I have so far so that you can compare them:
inference.py
from pathlib import Path
from numpy.random import randn
from matplotlib import pyplot as plt
from tensorflow.keras.models import load_model
# ================ #
LATENT_DIM = 100
SAMPLES_PER_ROW = 5
# ================ #
def generate_latent_data(latent_dim, num_samples):
"""
Prepare latent dimensions for Generator.
It creates random gaussian values for "latent_dim" dimensions.
The number of dimensions can be changed.
:return: random latent data
"""
x_input_generator = randn(latent_dim * num_samples)
x_input_generator = x_input_generator.reshape(num_samples, latent_dim)
return x_input_generator
def save_fig_inference(image, row_num_images=10):
"""
Save generated "fake" images during inference in root directory when project is located.
Each time is called, it will save a set of subplots (size: row_num_images ** 2) with grayscale generated images.
Function used as well for the inference.
:return: fake dataset X and fake labels Y
"""
filename = "generated_images_inference/generated_image_inference.png"
for i in range(row_num_images * row_num_images):
plt.subplot(row_num_images, row_num_images, 1 + i)
plt.axis("off")
plt.imshow(image[i, :, :, 0], cmap="gray_r")
plt.savefig(filename)
plt.close()
# Create folder for images
print("[INFO] Create folder for saving images during inference...")
Path("generated_images_inference").mkdir(parents=True, exist_ok=True)
# Load pre-trained Keras model
print("[INFO] Loading pre-trained model...")
#gan_model = load_model('generator_model_015.h5')
gan_model = load_model('generator_model_final')
# Generate input for Generator
print("[INFO] Generating latent data...")
x_latent = generate_latent_data(LATENT_DIM, 25)
# Inference
print("[INFO] Creating and saving prediction...")
generated_image = gan_model.predict(x_latent)
save_fig_inference(generated_image, SAMPLES_PER_ROW)
gan_loader.cc
/*
The given SavedModel SignatureDef contains the following input(s):
inputs['dense_1_input'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 100)
name: serving_default_dense_1_input:0
The given SavedModel SignatureDef contains the following output(s):
outputs['conv2d_2'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 28, 28, 1)
name: StatefulPartitionedCall:0
Method name is: tensorflow/serving/predict
*/
#include <fstream>
#include <utility>
#include <vector>
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"
// These are all common classes it's handy to reference with no namespace.
using tensorflow::Flag;
using tensorflow::int32;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::Tensor;
using tensorflow::tstring;
Status CreateLatentSpace(const int latent_dim, const int num_samples) {
  /*
  TODO: Create random vector, equivalent to generate_latent_data in python file
  */
}
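// NOTE: the LoadGraph helper (as in the tensorflow/examples/label_image
// example) is not shown in this listing.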
int main(int argc, char* argv[]) {
  // These are the command-line flags the program can understand.
  // They define where the graph and input data is located, and what kind of
  // input the model expects. If you train your own model, or use something
  // other than inception_v3, then you'll need to update these.
  string graph =
      "generator_model_final/saved_model.pb";
  int32 latent_dim = 100;
  int32 samples_per_row = 5;
  int32 num_samples = 25;
  string input_layer = "serving_default_dense_1_input";
  string output_layer = "StatefulPartitionedCall";
  string root_dir = "";
  std::vector<Flag> flag_list = {
      Flag("graph", &graph, "graph to be executed"),
      Flag("latent_dim", &latent_dim, "latent dimensions"),
      Flag("samples_per_row", &samples_per_row, "samples per row"),
      Flag("num_samples", &num_samples, "number of samples"),
      Flag("input_layer", &input_layer, "name of input layer"),
      Flag("output_layer", &output_layer, "name of output layer"),
      Flag("root_dir", &root_dir,
           "interpret image and graph file names relative to this directory"),
  };
  string usage = tensorflow::Flags::Usage(argv[0], flag_list);
  const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
  if (!parse_result) {
    LOG(ERROR) << usage;
    return -1;
  }

  // We need to call this to set up global state for TensorFlow.
  tensorflow::port::InitMain(argv[0], &argc, &argv);
  if (argc > 1) {
    LOG(ERROR) << "Unknown argument " << argv[1] << "\n" << usage;
    return -1;
  }

  // First we load and initialize the model.
  std::unique_ptr<tensorflow::Session> session;
  string graph_path = tensorflow::io::JoinPath(root_dir, graph);
  Status load_graph_status = LoadGraph(graph_path, &session);
  if (!load_graph_status.ok()) {
    LOG(ERROR) << load_graph_status;
    return -1;
  }

  // TODO: Call function to create latent space
  // TODO: Run the latent space through the model
  // TODO: Save the figure
  return 0;
}
I have already done the variable initialization and the graph loading. However, I am still trying to figure out how to create the random vector, run it through the model, and save the resulting figure in C++. Could you give me some guidance or advice? Do you have an example of how this can be done?
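For what it is worth, this is the rough, untested sketch I have in mind for the three TODOs in main(), assuming the session returned by LoadGraph is valid (I am not sure whether a TF2 SavedModel can be loaded that way, or whether LoadSavedModel from tensorflow/cc/saved_model/loader.h is needed instead). The ":0" suffixes come from the tensor names printed by saved_model_cli, and instead of the matplotlib subplot grid I simply dump each 28x28 image as a plain ASCII PGM file:

// Extra includes needed for this sketch: <algorithm>, <string> (plus <random>
// for the MakeLatentTensor helper sketched earlier).

// 1) Create the latent space (equivalent of generate_latent_data).
tensorflow::Tensor latent = MakeLatentTensor(latent_dim, num_samples);

// 2) Run it through the generator.
std::vector<tensorflow::Tensor> outputs;
Status run_status = session->Run({{input_layer + ":0", latent}},
                                 {output_layer + ":0"}, {}, &outputs);
if (!run_status.ok()) {
  LOG(ERROR) << "Running model failed: " << run_status;
  return -1;
}

// 3) Save each generated 28x28x1 image as an 8-bit grayscale PGM file,
//    a simple stand-in for the matplotlib subplots.
auto images = outputs[0].tensor<float, 4>();  // shape: (num_samples, 28, 28, 1)
for (int n = 0; n < num_samples; ++n) {
  std::ofstream pgm("generated_image_" + std::to_string(n) + ".pgm");
  pgm << "P2\n28 28\n255\n";
  for (int y = 0; y < 28; ++y) {
    for (int x = 0; x < 28; ++x) {
      // I assume the generator ends in tanh, so values are roughly in [-1, 1];
      // rescale to [0, 255] (use v * 255.0f instead if the range is [0, 1]).
      float v = (images(n, y, x, 0) + 1.0f) * 127.5f;
      pgm << static_cast<int>(std::min(255.0f, std::max(0.0f, v))) << " ";
    }
    pgm << "\n";
  }
}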
Thank you!