Running a PyTorch model in a C++ environment via TorchScript
0 votes
30 January 2020

I traced my PyTorch model to a ScriptModule using TorchScript. The input consists of 5 tensors (the same 5 tensors were used to trace the model). Below is the code I use to run a prediction with this input in C++ through the Script module, but it still fails with an error and I don't know how to fix it. Please help me. Thanks.

#include <torch/script.h> // One-stop header.
#include <iostream>
#include <memory>

int main(int argc, const char* argv[]) {
    if (argc != 2) {
        std::cerr << "usage: example-app <path-to-exported-script-module>\n";
        return -1;
    }


    torch::jit::script::Module module;
    try {
        // Deserialize the ScriptModule from a file using torch::jit::load().
        module = torch::jit::load(argv[1]);
    }
    catch (const c10::Error& e) {
        std::cerr << "error loading the model\n";
        return -1;
    }
    std:: cout << "0\n";

    std::cout << "ok\n";

    std:: cout << "1\n";

    // Create a vector of inputs.
    std::cout << "1\n";
    std::vector<torch::jit::IValue> inputs;
    std::cout << "2\n";

    at::Tensor f_f = torch::tensor({269,  90,  32, 269,  65,  85,  17, 269, 104,  13,   4,  21,  13, 269, 15,  95,   5, 269,  41,  30,  21,  29, 270, 270});

    at::Tensor f_p = torch::tensor({3,  7, 13, 17, 22, 23});

    at::Tensor b_f = torch::tensor({270, 270,  29,  21,  30,  41, 269,   5,  95,  15, 269,  13,  21,   4, 13, 104, 269,  17,  85,  65, 269,  32,  90, 269});

    at::Tensor b_p = torch::tensor({23, 20, 16, 10,  6,  1});

    at::Tensor w_f = torch::tensor({1020, 1083, 4027, 3087,  262, 8765});

    std::cout <<"3\n";
    inputs.push_back(f_f);
    inputs.push_back(f_p);
    inputs.push_back(b_f);
    inputs.push_back(b_p);
    inputs.push_back(w_f);

    std::cout << "input tensor successfully \n";
    // The error occurs at the line below.
    at::Tensor output = module.forward(inputs).toTensor();
    std::cout << "hehe";

    return 0;
}

and the error:

input tensor successfully.
terminate called after throwing an instance of 'std::runtime_error'
  what():  Dimension out of range (expected to be in range of [-1, 0], but got 1)
The above operation failed in interpreter, with the following stack trace:
at code/__torch__/lm_lstm_crf/model/lm_lstm_crf.py:58:31
_17 = _10.weight_ih_l0
_18 = _10.weight_ih_l0_reverse
_19 = self.crf.hidden2tag
weight1 = _19.weight
bias = _19.bias
_20 = ops.prim.NumToTensor(torch.size(sentence, 0))
_21 = int(_20)
_22 = int(_20)
_23 = int(_20)
_24 = ops.prim.NumToTensor(torch.size(sentence, 1))
                           ~~~~~~~~~~ <--- HERE
_25 = int(_24)
_26 = int(_24)
_27 = int(_24)
input2 = torch.embedding(weight, input, -1, False, False)
input3 = torch.embedding(weight, input0, -1, False, False)
input4 = torch.dropout(input2, 0.55000000000000004, False)
input5 = torch.dropout(input3, 0.55000000000000004, False)
max_batch_size = ops.prim.NumToTensor(torch.size(input4, 1))
hx = torch.zeros([1, int(max_batch_size), 300], dtype=6, layout=0, device=torch.device("cpu"), 
pin_memory=False)
Compiled from code /home/bao/Desktop/segment_vtcc_test/lm_lstm_crf/model/lm_lstm_crf.py(90): 
set_batch_seq_size
 /home/bao/Desktop/segment_vtcc_test/lm_lstm_crf/model/lm_lstm_crf.py(211): forward
 /home/bao/anaconda3/envs/env_test/lib/python3.6/site-packages/torch/nn/modules/module.py(525): 
 _slow_forward
 /home/bao/anaconda3/envs/env_test/lib/python3.6/site-packages/torch/nn/modules/module.py(539): 
 __call__
 /home/bao/anaconda3/envs/env_test/lib/python3.6/site-packages/torch/jit/__init__.py(997): 
 trace_module
 /home/bao/anaconda3/envs/env_test/lib/python3.6/site-packages/torch/jit/__init__.py(858): trace
 /home/bao/Desktop/segment_vtcc_test/convert_model_1.py(101): <module>
 Aborted (core dumped)
...
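The message "Dimension out of range (expected to be in range of [-1, 0], but got 1)" at torch.size(sentence, 1) means the tensor reaching the traced graph is 1-D, while the graph indexes dimension 1, so the model was presumably traced with 2-D inputs (for example [seq_len, batch]). The following is only a sketch of what reshaping the inputs might look like; the choice of unsqueeze(1) (batch axis of size 1) and the int64 dtype for the indices are assumptions and must match the tensors actually used during tracing:

// Sketch only: give each input the extra dimension the traced graph expects.
// unsqueeze(1) turns a 1-D tensor of shape [24] into shape [24, 1]; whether
// dim 0 or dim 1 is correct depends on how the model was traced in Python.
// torch::kLong is used because embedding indices are typically int64.
at::Tensor f_f = torch::tensor(
    {269,  90,  32, 269,  65,  85,  17, 269, 104,  13,   4,  21,  13,
     269,  15,  95,   5, 269,  41,  30,  21,  29, 270, 270},
    torch::kLong).unsqueeze(1);

std::vector<torch::jit::IValue> inputs;
inputs.push_back(f_f);
// ... reshape and push f_p, b_f, b_p, w_f the same way before calling
// module.forward(inputs).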