// /aosp_15_r20/external/pytorch/binaries/lite_interpreter_model_load.cc
// (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
#include "ATen/ATen.h"
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/autograd/generated/variable_factories.h>
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/serialization/import.h>
#include "torch/script.h"

C10_DEFINE_string(model, "", "The given bytecode model to check if it is supported by lite_interpreter.");
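
// A minimal sketch of how such a bytecode model can be produced from a
// TorchScript module; "model.pt" and "model.ptl" are placeholder paths used
// only for illustration:
//
//   torch::jit::Module m = torch::jit::load("model.pt");
//   m._save_for_mobile("model.ptl");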

int main(int argc, char** argv) {
  c10::SetUsageMessage(
    "Check if exported bytecode model is runnable by lite_interpreter.\n"
    "Example usage:\n"
    "./lite_interpreter_model_load"
    " --model=<model_file>");

  if (!c10::ParseCommandLineFlags(&argc, &argv)) {
    std::cerr << "Failed to parse command line flags!" << std::endl;
    return 1;
  }

  if (FLAGS_model.empty()) {
    std::cerr << "Model file is not provided.\n";
    return -1;
  }

  // TODO: avoid having to set this guard for custom mobile build with mobile
  // interpreter.
  // InferenceMode is an RAII guard that disables autograd tracking.
  c10::InferenceMode mode;
  // Load the bytecode model with the mobile interpreter; throws if unsupported.
  torch::jit::mobile::Module bc = torch::jit::_load_for_mobile(FLAGS_model);
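
  // A minimal sketch of exercising the loaded module, kept commented out so
  // this tool still only checks loadability; the 1x3x224x224 float input is an
  // assumed example shape, not one the model is known to accept:
  //
  //   std::vector<c10::IValue> inputs;
  //   inputs.emplace_back(torch::ones({1, 3, 224, 224}));
  //   const auto output = bc.forward(inputs);
  //   std::cout << output << std::endl;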
  return 0;
}