/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

/**
 * @file
 *
 * This tool can run ExecuTorch model files that only use operators that
 * are covered by the portable kernels, with optional delegation to the
 * test_backend_compiler_lib.
 *
 * It sets all input tensor data to ones, and assumes that the outputs are
 * all fp32 tensors.
 */

#include <cinttypes>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include <gflags/gflags.h>

#include <executorch/extension/data_loader/file_data_loader.h>
#include <executorch/extension/evalue_util/print_evalue.h>
#include <executorch/extension/runner_util/inputs.h>
#include <executorch/runtime/executor/method.h>
#include <executorch/runtime/executor/program.h>
#include <executorch/runtime/platform/log.h>
#include <executorch/runtime/platform/runtime.h>

static uint8_t method_allocator_pool[4 * 1024U * 1024U]; // 4 MB

DEFINE_string(
    model_path,
    "model.pte",
    "Model serialized in flatbuffer format.");

using executorch::extension::FileDataLoader;
using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::HierarchicalAllocator;
using executorch::runtime::MemoryAllocator;
using executorch::runtime::MemoryManager;
using executorch::runtime::Method;
using executorch::runtime::MethodMeta;
using executorch::runtime::Program;
using executorch::runtime::Result;
using executorch::runtime::Span;
int main(int argc, char** argv) {
  executorch::runtime::runtime_init();

  gflags::ParseCommandLineFlags(&argc, &argv, true);
  if (argc != 1) {
    std::string msg = "Extra commandline args:";
    for (int i = 1 /* skip argv[0] (program name) */; i < argc; i++) {
      msg += std::string(" ") + argv[i];
    }
    ET_LOG(Error, "%s", msg.c_str());
    return 1;
  }

  // Create a loader to get the data of the program file. There are other
  // DataLoaders that use mmap() or point to data that's already in memory, and
  // users can create their own DataLoaders to load from arbitrary sources.
  const char* model_path = FLAGS_model_path.c_str();
  Result<FileDataLoader> loader = FileDataLoader::from(model_path);
  ET_CHECK_MSG(
      loader.ok(),
      "FileDataLoader::from() failed: 0x%" PRIx32,
      (uint32_t)loader.error());
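  // A minimal sketch of the mmap() alternative mentioned above, assuming the
  // MmapDataLoader header from extension/data_loader is available in your
  // build:
  //
  //   #include <executorch/extension/data_loader/mmap_data_loader.h>
  //   Result<executorch::extension::MmapDataLoader> mmap_loader =
  //       executorch::extension::MmapDataLoader::from(model_path);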

  // Parse the program file. This is immutable, and can also be reused between
  // multiple execution invocations across multiple threads.
  Result<Program> program = Program::load(&loader.get());
  if (!program.ok()) {
    ET_LOG(Error, "Failed to parse model file %s", model_path);
    return 1;
  }
  ET_LOG(Info, "Model file %s is loaded.", model_path);

  // Use the first method in the program.
  const char* method_name = nullptr;
  {
    const auto method_name_result = program->get_method_name(0);
    ET_CHECK_MSG(method_name_result.ok(), "Program has no methods");
    method_name = *method_name_result;
  }
  ET_LOG(Info, "Using method %s", method_name);
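  // A sketch of enumerating every method instead of hard-coding the first
  // one, using the same Program APIs as above (num_methods() and
  // get_method_name()):
  //
  //   for (size_t i = 0; i < program->num_methods(); ++i) {
  //     ET_LOG(Info, "Method %zu: %s", i, *program->get_method_name(i));
  //   }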

  // MethodMeta describes the memory requirements of the method.
  Result<MethodMeta> method_meta = program->method_meta(method_name);
  ET_CHECK_MSG(
      method_meta.ok(),
      "Failed to get method_meta for %s: 0x%" PRIx32,
      method_name,
      (uint32_t)method_meta.error());

  //
  // The runtime does not use malloc/new; it allocates all memory using the
  // MemoryManager provided by the client. Clients are responsible for
  // allocating the memory ahead of time, or for providing MemoryAllocator
  // subclasses that can do it dynamically.
  //

  // The method allocator is used to allocate all dynamic C++ metadata/objects
  // used to represent the loaded method. This allocator is only used while
  // loading a method of the program; loading will return an error if there is
  // not enough memory.
  //
  // The amount of memory required depends on the loaded method and the
  // runtime code itself. The amount of memory here is usually determined by
  // running the method and seeing how much memory is actually used, though
  // it's possible to subclass MemoryAllocator so that it calls malloc() under
  // the hood (see MallocMemoryAllocator).
  //
  // In this example we use a statically allocated memory pool.
  MemoryAllocator method_allocator{
      MemoryAllocator(sizeof(method_allocator_pool), method_allocator_pool)};
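  // A heap-backed alternative to the static pool, per the
  // MallocMemoryAllocator note above; a sketch assuming the
  // extension/memory_allocator header is part of your build:
  //
  //   #include <executorch/extension/memory_allocator/malloc_memory_allocator.h>
  //   executorch::extension::MallocMemoryAllocator method_allocator;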

  // The memory-planned buffers will back the mutable tensors used by the
  // method. The sizes of these buffers were determined ahead of time during
  // the memory-planning passes.
  //
  // Each buffer typically corresponds to a different hardware memory bank.
  // Most mobile environments will only have a single buffer. Some embedded
  // environments may have more than one for, e.g., slow/large DRAM and
  // fast/small SRAM, or for memory associated with particular cores.
  std::vector<std::unique_ptr<uint8_t[]>> planned_buffers; // Owns the memory
  std::vector<Span<uint8_t>> planned_spans; // Passed to the allocator
  size_t num_memory_planned_buffers = method_meta->num_memory_planned_buffers();
  for (size_t id = 0; id < num_memory_planned_buffers; ++id) {
    // .get() will always succeed because id < num_memory_planned_buffers.
    size_t buffer_size =
        static_cast<size_t>(method_meta->memory_planned_buffer_size(id).get());
    ET_LOG(Info, "Setting up planned buffer %zu, size %zu.", id, buffer_size);
    planned_buffers.push_back(std::make_unique<uint8_t[]>(buffer_size));
    planned_spans.push_back({planned_buffers.back().get(), buffer_size});
  }
  HierarchicalAllocator planned_memory(
      {planned_spans.data(), planned_spans.size()});

  // Assemble all of the allocators into the MemoryManager that the Executor
  // will use.
  MemoryManager memory_manager(&method_allocator, &planned_memory);
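  // MemoryManager also accepts an optional third allocator for temporary
  // allocations that live only for the duration of a single kernel or
  // delegate call; a sketch, assuming the three-argument overload and a
  // hypothetical `temp_allocator`:
  //
  //   MemoryManager memory_manager(
  //       &method_allocator, &planned_memory, &temp_allocator);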

  //
  // Load the method from the program, using the provided allocators. Running
  // the method can mutate the memory-planned buffers, so the method should
  // only be used by a single thread at a time, but it can be reused.
  //

  Result<Method> method = program->load_method(method_name, &memory_manager);
  ET_CHECK_MSG(
      method.ok(),
      "Loading of method %s failed with status 0x%" PRIx32,
      method_name,
      (uint32_t)method.error());
  ET_LOG(Info, "Method loaded.");

  // Allocate input tensors and set all of their elements to 1. The `inputs`
  // variable owns the allocated memory and must live past the last call to
  // `execute()`.
  auto inputs = executorch::extension::prepare_input_tensors(*method);
  ET_CHECK_MSG(
      inputs.ok(),
      "Could not prepare inputs: 0x%" PRIx32,
      (uint32_t)inputs.error());
  ET_LOG(Info, "Inputs prepared.");

  // Run the model.
  Error status = method->execute();
  ET_CHECK_MSG(
      status == Error::Ok,
      "Execution of method %s failed with status 0x%" PRIx32,
      method_name,
      (uint32_t)status);
  ET_LOG(Info, "Model executed successfully.");

  // Print the outputs.
  std::vector<EValue> outputs(method->outputs_size());
  ET_LOG(Info, "%zu outputs: ", outputs.size());
  status = method->get_outputs(outputs.data(), outputs.size());
  ET_CHECK(status == Error::Ok);
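  // A sketch of reading raw output data, relying on the file-level assumption
  // that outputs are fp32 tensors (EValue::toTensor() and
  // Tensor::const_data_ptr() are runtime APIs):
  //
  //   if (outputs[0].isTensor()) {
  //     const float* out_data = outputs[0].toTensor().const_data_ptr<float>();
  //   }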
  // Print the first and last 100 elements of long lists of scalars.
  std::cout << executorch::extension::evalue_edge_items(100);
  for (size_t i = 0; i < outputs.size(); ++i) {
    std::cout << "Output " << i << ": " << outputs[i] << std::endl;
  }

  return 0;
}