/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once

#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/result.h>
#include <executorch/runtime/core/span.h>
#include <executorch/runtime/core/tag.h>

// Forward declare flatbuffer types. This is a public header and must not
// include the generated flatbuffer header.
namespace executorch_flatbuffer {
struct ExecutionPlan;
} // namespace executorch_flatbuffer

namespace executorch {
namespace runtime {

/**
 * Metadata about a specific tensor of an ExecuTorch Program.
 *
 * The program used to create the MethodMeta object that created this
 * TensorInfo must outlive this TensorInfo.
 */
class TensorInfo final {
 public:
  TensorInfo() = delete;
  TensorInfo(const TensorInfo&) = default;
  TensorInfo(TensorInfo&&) = default;
  TensorInfo& operator=(const TensorInfo&) = default;
  TensorInfo& operator=(TensorInfo&& other) = default;
  ~TensorInfo() = default;

  /**
   * Returns the sizes of the tensor.
   */
  Span<const int32_t> sizes() const;

  /**
   * Returns the dim order of the tensor.
   */
  Span<const uint8_t> dim_order() const;

  /**
   * Returns the scalar type of the input/output.
   */
  executorch::aten::ScalarType scalar_type() const;

  /**
   * Returns whether the tensor's memory was planned during export.
   */
  bool is_memory_planned() const;

  /**
   * Returns the size of the tensor in bytes.
   */
  size_t nbytes() const;

 private:
  // Let MethodMeta create TensorInfo.
  friend class MethodMeta;

  TensorInfo(
      Span<const int32_t> sizes,
      Span<const uint8_t> dim_order,
      executorch::aten::ScalarType scalar_type,
      const bool is_memory_planned);

  /**
   * The sizes of the tensor.
   *
   * NOTE: References data from the Program, so the Program must outlive the
   * TensorInfo.
   */
  Span<const int32_t> sizes_;

  /**
   * The dim order of the tensor.
   *
   * NOTE: References data from the Program, so the Program must outlive the
   * TensorInfo.
   */
  Span<const uint8_t> dim_order_;

  /// The scalar type of the tensor.
  executorch::aten::ScalarType scalar_type_;

  /// Whether the tensor's memory was planned during export.
  bool is_memory_planned_;

  /// The size in bytes of the tensor.
  size_t nbytes_;
};
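
// Example (illustrative sketch, not part of the original header): reading the
// metadata that a TensorInfo exposes. `meta` is assumed to be a MethodMeta for
// the method of interest (see MethodMeta below).
//
//   Result<TensorInfo> info = meta.input_tensor_meta(0);
//   if (info.ok()) {
//     executorch::aten::ScalarType dtype = info.get().scalar_type();
//     size_t bytes_needed = info.get().nbytes();  // size of the tensor's data
//     for (int32_t dim : info.get().sizes()) {
//       // One entry per dimension; dim_order() describes the physical layout.
//     }
//   }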

/**
 * Describes a method in an ExecuTorch program.
 *
 * The program used to create a MethodMeta object must outlive the MethodMeta.
 * It is separate from Method so that this information can be accessed without
 * paying the initialization cost of loading the full Method.
 */
class MethodMeta final {
 public:
  MethodMeta() = delete;
  MethodMeta(const MethodMeta&) = default;
  MethodMeta(MethodMeta&&) = default;
  MethodMeta& operator=(const MethodMeta&) = default;
  MethodMeta& operator=(MethodMeta&& other) = default;
  ~MethodMeta() = default;

  /**
   * Get the name of this method.
   *
   * @returns The method name.
   */
  const char* name() const;

  /**
   * Get the number of inputs to this method.
   *
   * @returns The number of inputs.
   */
  size_t num_inputs() const;

  /**
   * Get the tag of the specified input.
   *
   * @param[in] index The index of the input to look up.
   * @returns The tag of the specified input; it can only be one of [Tensor,
   * Int, Bool, Double, String].
   */
  Result<Tag> input_tag(size_t index) const;

  /**
   * Get metadata about the specified input.
   *
   * @param[in] index The index of the input to look up.
   * @returns The metadata on success, or an error on failure. Only valid for
   * Tag::Tensor.
   */
  Result<TensorInfo> input_tensor_meta(size_t index) const;

  /**
   * Get the number of outputs to this method.
   *
   * @returns The number of outputs.
   */
  size_t num_outputs() const;

  /**
   * Get the tag of the specified output.
   *
   * @param[in] index The index of the output to look up.
   * @returns The tag of the specified output; it can only be one of [Tensor,
   * Int, Bool, Double, String].
   */
  Result<Tag> output_tag(size_t index) const;

  /**
   * Get metadata about the specified output.
   *
   * @param[in] index The index of the output to look up.
   * @returns The metadata on success, or an error on failure. Only valid for
   * Tag::Tensor.
   */
  Result<TensorInfo> output_tensor_meta(size_t index) const;

  /**
   * Get the number of memory-planned buffers this method requires.
   *
   * @returns The number of memory-planned buffers.
   */
  size_t num_memory_planned_buffers() const;

  /**
   * Get the size in bytes of the specified memory-planned buffer.
   *
   * @param[in] index The index of the buffer to look up.
   * @returns The size in bytes on success, or an error on failure.
   */
  Result<int64_t> memory_planned_buffer_size(size_t index) const;

  /**
   * DEPRECATED: Use num_memory_planned_buffers() instead.
   */
  ET_DEPRECATED size_t num_non_const_buffers() const {
    return num_memory_planned_buffers();
  }

  /**
   * DEPRECATED: Use memory_planned_buffer_size() instead.
   */
  Result<int64_t> non_const_buffer_size(size_t index) const {
    return memory_planned_buffer_size(index);
  }

 private:
  // Let Program create MethodMeta.
  friend class Program;

  explicit MethodMeta(const executorch_flatbuffer::ExecutionPlan* s_plan);

  /// Source of truth for method information
  const executorch_flatbuffer::ExecutionPlan* s_plan_;
};
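
// Example (illustrative sketch, not part of the original header): inspecting a
// method and sizing its memory-planned buffers before loading it. `program` is
// assumed to be an already-loaded executorch::runtime::Program, and "forward"
// is just an example method name; buffer allocation and MemoryManager setup
// are elided.
//
//   Result<MethodMeta> meta = program.method_meta("forward");
//   if (meta.ok()) {
//     size_t n_inputs = meta.get().num_inputs();
//     for (size_t i = 0; i < meta.get().num_memory_planned_buffers(); ++i) {
//       Result<int64_t> buf_size = meta.get().memory_planned_buffer_size(i);
//       // Allocate buf_size.get() bytes for planned buffer i and hand the
//       // buffers to a MemoryManager when calling Program::load_method().
//     }
//   }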

} // namespace runtime
} // namespace executorch

namespace torch {
namespace executor {
// TODO(T197294990): Remove these deprecated aliases once all users have moved
// to the new `::executorch` namespaces.
using ::executorch::runtime::MethodMeta;
using ::executorch::runtime::TensorInfo;
} // namespace executor
} // namespace torch