xref: /aosp_15_r20/external/executorch/runtime/executor/tensor_parser.h (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
1 /*
2  * Copyright (c) Meta Platforms, Inc. and affiliates.
3  * All rights reserved.
4  *
5  * This source code is licensed under the BSD-style license found in the
6  * LICENSE file in the root directory of this source tree.
7  */
8 
9 #pragma once
10 
11 #include <executorch/runtime/core/evalue.h>
12 #include <executorch/runtime/core/exec_aten/exec_aten.h>
13 #include <executorch/runtime/executor/memory_manager.h>
14 #include <executorch/runtime/executor/program.h>
15 #include <executorch/schema/program_generated.h>
16 
17 namespace executorch {
18 namespace runtime {
19 namespace deserialization {
20 
/**
 * Deserializes a flatbuffer-serialized tensor into a runtime Tensor.
 *
 * @param[in] program The Program that owns the serialized data (used for
 *     constant buffer lookups — presumably; confirm against the definition).
 * @param[in] memory_manager Source of memory for the deserialized tensor's
 *     metadata and (for non-constant tensors) its data.
 * @param[in] s_tensor The serialized tensor to parse.
 *
 * @returns On success, the deserialized Tensor; on failure, a non-Ok Error.
 */
ET_NODISCARD Result<executorch::aten::Tensor> parseTensor(
    const Program* program,
    MemoryManager* memory_manager,
    const executorch_flatbuffer::Tensor* s_tensor);
25 
/**
 * Deserializes a list of tensors stored as indices into `values_`.
 *
 * @param[in] tensor_indices Indices into `values_` identifying the EValues
 *     (already deserialized) that hold the list's tensors.
 * @param[in] values_ The method's EValue table that the indices refer to.
 * @param[in] memory_manager Source of memory for the list's backing storage.
 *
 * @returns On success, a BoxedEvalueList viewing the referenced tensors; on
 *     failure, a non-Ok Error.
 */
ET_NODISCARD Result<BoxedEvalueList<executorch::aten::Tensor>> parseTensorList(
    const flatbuffers::Vector<int32_t>* tensor_indices,
    EValue* values_,
    MemoryManager* memory_manager);
30 
31 // Deserializes a List of optional type. The code here is the same between all
32 // list of optionals: list of optional Tensor, list of optional float etc, so we
33 // just use a template to avoid boilerplate.
34 template <typename T>
35 ET_NODISCARD Result<BoxedEvalueList<executorch::aten::optional<T>>>
parseListOptionalType(const flatbuffers::Vector<int32_t> * value_indices,EValue * values_,MemoryManager * memory_manager)36 parseListOptionalType(
37     const flatbuffers::Vector<int32_t>* value_indices,
38     EValue* values_,
39     MemoryManager* memory_manager) {
40   auto* evalp_list = memory_manager->method_allocator()->allocateList<EValue*>(
41       value_indices->size());
42   if (evalp_list == nullptr) {
43     return Error::MemoryAllocationFailed;
44   }
45 
46   auto* optional_tensor_list =
47       memory_manager->method_allocator()
48           ->allocateList<executorch::aten::optional<T>>(value_indices->size());
49   if (optional_tensor_list == nullptr) {
50     return Error::MemoryAllocationFailed;
51   }
52 
53   size_t output_idx = 0;
54   // For each index look up the corresponding EValue (which has been
55   // already allocated) and stick it in the list.
56   for (int32_t index : *value_indices) {
57     // Lists of objects are stored in fbb as list[int] where the ints are
58     // indices into values_. Currently serialization is deciding if they want to
59     // put -1 for serialized None type indices, or give us a valid index to a
60     // serialized None. We support either for now.
61     // Placement new as the list elements are not initialized, so calling
62     // copy assignment is not defined if its non trivial.
63     if (index == -1) {
64       new (&optional_tensor_list[output_idx])
65           executorch::aten::optional<T>(executorch::aten::nullopt);
66       // no value to point to. BoxedEvalueList for optional tensor will convert
67       // this to nullopt.
68       // TODO(T161156879): do something less hacky here.
69       evalp_list[output_idx] = nullptr;
70     } else {
71       new (&optional_tensor_list[output_idx])
72           executorch::aten::optional<T>(values_[index].toOptional<T>());
73       evalp_list[output_idx] = &values_[static_cast<size_t>(index)];
74     }
75     output_idx++;
76   }
77   return BoxedEvalueList<executorch::aten::optional<T>>(
78       evalp_list, optional_tensor_list, value_indices->size());
79 }
80 
/**
 * Returns the appropriate data pointer for `s_tensor`.
 *
 * Overall, a Tensor is either constant or non-constant, except we differentiate
 * 2 special variants of non-constant Tensor ("input" and control-flow
 * "placeholder") as a special optimization to avoid holding unnecessary
 * AllocationDetails. Thus, s_tensor can be configured as 1 of 3 options:
 * - constant_buffer > 0, allocation_info = Null: Constant Tensor.
 * - constant_buffer = 0, allocation_info = Non Null: Non-constant Tensor.
 * - constant_buffer = 0, allocation_info = Null: Input/placeholder Tensor.
 *
 * @param[in] s_tensor The tensor to find the data pointer for.
 * @param[in] program The Program to use for constant buffer data.
 * @param[in] nbytes The amount of memory to get from the allocator.
 * @param[in] allocator The source of memory for non-constant tensors.
 *
 * @returns On success, the data pointer to use for the tensor. On failure, a
 *     non-Ok Error.
 */
ET_NODISCARD Result<void*> getTensorDataPtr(
    const executorch_flatbuffer::Tensor* s_tensor,
    const Program* program,
    size_t nbytes,
    HierarchicalAllocator* allocator);
105 
106 } // namespace deserialization
107 } // namespace runtime
108 } // namespace executorch
109 
namespace torch {
namespace executor {
namespace deserialization {
// TODO(T197294990): Remove these deprecated aliases once all users have moved
// to the new `::executorch` namespaces. They exist only for source
// compatibility with code still using the legacy `torch::executor` names.
using ::executorch::runtime::deserialization::getTensorDataPtr;
using ::executorch::runtime::deserialization::parseListOptionalType;
using ::executorch::runtime::deserialization::parseTensor;
using ::executorch::runtime::deserialization::parseTensorList;
} // namespace deserialization
} // namespace executor
} // namespace torch
122