/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef SRC_COMMON_MEMORY_HELPERS_H
#define SRC_COMMON_MEMORY_HELPERS_H

#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/experimental/Types.h"
#include "arm_compute/runtime/MemoryGroup.h"

#include <algorithm>
#include <memory>
#include <utility>
#include <vector>

namespace arm_compute
{
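/** Map an integer offset to a tensor-pack slot id in the internal/auxiliary tensor range, i.e. ACL_INT_VEC + offset */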
inline int offset_int_vec(int offset)
{
    return ACL_INT_VEC + offset;
}

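/** Auxiliary workspace tensor, together with the pack slot it is bound to and its memory lifetime */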
template <typename TensorType>
struct WorkspaceDataElement
{
    int                          slot{ -1 };
    experimental::MemoryLifetime lifetime{ experimental::MemoryLifetime::Temporary };
    std::unique_ptr<TensorType>  tensor{ nullptr };
};

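/** Collection of auxiliary workspace tensors owned by the caller on behalf of an operator */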
template <typename TensorType>
using WorkspaceData = std::vector<WorkspaceDataElement<TensorType>>;

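/** Allocate the auxiliary tensors described by @p mem_reqs and register them in @p run_pack.
 *
 * Convenience overload for callers without a separate prepare pack: non-temporary tensors are
 * registered in a local, discarded pack, so they remain reachable only through @p run_pack and
 * the returned workspace.
 */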
template <typename TensorType>
WorkspaceData<TensorType> manage_workspace(const experimental::MemoryRequirements &mem_reqs,
                                           MemoryGroup                            &mgroup,
                                           ITensorPack                            &run_pack)
{
    ITensorPack dummy_pack = ITensorPack();
    return manage_workspace<TensorType>(mem_reqs, mgroup, run_pack, dummy_pack);
}

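/** Allocate the auxiliary tensors described by @p mem_reqs and register them in the given packs.
 *
 * For every non-empty requirement a U8 tensor of the requested size and alignment is created:
 * tensors with Temporary lifetime are handed to @p mgroup for group-managed memory, all other
 * lifetimes are added to @p prep_pack, and every tensor is added to @p run_pack. The returned
 * WorkspaceData keeps ownership of the created tensors.
 */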
template <typename TensorType>
WorkspaceData<TensorType> manage_workspace(const experimental::MemoryRequirements &mem_reqs,
                                           MemoryGroup                            &mgroup,
                                           ITensorPack &run_pack, ITensorPack &prep_pack)
{
    WorkspaceData<TensorType> workspace_memory;
    for(const auto &req : mem_reqs)
    {
        if(req.size == 0)
        {
            continue;
        }

        // Describe each auxiliary buffer as a 1D U8 tensor of the requested size
        const auto aux_info = TensorInfo{ TensorShape(req.size), 1, DataType::U8 };
        workspace_memory.emplace_back(WorkspaceDataElement<TensorType> { req.slot, req.lifetime, std::make_unique<TensorType>() });

        auto aux_tensor = workspace_memory.back().tensor.get();
        ARM_COMPUTE_ERROR_ON_NULLPTR(aux_tensor);
        aux_tensor->allocator()->init(aux_info, req.alignment);

        // Temporary tensors are managed by the memory group; longer-lived ones are exposed through the prepare pack
        if(req.lifetime == experimental::MemoryLifetime::Temporary)
        {
            mgroup.manage(aux_tensor);
        }
        else
        {
            prep_pack.add_tensor(req.slot, aux_tensor);
        }
        // Every auxiliary tensor is visible to the run pack
        run_pack.add_tensor(req.slot, aux_tensor);
    }

    // Allocate backing memory for all workspace tensors
    for(auto &mem : workspace_memory)
    {
        auto tensor = mem.tensor.get();
        tensor->allocator()->allocate();
    }

    return workspace_memory;
}

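/** Utility function to destroy the workspace tensors with Prepare lifetime and remove them from @p prep_pack */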
template <typename TensorType>
void release_prepare_tensors(WorkspaceData<TensorType> &workspace, ITensorPack &prep_pack)
{
    workspace.erase(std::remove_if(workspace.begin(),
                                   workspace.end(),
                                   [&prep_pack](auto & wk)
    {
        const bool to_erase = wk.lifetime == experimental::MemoryLifetime::Prepare;
        if(to_erase)
        {
            prep_pack.remove_tensor(wk.slot);
        }
        return to_erase;
    }),
    workspace.end());
}

/** Utility function to free the memory of workspace tensors whose lifetime is marked as Prepare */
template <typename TensorType>
void release_temporaries(const experimental::MemoryRequirements &mem_reqs,
                         WorkspaceData<TensorType>              &workspace)
{
    for(auto &ws : workspace)
    {
        const int slot = ws.slot;
        for(auto &m : mem_reqs)
        {
            if(m.slot == slot && m.lifetime == experimental::MemoryLifetime::Prepare)
            {
                auto tensor = ws.tensor.get();
                tensor->allocator()->free();
                break;
            }
        }
    }
}
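
/* A minimal usage sketch (illustrative only; `op`, `src`, `dst` and the surrounding operator
 * wrapper are hypothetical, and the exact call sequence depends on the operator):
 *
 *   // Configure time: query the operator's auxiliary memory and set up the workspace
 *   ITensorPack run_pack{ { ACL_SRC, src }, { ACL_DST, dst } };
 *   ITensorPack prep_pack{};
 *   MemoryGroup mgroup{};
 *   auto workspace = manage_workspace<Tensor>(op->workspace(), mgroup, run_pack, prep_pack);
 *
 *   // Prepare time: run one-shot work, then drop the Prepare-lifetime tensors
 *   op->prepare(prep_pack);
 *   release_prepare_tensors(workspace, prep_pack);
 *
 *   // Run time: temporaries are acquired through the memory group for the duration of run()
 *   MemoryGroupResourceScope scope_mg(mgroup);
 *   op->run(run_pack);
 */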
} // namespace arm_compute
#endif /* SRC_COMMON_MEMORY_HELPERS_H */