xref: /aosp_15_r20/external/ComputeLibrary/src/runtime/CL/CLTensorAllocator.cpp (revision c217d954acce2dbc11938adb493fc0abd69584f3)
/*
 * Copyright (c) 2016-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/CLTensorAllocator.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLRuntimeContext.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

namespace arm_compute
{
const cl::Buffer CLTensorAllocator::_empty_buffer = cl::Buffer();
namespace
{
/** Global user-defined allocator that can be used for all internal allocations of a CLTensor */
static IAllocator *static_global_cl_allocator = nullptr;

/** Helper function used to allocate the backing memory of a tensor
 *
 * @param[in] size      Size of the allocation
 * @param[in] alignment Alignment of the allocation
 *
 * @return A wrapped memory region
 */
std::unique_ptr<ICLMemoryRegion> allocate_region(size_t size, cl_uint alignment)
{
    // Try fine-grain SVM
    std::unique_ptr<ICLMemoryRegion> region = std::make_unique<CLFineSVMMemoryRegion>(CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER,
                                                                                      size,
                                                                                      alignment);

    // Try coarse-grain SVM in case of failure
    if(region != nullptr && region->ptr() == nullptr)
    {
        region = std::make_unique<CLCoarseSVMMemoryRegion>(CL_MEM_READ_WRITE, size, alignment);
    }
    // Try legacy buffer memory in case of failure
    if(region != nullptr && region->ptr() == nullptr)
    {
        region = std::make_unique<CLBufferMemoryRegion>(CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
    }
    return region;
}
/** Clears quantization arrays
 *
 * @param[in, out] scale  Quantization scale array
 * @param[in, out] offset Quantization offset array
 */
void clear_quantization_arrays(CLFloatArray &scale, CLInt32Array &offset)
{
    // Clear arrays
    scale  = CLFloatArray();
    offset = CLInt32Array();
}
/** Helper function used to create quantization data arrays
 *
 * @param[in, out] scale    Quantization scale array
 * @param[in, out] offset   Quantization offset array
 * @param[in]      qinfo    Quantization info
 * @param[in]      pad_size Pad size to use in case array needs to be padded for computation purposes
 */
void populate_quantization_info(CLFloatArray &scale, CLInt32Array &offset, const QuantizationInfo &qinfo, size_t pad_size)
{
    clear_quantization_arrays(scale, offset);

    // Create scale array
    const std::vector<float> &qscale       = qinfo.scale();
    const size_t              num_elements = qscale.size();
    const size_t              element_size = sizeof(std::remove_reference<decltype(qscale)>::type::value_type);
    scale                                  = CLFloatArray(num_elements + pad_size);
    scale.resize(num_elements);
    CLScheduler::get().queue().enqueueWriteBuffer(scale.cl_buffer(), CL_TRUE, 0, num_elements * element_size, qinfo.scale().data());

    if(!qinfo.offset().empty())
    {
        // Create offset array
        const std::vector<int32_t> &qoffset             = qinfo.offset();
        const size_t                offset_element_size = sizeof(std::remove_reference<decltype(qoffset)>::type::value_type);
        offset                                          = CLInt32Array(num_elements + pad_size);
        offset.resize(num_elements);
        CLScheduler::get().queue().enqueueWriteBuffer(offset.cl_buffer(), CL_TRUE, 0, num_elements * offset_element_size, qinfo.offset().data());
    }
}
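// A minimal usage sketch of the per-channel path above (assumes the usual CLTensor setup; the shape
// and scale values are illustrative only):
//
//     std::vector<float> scales{ 0.1f, 0.2f, 0.4f };                // one scale per channel
//     TensorInfo         info(TensorShape(3U, 3U, 3U), 1,
//                             DataType::QSYMM8_PER_CHANNEL, QuantizationInfo(scales));
//     CLTensor           weights;
//     weights.allocator()->init(info);
//     weights.allocator()->allocate();                              // fills the scale/offset arrays during allocation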
} // namespace

CLTensorAllocator::CLTensorAllocator(IMemoryManageable *owner, CLRuntimeContext *ctx)
    : _ctx(ctx), _owner(owner), _associated_memory_group(nullptr), _memory(), _mapping(nullptr), _scale(), _offset()
{
}

CLQuantization CLTensorAllocator::quantization() const
{
    return { &_scale, &_offset };
}

uint8_t *CLTensorAllocator::data()
{
    return _mapping;
}

const cl::Buffer &CLTensorAllocator::cl_data() const
{
    return _memory.region() == nullptr ? _empty_buffer : _memory.cl_region()->cl_data();
}

void CLTensorAllocator::allocate()
{
    // Allocate tensor backing memory
    if(_associated_memory_group == nullptr)
    {
        // Perform memory allocation
        if(static_global_cl_allocator != nullptr)
        {
            _memory.set_owned_region(static_global_cl_allocator->make_region(info().total_size(), 0));
        }
        else
        {
            _memory.set_owned_region(allocate_region(info().total_size(), 0));
        }
    }
    else
    {
        // Finalize memory management instead
        _associated_memory_group->finalize_memory(_owner, _memory, info().total_size(), alignment());
    }

    // Allocate and fill the quantization parameter arrays
    if(is_data_type_quantized_per_channel(info().data_type()))
    {
        const size_t pad_size = 0;
        populate_quantization_info(_scale, _offset, info().quantization_info(), pad_size);
    }

    // Lock allocator
    info().set_is_resizable(false);
}
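// A minimal usage sketch of the allocation flow (assumes CLScheduler has been initialised; the
// tensor shape and type are illustrative only):
//
//     CLScheduler::get().default_init();                            // one-off context/queue setup
//     CLTensor tensor;
//     tensor.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::F32));
//     tensor.allocator()->allocate();                               // backing memory is acquired here
//     // ... use the tensor ...
//     tensor.allocator()->free();                                   // release backing memory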

void CLTensorAllocator::free()
{
    _mapping = nullptr;
    _memory.set_region(nullptr);
    clear_quantization_arrays(_scale, _offset);
    info().set_is_resizable(true);
}

Status CLTensorAllocator::import_memory(cl::Buffer buffer)
{
    ARM_COMPUTE_RETURN_ERROR_ON(buffer.get() == nullptr);
    ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_SIZE>() < info().total_size());
    ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_CONTEXT>().get() != CLScheduler::get().context().get());
    ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);

    _memory.set_owned_region(std::make_unique<CLBufferMemoryRegion>(buffer));

    info().set_is_resizable(false);
    return Status{};
}
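// A minimal usage sketch for importing an externally created cl::Buffer (assumes the buffer lives on
// the same context as CLScheduler and is at least info().total_size() bytes):
//
//     CLTensor tensor;
//     tensor.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::F32));
//     cl::Buffer external(CLScheduler::get().context(), CL_MEM_READ_WRITE, tensor.info()->total_size());
//     ARM_COMPUTE_ERROR_THROW_ON(tensor.allocator()->import_memory(external));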

void CLTensorAllocator::set_associated_memory_group(IMemoryGroup *associated_memory_group)
{
    ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
    ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr && _associated_memory_group != associated_memory_group);
    ARM_COMPUTE_ERROR_ON(_memory.region() != nullptr && _memory.cl_region()->cl_data().get() != nullptr);

    _associated_memory_group = associated_memory_group;
}

void CLTensorAllocator::set_global_allocator(IAllocator *allocator)
{
    static_global_cl_allocator = allocator;
}
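// A minimal usage sketch for installing a user-defined global allocator (CLBufferAllocator is used
// here only as an example IAllocator implementation; the allocator must outlive every tensor that
// allocates through it):
//
//     static CLBufferAllocator global_allocator;
//     CLTensorAllocator::set_global_allocator(&global_allocator);
//     // ... create and allocate CLTensors ...
//     CLTensorAllocator::set_global_allocator(nullptr);             // restore the default behaviour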

uint8_t *CLTensorAllocator::lock()
{
    if(_ctx)
    {
        return map(_ctx->gpu_scheduler()->queue(), true);
    }
    else
    {
        // Legacy singleton API
        return map(CLScheduler::get().queue(), true);
    }
}

void CLTensorAllocator::unlock()
{
    ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
    if(_ctx)
    {
        unmap(_ctx->gpu_scheduler()->queue(), reinterpret_cast<uint8_t *>(_memory.region()->buffer()));
    }
    else
    {
        // Legacy singleton API
        unmap(CLScheduler::get().queue(), reinterpret_cast<uint8_t *>(_memory.region()->buffer()));
    }
}

uint8_t *CLTensorAllocator::map(cl::CommandQueue &q, bool blocking)
{
    ARM_COMPUTE_ERROR_ON(_mapping != nullptr);
    ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
    // ICLMemoryRegion::buffer() returns the current host mapping, so it must still be null here
    ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() != nullptr);

    _mapping = reinterpret_cast<uint8_t *>(_memory.cl_region()->map(q, blocking));
    return _mapping;
}

void CLTensorAllocator::unmap(cl::CommandQueue &q, uint8_t *mapping)
{
    ARM_COMPUTE_ERROR_ON(_mapping == nullptr);
    ARM_COMPUTE_ERROR_ON(_mapping != mapping);
    ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
    ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() == nullptr);
    ARM_COMPUTE_UNUSED(mapping);

    _memory.cl_region()->unmap(q);
    _mapping = nullptr;
}
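// A minimal usage sketch of host access, which reaches map()/unmap() above through the CLTensor
// interface (assumes an allocated tensor and an initialised scheduler):
//
//     tensor.map();                                                 // blocking map on the scheduler queue
//     uint8_t *host_ptr = tensor.buffer();                          // valid until unmap()
//     // ... read or write through host_ptr ...
//     tensor.unmap();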
} // namespace arm_compute