/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEROIPoolingLayerKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/ToolchainSupport.h"

#include <cfloat>

namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *rois, const ITensorInfo *output, const ROIPoolingLayerInfo &pool_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, rois);

    // Validate arguments
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(rois, DataType::U16);
    ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(0) != 5);
    ARM_COMPUTE_RETURN_ERROR_ON(rois->num_dimensions() > 2);
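    // Each ROI is encoded as 5 uint16 values laid out as { batch_index, x1, y1, x2, y2 },
    // matching how run() decodes the rois buffer below. As an illustrative example, a
    // hypothetical ROI of { 0, 8, 8, 24, 24 } selects a 16x16 box in batch 0 before the
    // spatial scale is applied.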
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F32, DataType::QASYMM8);
    ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0));

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) != pool_info.pooled_width()) || (output->dimension(1) != pool_info.pooled_height()));
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != output->dimension(2));
        ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(1) != output->dimension(3));
    }

    return Status{};
}

/** Evaluate the maximum value of a pooling region and store it in the output tensor,
 *  requantizing it first when the data type is quantized.
 *
 * @param[in]  input          Source tensor. Data types supported: QASYMM8/F32
 * @param[out] output         Destination tensor where the pooled value is stored; same data type as @p input
 * @param[in]  region_start_x Start x coordinate of the pooling region
 * @param[in]  region_start_y Start y coordinate of the pooling region
 * @param[in]  region_end_x   End x coordinate of the pooling region
 * @param[in]  region_end_y   End y coordinate of the pooling region
 * @param[in]  fm             Channel index of the coordinate in the output tensor to store the value
 * @param[in]  px             Width index of the coordinate in the output tensor to store the value
 * @param[in]  py             Height index of the coordinate in the output tensor to store the value
 * @param[in]  roi_batch      Batch index of the input image on which pooling is performed
 * @param[in]  roi_indx       ROI (batch) index of the coordinate in the output tensor to store the value
 */
template <typename T>
void template_eval(const ITensor *input, const ITensor *output, int region_start_x, int region_start_y,
                   int region_end_x, int region_end_y, int fm, int px, int py, int roi_batch, int roi_indx)
{
    if((region_end_x <= region_start_x) || (region_end_y <= region_start_y))
    {
        // Empty pooling region: store zero
        *reinterpret_cast<T *>(output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = 0;
    }
    else
    {
        T curr_max = std::numeric_limits<T>::lowest(); // Min value of typename T
        for(int j = region_start_y; j < region_end_y; ++j)
        {
            for(int i = region_start_x; i < region_end_x; ++i)
            {
                const auto val = *reinterpret_cast<const T *>(input->ptr_to_element(Coordinates(i, j, fm, roi_batch)));
                curr_max       = std::max(val, curr_max);
            }
        }

        // If quantized data type, requantize then store in output tensor
        if(is_data_type_quantized(input->info()->data_type()))
        {
            // Convert the QASYMM8 value to the output's quantization scale and offset
            UniformQuantizationInfo uqinfo = compute_requantization_scale_offset(input->info()->quantization_info().uniform(), output->info()->quantization_info().uniform());
            *reinterpret_cast<T *>(output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = quantize_qasymm8(curr_max, uqinfo);
        }
        else
        {
            *reinterpret_cast<T *>(output->ptr_to_element(Coordinates(px, py, fm, roi_indx))) = curr_max;
        }
    }
}
} // namespace

NEROIPoolingLayerKernel::NEROIPoolingLayerKernel()
    : _input(nullptr), _rois(nullptr), _output(nullptr), _pool_info(0, 0, 0.f)
{
}

Status NEROIPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *rois, const ITensorInfo *output, const ROIPoolingLayerInfo &pool_info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, rois, output, pool_info));
    return Status{};
}

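// Illustrative usage sketch (assumptions for documentation purposes, not taken from this file):
// tensors are allocated and shaped as required by validate_arguments(), and the kernel is
// scheduled over its X dimension (the ROI list) in the usual Compute Library fashion.
//
//   NEROIPoolingLayerKernel kernel;
//   kernel.configure(&src, &rois, &dst, ROIPoolingLayerInfo(7, 7, 1.f / 16));
//   NEScheduler::get().schedule(&kernel, Window::DimX);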
void NEROIPoolingLayerKernel::configure(const ITensor *input, const ITensor *rois, const ITensor *output, const ROIPoolingLayerInfo &pool_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, rois);

    // Validate arguments
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), rois->info(), output->info(), pool_info));

    // Output auto initialization if not yet initialized
    TensorShape output_shape(pool_info.pooled_width(), pool_info.pooled_height(), input->info()->dimension(2), rois->info()->dimension(1));

    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), output->info()->quantization_info());

    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) != pool_info.pooled_width()) || (output->info()->dimension(1) != pool_info.pooled_height()));

    // Set instance variables
    _input     = input;
    _rois      = rois;
    _output    = output;
    _pool_info = pool_info;

    // Configure kernel window
    Window window;
    window.set(Window::DimX, Window::Dimension(0, rois->info()->dimension(1)));
    window.set(Window::DimY, Window::Dimension(0, 1));
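    // The window's X dimension spans the ROI list, so the scheduler can split the ROIs across
    // threads; run() then handles the ROIs in [window.x().start(), window.x().end()).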

    INEKernel::configure(window);
}

void NEROIPoolingLayerKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    const size_t values_per_roi = _rois->info()->dimension(0);

    const int   roi_list_start = window.x().start();
    const int   roi_list_end   = window.x().end();
    const int   width          = _input->info()->dimension(Window::DimX);
    const int   height         = _input->info()->dimension(Window::DimY);
    const int   fms            = _input->info()->dimension(Window::DimZ);
    const int   pooled_w       = _pool_info.pooled_width();
    const int   pooled_h       = _pool_info.pooled_height();
    const float spatial_scale  = _pool_info.spatial_scale();

    const auto *rois_ptr  = reinterpret_cast<const uint16_t *>(_rois->buffer());
    const auto  data_type = _input->info()->data_type();

    for(int roi_indx = roi_list_start; roi_indx < roi_list_end; ++roi_indx)
    {
        const unsigned int roi_batch = rois_ptr[values_per_roi * roi_indx];
        const auto         x1        = rois_ptr[values_per_roi * roi_indx + 1];
        const auto         y1        = rois_ptr[values_per_roi * roi_indx + 2];
        const auto         x2        = rois_ptr[values_per_roi * roi_indx + 3];
        const auto         y2        = rois_ptr[values_per_roi * roi_indx + 4];

        // Scale ROI
        const int roi_anchor_x = support::cpp11::round(x1 * spatial_scale);
        const int roi_anchor_y = support::cpp11::round(y1 * spatial_scale);
        const int roi_width    = std::max(support::cpp11::round((x2 - x1) * spatial_scale), 1.f);
        const int roi_height   = std::max(support::cpp11::round((y2 - y1) * spatial_scale), 1.f);
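        // Illustrative example (hypothetical values): with spatial_scale = 1/16, an ROI with
        // x1 = 32 and x2 = 96 yields roi_anchor_x = 2 and roi_width = 4 on the feature map.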

        // Iterate through all feature maps
        for(int fm = 0; fm < fms; ++fm)
        {
            // Iterate through all output pixels
            for(int py = 0; py < pooled_h; ++py)
            {
                for(int px = 0; px < pooled_w; ++px)
                {
                    auto region_start_x = static_cast<int>(std::floor((static_cast<float>(px) / pooled_w) * roi_width));
                    auto region_end_x   = static_cast<int>(std::floor((static_cast<float>(px + 1) / pooled_w) * roi_width));
                    auto region_start_y = static_cast<int>(std::floor((static_cast<float>(py) / pooled_h) * roi_height));
                    auto region_end_y   = static_cast<int>(std::floor((static_cast<float>(py + 1) / pooled_h) * roi_height));
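                    // Illustrative example (hypothetical values): with roi_width = 4 and pooled_w = 2,
                    // px = 0 maps to x in [0, 2) and px = 1 maps to x in [2, 4), before the ROI anchor
                    // offset and the clamping to the feature map bounds applied below.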

                    region_start_x = std::min(std::max(region_start_x + roi_anchor_x, 0), width);
                    region_end_x   = std::min(std::max(region_end_x + roi_anchor_x, 0), width);
                    region_start_y = std::min(std::max(region_start_y + roi_anchor_y, 0), height);
                    region_end_y   = std::min(std::max(region_end_y + roi_anchor_y, 0), height);

                    switch(data_type)
                    {
                        case DataType::F32:
                            template_eval<float>(_input, _output, region_start_x, region_start_y, region_end_x, region_end_y, fm, px, py, roi_batch, roi_indx);
                            break;
                        case DataType::QASYMM8:
                            template_eval<qasymm8_t>(_input, _output, region_start_x, region_start_y, region_end_x, region_end_y, fm, px, py, roi_batch, roi_indx);
                            break;
                        default:
                            ARM_COMPUTE_ERROR("DataType not Supported");
                            break;
                    }
                }
            }
        }
    }
}
} // namespace arm_compute
219 } // namespace arm_compute