/*
 * Copyright (c) 2019-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEGenerateProposalsLayerKernel.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Window.h"
#include "src/core/CPP/Validate.h"
#include "src/core/common/Registrars.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/cpu/kernels/genproposals/list.h"
#include <arm_neon.h>

namespace arm_compute
{
namespace
{
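// Data passed to the selectors below to pick a matching micro-kernel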
struct ComputeAllAnchorsData
{
    DataType dt;
};

using ComputeAllAnchorsSelectorPtr = std::add_pointer<bool(const ComputeAllAnchorsData &data)>::type;
using ComputeAllAnchorsUKernelPtr  = std::add_pointer<void(const ITensor *anchors, ITensor *all_anchors, ComputeAnchorsInfo anchors_info, const Window &window)>::type;

struct ComputeAllAnchorsKernel
{
    const char                        *name;
    const ComputeAllAnchorsSelectorPtr is_selected;
    ComputeAllAnchorsUKernelPtr        ukernel;
};

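// Table of candidate micro-kernels: each entry pairs a data-type predicate with the
// implementation registered for that type. Entries are compiled in only when the
// corresponding architecture feature is available at build time.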
static const ComputeAllAnchorsKernel available_kernels[] =
{
#if defined(ARM_COMPUTE_ENABLE_NEON)
    {
        "neon_qu16_computeallanchors",
        [](const ComputeAllAnchorsData & data) { return data.dt == DataType::QSYMM16; },
        REGISTER_QSYMM16_NEON(arm_compute::cpu::neon_qu16_computeallanchors)
    },
#endif //defined(ARM_COMPUTE_ENABLE_NEON)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    {
        "neon_fp16_computeallanchors",
        [](const ComputeAllAnchorsData & data) { return data.dt == DataType::F16; },
        REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_computeallanchors)
    },
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    {
        "neon_fp32_computeallanchors",
        [](const ComputeAllAnchorsData & data) { return data.dt == DataType::F32; },
        REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_computeallanchors)
    },
};

/** Micro-kernel selector
 *
 * @param[in] data Selection data passed to help pick the appropriate micro-kernel
 *
 * @return A matching micro-kernel else nullptr
 */
const ComputeAllAnchorsKernel *get_implementation(const ComputeAllAnchorsData &data)
{
    for(const auto &uk : available_kernels)
    {
        if(uk.is_selected(data))
        {
            return &uk;
        }
    }
    return nullptr;
}

Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(anchors, all_anchors);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(anchors);
    ARM_COMPUTE_RETURN_ERROR_ON(anchors->dimension(0) != info.values_per_roi());
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::QSYMM16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(anchors->num_dimensions() > 2);
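    // If the output has already been initialized, check that its data type, shape and
    // quantization info are consistent with the anchors and the ComputeAnchorsInfo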
    if(all_anchors->total_size() > 0)
    {
        const size_t feature_height = info.feat_height();
        const size_t feature_width  = info.feat_width();
        const size_t num_anchors    = anchors->dimension(1);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(all_anchors, anchors);
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->num_dimensions() > 2);
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(0) != info.values_per_roi());
        ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(1) != feature_height * feature_width * num_anchors);

        if(is_data_type_quantized(anchors->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(anchors, all_anchors);
        }
    }
    return Status{};
}

} // namespace

NEComputeAllAnchorsKernel::NEComputeAllAnchorsKernel()
    : _anchors(nullptr), _all_anchors(nullptr), _anchors_info(0.f, 0.f, 0.f)
{
}

void NEComputeAllAnchorsKernel::configure(const ITensor *anchors, ITensor *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(anchors, all_anchors);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(anchors->info(), all_anchors->info(), info));

    // Metadata
    const size_t   num_anchors = anchors->info()->dimension(1);
    const DataType data_type   = anchors->info()->data_type();
    const float    width       = info.feat_width();
    const float    height      = info.feat_height();

    // Initialize the output if empty
    const TensorShape output_shape(info.values_per_roi(), width * height * num_anchors);
    auto_init_if_empty(*all_anchors->info(), TensorInfo(output_shape, 1, data_type, anchors->info()->quantization_info()));

    // Set instance variables
    _anchors      = anchors;
    _all_anchors  = all_anchors;
    _anchors_info = info;

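    // Configure the execution window over the output, stepping values_per_roi elements at a time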
    Window win = calculate_max_window(*all_anchors->info(), Steps(info.values_per_roi()));

    INEKernel::configure(win);
}

Status NEComputeAllAnchorsKernel::validate(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(anchors, all_anchors, info));
    return Status{};
}

void NEComputeAllAnchorsKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

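    // Select the micro-kernel matching the anchors' data type and run it over the given window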
    const auto *uk = get_implementation(ComputeAllAnchorsData{ _anchors->info()->data_type() });
    ARM_COMPUTE_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);

    uk->ukernel(_anchors, _all_anchors, _anchors_info, window);
}
} // namespace arm_compute