xref: /aosp_15_r20/external/ComputeLibrary/src/cpu/kernels/CpuConcatenateWidthKernel.cpp (revision c217d954acce2dbc11938adb493fc0abd69584f3)
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/kernels/CpuConcatenateWidthKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <cstdint>

namespace arm_compute
{
namespace cpu
{
namespace kernels
{
namespace
{
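/** Validate the arguments of the width-concatenation kernel.
 *
 * Checks that src and dst are non-null, share the same (known) data type,
 * that src fits into dst along the width (dimension 0) at the given offset,
 * and that all higher dimensions match exactly.
 */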
Status validate_arguments(const ITensorInfo *src, unsigned int width_offset, const ITensorInfo *dst)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
    // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use CPU FP16 instructions.
    ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
    ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) + width_offset > dst->dimension(0));

    for(size_t i = 1; i < Coordinates::num_max_dimensions; ++i)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(i) != dst->dimension(i));
    }

    return Status{};
}
} // namespace

void CpuConcatenateWidthKernel::configure(const ITensorInfo *src, unsigned int width_offset, ITensorInfo *dst)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, width_offset, dst));
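    // dst is only consulted during validation; the kernel window is derived from src.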
    ARM_COMPUTE_UNUSED(dst);

    _width_offset = width_offset;

    // Configure kernel window
    Window win = calculate_max_window(*src, Steps());

    ICpuKernel::configure(win);
}

Status CpuConcatenateWidthKernel::validate(const ITensorInfo *src, unsigned int width_offset, const ITensorInfo *dst)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, width_offset, dst));
    return Status{};
}

void CpuConcatenateWidthKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);

    const auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
    auto       dst = tensors.get_tensor(TensorType::ACL_DST);

    // Offset output pointer to the correct position
    uint8_t *dst_ptr = dst->buffer() + dst->info()->offset_first_element_in_bytes() + _width_offset * dst->info()->strides_in_bytes()[0];

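    // The X loops below iterate in bytes: window_end_x converts the element
    // count to bytes via the element size, and each vector step copies 16
    // bytes (one 128-bit NEON register).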
    const auto    window_start_x = static_cast<int>(window.x().start());
    const auto    window_end_x   = static_cast<int>(window.x().end()) * static_cast<int>(dst->info()->element_size());
    constexpr int window_step_x  = 16;

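    // Collapse the X dimension of the execution window: the iterators advance
    // over the remaining dimensions while the X axis is traversed manually in
    // the loops below.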
    Window win{ window };
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    // Create iterators
    Iterator                       src_it(src, win);
    Iterator                       dst_it(dst, win);
    const DataType                 dt        = src->info()->data_type();
    const UniformQuantizationInfo &src_qinfo = src->info()->quantization_info().uniform();
    const UniformQuantizationInfo &dst_qinfo = dst->info()->quantization_info().uniform();
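    // Quantized inputs whose quantization parameters differ from the output's
    // must be re-quantized while copying; otherwise a plain byte copy suffices.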
    if(dt == DataType::QASYMM8 && src_qinfo != dst_qinfo)
    {
        execute_window_loop(win, [&](const Coordinates &)
        {
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                vst1q_u8(dst_ptr + dst_it.offset() + x, vquantize(vdequantize(vld1q_u8(src_it.ptr() + x), src_qinfo), dst_qinfo));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                *(dst_ptr + dst_it.offset() + x) = quantize_qasymm8(dequantize_qasymm8(*(src_it.ptr() + x), src_qinfo), dst_qinfo);
            }
        },
        src_it, dst_it);
    }
    else if(dt == DataType::QASYMM8_SIGNED && src_qinfo != dst_qinfo)
    {
        execute_window_loop(win, [&](const Coordinates &)
        {
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                vst1q_s8(reinterpret_cast<int8_t *>(dst_ptr + dst_it.offset() + x),
                         vquantize_signed(vdequantize(vld1q_s8(reinterpret_cast<int8_t *>(src_it.ptr() + x)), src_qinfo), dst_qinfo));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                *(dst_ptr + dst_it.offset() + x) = quantize_qasymm8_signed(dequantize_qasymm8_signed(*(src_it.ptr() + x), src_qinfo), dst_qinfo);
            }
        },
        src_it, dst_it);
    }
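    // All remaining cases (non-quantized types, or matching quantization
    // info): copy the row as raw bytes.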
    else
    {
        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto in_ptr  = src_it.ptr();
            const auto out_ptr = dst_ptr + dst_it.offset();
            int        x       = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                wrapper::vstore(out_ptr + x, wrapper::vloadq(in_ptr + x));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                *(out_ptr + x) = *(in_ptr + x);
            }
        },
        src_it, dst_it);
    }
}

const char *CpuConcatenateWidthKernel::name() const
{
    return "CpuConcatenateWidthKernel";
}
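
// Minimal usage sketch (illustrative only; mirrors how a concatenation
// operator would typically drive this kernel, assuming `src` and `dst` are
// already-allocated ITensors whose shapes satisfy validate()):
//
//   CpuConcatenateWidthKernel kernel;
//   kernel.configure(src.info(), /* width_offset = */ 0, dst.info());
//
//   ITensorPack pack;
//   pack.add_const_tensor(TensorType::ACL_SRC, &src);
//   pack.add_tensor(TensorType::ACL_DST, &dst);
//   kernel.run_op(pack, kernel.window(), ThreadInfo{});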
} // namespace kernels
} // namespace cpu
} // namespace arm_compute