/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NELogicalKernel.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Validate.h"
#include "src/common/utils/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <algorithm> // std::min
#include <arm_neon.h>

namespace arm_compute
{
namespace kernels
{
namespace
{
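// Vector constants of all-zero and all-one bytes, plus the number of bytes
// consumed per iteration by the 128-bit (step) and 64-bit (half_step) loops.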
static const uint8x8_t  c0_x8     = vdup_n_u8(0);
static const uint8x16_t c0_x16    = vdupq_n_u8(0);
static const uint8x8_t  c1_x8     = vdup_n_u8(1);
static const uint8x16_t c1_x16    = vdupq_n_u8(1);
static const uint32_t   step      = 16;
static const uint32_t   half_step = step / 2;

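// Element-wise logical AND of two U8 buffers. Each input is first clamped to
// {0, 1} with vmin so any non-zero byte counts as true; the bitwise AND of the
// clamped values is then the logical AND. Processes 16 bytes per iteration,
// then 8, then finishes with a scalar tail.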
void neon_logical_and(const uint8_t *src0, const uint8_t *src1, uint8_t *dst, uint32_t len)
{
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(src0);
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(src1);
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(dst);

    for(; len >= step; len -= step)
    {
        vst1q_u8(dst, vandq_u8(vminq_u8(vld1q_u8(src0), c1_x16), vminq_u8(vld1q_u8(src1), c1_x16)));
        src0 += step;
        src1 += step;
        dst += step;
    }

    for(; len >= half_step; len -= half_step)
    {
        vst1_u8(dst, vand_u8(vmin_u8(vld1_u8(src0), c1_x8), vmin_u8(vld1_u8(src1), c1_x8)));
        src0 += half_step;
        src1 += half_step;
        dst += half_step;
    }

    for(; len > 0; --len)
    {
        *dst = (*src0) && (*src1);
        ++src0;
        ++src1;
        ++dst;
    }
}

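// Logical AND of a U8 buffer against a single broadcast value. The scalar is
// clamped to {0, 1} once up front and splatted into 16- and 8-lane vectors.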
void neon_logical_and_broadcast(const uint8_t *src, uint8_t broadcast_val, uint8_t *dst, uint32_t len)
{
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(src);
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(dst);

    const auto broadcast_val_clamped_s   = std::min<uint8_t>(broadcast_val, 1);
    const auto broadcast_val_clamped_x16 = vdupq_n_u8(broadcast_val_clamped_s);
    const auto broadcast_val_clamped_x8  = vdup_n_u8(broadcast_val_clamped_s);

    for(; len >= step; len -= step)
    {
        vst1q_u8(dst, vandq_u8(vminq_u8(vld1q_u8(src), c1_x16), broadcast_val_clamped_x16));
        src += step;
        dst += step;
    }

    for(; len >= half_step; len -= half_step)
    {
        vst1_u8(dst, vand_u8(vmin_u8(vld1_u8(src), c1_x8), broadcast_val_clamped_x8));
        src += half_step;
        dst += half_step;
    }

    for(; len > 0; --len)
    {
        *dst = (*src) && broadcast_val_clamped_s;
        ++src;
        ++dst;
    }
}

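// Element-wise logical OR of two U8 buffers, mirroring neon_logical_and with
// vorr in place of vand.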
void neon_logical_or(const uint8_t *src0, const uint8_t *src1, uint8_t *dst, uint32_t len)
{
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(src0);
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(src1);
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(dst);

    for(; len >= step; len -= step)
    {
        vst1q_u8(dst, vorrq_u8(vminq_u8(vld1q_u8(src0), c1_x16), vminq_u8(vld1q_u8(src1), c1_x16)));
        src0 += step;
        src1 += step;
        dst += step;
    }

    for(; len >= half_step; len -= half_step)
    {
        vst1_u8(dst, vorr_u8(vmin_u8(vld1_u8(src0), c1_x8), vmin_u8(vld1_u8(src1), c1_x8)));
        src0 += half_step;
        src1 += half_step;
        dst += half_step;
    }

    for(; len > 0; --len)
    {
        *dst = (*src0) || (*src1);
        ++src0;
        ++src1;
        ++dst;
    }
}

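// Logical OR of a U8 buffer against a single broadcast value, mirroring
// neon_logical_and_broadcast with vorr in place of vand.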
void neon_logical_or_broadcast(const uint8_t *src, uint8_t broadcast_val, uint8_t *dst, uint32_t len)
{
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(src);
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(dst);

    const auto broadcast_val_clamped_s   = std::min<uint8_t>(broadcast_val, 1);
    const auto broadcast_val_clamped_x16 = vdupq_n_u8(broadcast_val_clamped_s);
    const auto broadcast_val_clamped_x8  = vdup_n_u8(broadcast_val_clamped_s);

    for(; len >= step; len -= step)
    {
        vst1q_u8(dst, vorrq_u8(vminq_u8(vld1q_u8(src), c1_x16), broadcast_val_clamped_x16));
        src += step;
        dst += step;
    }

    for(; len >= half_step; len -= half_step)
    {
        vst1_u8(dst, vorr_u8(vmin_u8(vld1_u8(src), c1_x8), broadcast_val_clamped_x8));
        src += half_step;
        dst += half_step;
    }

    for(; len > 0; --len)
    {
        *dst = (*src) || broadcast_val_clamped_s;
        ++src;
        ++dst;
    }
}

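// Logical NOT of a U8 buffer: writes 1 where the input byte is 0 and 0
// otherwise, using a compare-against-zero followed by a bit-select.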
void neon_logical_not(const uint8_t *src, uint8_t *dst, uint32_t len)
{
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(src);
    ARM_COMPUTE_ASSERT_NOT_NULLPTR(dst);

    for(; len >= step; len -= step)
    {
        vst1q_u8(dst, vbslq_u8(vceqq_u8(vld1q_u8(src), c0_x16), c1_x16, c0_x16));
        src += step;
        dst += step;
    }

    for(; len >= half_step; len -= half_step)
    {
        vst1_u8(dst, vbsl_u8(vceq_u8(vld1_u8(src), c0_x8), c1_x8, c0_x8));
        src += half_step;
        dst += half_step;
    }

    for(; len > 0; --len)
    {
        *dst = !(*src);
        ++src;
        ++dst;
    }
}

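// Runs neon_logical_not over the window. The x dimension is collapsed to a
// single step and each row is handed to the kernel as one contiguous run of
// len elements.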
void run_unary(const Window &window, const ITensor *src, ITensor *dst)
{
    Window win{ window };
    win.set(Window::DimX, Window::Dimension(0, 1, 1));
    const auto len = window.x().end() - window.x().start();

    Iterator in(src, win);
    Iterator out(dst, win);

    execute_window_loop(win, [&](const Coordinates &)
    {
        neon_logical_not(in.ptr(), out.ptr(), len);
    },
    in, out);
}

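// Runs the AND/OR kernels over the window. If the two inputs differ in
// x-extent, one of them is broadcast along x: after
// broadcast_if_dimension_le_one the broadcast input is the one whose window
// step is 0, and its single value per row is fed to the *_broadcast variant.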
void run_binary(const Window &window, const ITensor *src0, const ITensor *src1, ITensor *dst, LogicalOperation op)
{
    Window src0_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
    Window src1_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());

    Window win{ window };
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
    const auto len                   = window.x().end() - window.x().start();

    if(is_broadcast_across_x)
    {
        using LogicalBroadcastUKernelPtr        = std::add_pointer<void(const uint8_t *, uint8_t, uint8_t *, uint32_t)>::type;
        LogicalBroadcastUKernelPtr logical_func = op == LogicalOperation::Or ? &neon_logical_or_broadcast : &neon_logical_and_broadcast;

        const bool     is_broadcast_input_1 = src1_win.x().step() == 0;
        Window         broadcast_win        = is_broadcast_input_1 ? src1_win : src0_win;
        Window         non_broadcast_win    = !is_broadcast_input_1 ? src1_win : src0_win;
        const ITensor *broadcast_tensor     = is_broadcast_input_1 ? src1 : src0;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_1 ? src1 : src0;
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_in(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_in(non_broadcast_tensor, non_broadcast_win);
        Iterator out(dst, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const uint8_t broadcast_value = *broadcast_in.ptr();
            logical_func(non_broadcast_in.ptr(), broadcast_value, out.ptr(), len);
        },
        broadcast_in, non_broadcast_in, out);
    }
    else
    {
        using LogicalUKernelPtr        = std::add_pointer<void(const uint8_t *, const uint8_t *, uint8_t *, uint32_t)>::type;
        LogicalUKernelPtr logical_func = op == LogicalOperation::Or ? &neon_logical_or : &neon_logical_and;

        src0_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        src1_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator in0(src0, src0_win);
        Iterator in1(src1, src1_win);
        Iterator out(dst, win);
        execute_window_loop(win, [&](const Coordinates &)
        {
            logical_func(in0.ptr(), in1.ptr(), out.ptr(), len);
        },
        in0, in1, out);
    }
}
} // namespace
const char *NELogicalKernel::name() const
{
    return "NELogicalKernel";
}

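// Configures the kernel window and auto-initializes the output. For binary
// operations the output shape is the broadcast of the two input shapes; for
// NOT it matches the first input.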
void NELogicalKernel::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, LogicalOperation op)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate(input1, input2, output, op));

    _op = op;

    Window      win       = calculate_max_window(*input1, Steps());
    TensorShape out_shape = input1->tensor_shape();
    if(op != LogicalOperation::Not)
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(input2);
        out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());
        win       = calculate_max_window(out_shape, Steps());
    }
    ICPPKernel::configure(win);

    // Auto initialize if empty
    set_shape_if_empty(*output, out_shape);
    set_data_type_if_unknown(*output, input1->data_type());
}

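// Static validation: inputs must be U8, the operation must be known, binary
// inputs must be broadcast compatible, and an already-configured output must
// match the expected shape and data type.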
Status NELogicalKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, LogicalOperation op)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8);
    ARM_COMPUTE_RETURN_ERROR_ON(op == LogicalOperation::Unknown);

    TensorShape out_shape = input1->tensor_shape();
    if(op != LogicalOperation::Not)
    {
        out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2);
    }

    // Checks performed when output is configured
    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output);
    }

    return Status{};
}

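// Dispatches to the unary (NOT) or binary (AND/OR) runner depending on the
// configured operation; ACL_SRC_1 is only consumed in the binary case.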
void NELogicalKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(tensors.empty());

    const ITensor *src0 = tensors.get_const_tensor(TensorType::ACL_SRC_0);
    const ITensor *src1 = tensors.get_const_tensor(TensorType::ACL_SRC_1);
    ITensor       *dst  = tensors.get_tensor(TensorType::ACL_DST);

    if(_op == LogicalOperation::Not)
    {
        run_unary(window, src0, dst);
    }
    else
    {
        run_binary(window, src0, src1, dst, _op);
    }
}
} // namespace kernels
} // namespace arm_compute