1 /*
2 * Copyright (c) 2021-2022 Arm Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 /* Utilities for constructing functions which constrain which kernels are
26 * selected for a given depthwise problem.
27 *
28 * It is expected that this will be included in the files which list the
29 * available kernels. To avoid multiple definitions, an anonymous namespace is
30 * used.
31 */
32
33 #pragma once
34
#include "arm_gemm.hpp"
#include "src/core/NEON/kernels/assembly/depthwise.hpp"

#include <functional>
#include <limits>
37
38 namespace arm_conv
39 {
40 namespace depthwise
41 {
42 namespace
43 {
44
45 template <class OutputStage>
46 using ConstraintFn = std::function<bool(const DepthwiseArgs &, const OutputStage &)>;
47
48 using GenericConstraintFn = std::function<bool(const DepthwiseArgs &, const void *)>;
49
// Base case of constraint composition: a single type-erased constraint is
// returned unchanged. Marked unused since not every including file calls it.
GenericConstraintFn make_constraint(const GenericConstraintFn &f) __attribute__ ((unused));
GenericConstraintFn make_constraint(const GenericConstraintFn &f)
{
  return f;
}
55
56 template <typename ... Fs>
make_constraint(const GenericConstraintFn & f,Fs...fs)57 GenericConstraintFn make_constraint(const GenericConstraintFn &f, Fs ... fs)
58 {
59 return [f, fs...] (const DepthwiseArgs &args, const void *os) -> bool {
60 return f(args, os) && make_constraint(fs...)(args, os);
61 };
62 }
63
64 template <typename OutputStage=Nothing, typename ... Fs>
constraint(Fs...fs)65 ConstraintFn<OutputStage> constraint(Fs ... fs)
66 {
67 return [fs...] (const DepthwiseArgs &args, const OutputStage &os) -> bool {
68 return make_constraint(fs...)(args, &os);
69 };
70 }
71
72 // Some useful constraints
73 template <class Strategy>
is_supported(const DepthwiseArgs & args,const void *)74 bool is_supported(const DepthwiseArgs &args, const void *)
75 {
76 return ((args.kernel_rows == Strategy::kernel_rows) &&
77 (args.kernel_cols == Strategy::kernel_cols) &&
78 (args.stride_rows == Strategy::stride_rows) &&
79 (args.stride_cols == Strategy::stride_cols));
80 }
81
// True iff the target CPU reports support for the dot product extension.
bool cpu_has_dot_product(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
bool cpu_has_dot_product(const DepthwiseArgs &args, const void *)
{
  return args.cpu_info->has_dotprod();
}
87
// True iff the target CPU reports support for SME.
bool cpu_has_sme(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
bool cpu_has_sme(const DepthwiseArgs &args, const void *)
{
  return args.cpu_info->has_sme();
}
93
// True iff the target CPU reports support for SME2.
bool cpu_has_sme2(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
bool cpu_has_sme2(const DepthwiseArgs &args, const void *)
{
  return args.cpu_info->has_sme2();
}
99
// True iff the target CPU reports support for SVE.
bool cpu_has_sve(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
bool cpu_has_sve(const DepthwiseArgs &args, const void *)
{
  return args.cpu_info->has_sve();
}
105
// True iff the target CPU reports support for SVE2.
bool cpu_has_sve2(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
bool cpu_has_sve2(const DepthwiseArgs &args, const void *)
{
  return args.cpu_info->has_sve2();
}
111
// True iff the target CPU reports support for FP16 arithmetic.
bool cpu_has_fp16(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
bool cpu_has_fp16(const DepthwiseArgs &args, const void *)
{
  return args.cpu_info->has_fp16();
}
117
// True iff the depth multiplier is exactly one, i.e. each input channel
// produces exactly one output channel.
bool has_no_channel_multiplier(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
bool has_no_channel_multiplier(const DepthwiseArgs &args, const void *)
{
  return args.channel_multiplier == 1;
}
123
// True iff the depth multiplier is greater than one, i.e. each input channel
// produces multiple output channels.
bool has_channel_multiplier(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
bool has_channel_multiplier(const DepthwiseArgs &args, const void *)
{
  return args.channel_multiplier > 1;
}
129
// Planar kernels require a "priming" step before the main processing loop. The kernels can prime with left padding
// or input data, but not right padding - which could be needed in some extreme cases such as a 5x5 kernel, width 1
// padding 2. These are rare enough and can be handled with other kernels anyway, so filter them out with this.
bool no_prime_right_pad(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
bool no_prime_right_pad(const DepthwiseArgs &args, const void *)
{
  // The input columns plus left padding must be enough to prime the kernel
  // without touching right padding.
  return (args.input_cols + args.padding.left) >= (args.kernel_cols - 1);
}
138
139 bool qp_has_no_left_shift(const DepthwiseArgs &args, const void *_qp) __attribute__ ((unused));
qp_has_no_left_shift(const DepthwiseArgs &,const void * _qp)140 bool qp_has_no_left_shift(const DepthwiseArgs &, const void *_qp)
141 {
142 const auto qp = static_cast<const arm_gemm::Requantize32 *>(_qp);
143 return qp->per_channel_requant ?
144 (qp->per_channel_left_shifts == nullptr) :
145 (qp->per_layer_left_shift == 0);
146 }
147
148 bool qp_zero_a_offset(const DepthwiseArgs &args, const void *_qp) __attribute__ ((unused));
qp_zero_a_offset(const DepthwiseArgs &,const void * _qp)149 bool qp_zero_a_offset(const DepthwiseArgs &, const void *_qp)
150 {
151 const auto qp = static_cast<const arm_gemm::Requantize32 *>(_qp);
152 return qp->a_offset == 0;
153 }
154
155 template <typename T> bool qp_skip_clamp(const DepthwiseArgs &args, const void *_qp) __attribute__ ((unused));
qp_skip_clamp(const DepthwiseArgs &,const void * _qp)156 template <typename T> bool qp_skip_clamp(const DepthwiseArgs &, const void *_qp)
157 {
158 const auto qp = static_cast<const arm_gemm::Requantize32 *>(_qp);
159 return (qp->minval == std::numeric_limits<T>::min() &&
160 qp->maxval == std::numeric_limits<T>::max());
161 }
162
163 } // namespace
164 } // namespace depthwise
165 } // namespace arm_conv
166