// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <functional>
#include <random>
#include <vector>

#include <cpuinfo.h>

#include <benchmark/benchmark.h>
#include <fp16/fp16.h>
#include "bench/dwconv.h"
#include "bench/utils.h"

#include <xnnpack.h>
#include <xnnpack/aligned-allocator.h>
#include <xnnpack/common.h>
#include <xnnpack/dwconv.h>
#include <xnnpack/indirection.h>
#include <xnnpack/microfnptr.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/operator.h>
#include <xnnpack/pack.h>

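// Benchmarks one XNNPACK f16 depthwise-convolution minmax unipass micro-kernel.
// `channel_tile` is the number of channels the micro-kernel processes per call
// and `primary_tile` is the kernel-size tile it was built for; `isa_check`
// optionally skips the benchmark when the required ISA extension is missing.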
static void f16_dwconv(benchmark::State& state,
  xnn_f16_dwconv_minmax_unipass_ukernel_function dwconv,
  xnn_init_f16_minmax_params_fn init_params,
  uint32_t channel_tile, uint32_t primary_tile,
  benchmark::utils::IsaCheckFunction isa_check = nullptr)
{
  if (!cpuinfo_initialize()) {
    state.SkipWithError("cpuinfo initialization failed");
    return;
  }
  if (isa_check && !isa_check(state)) {
    return;
  }

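  // Convolution geometry supplied as benchmark arguments (see the
  // BENCHMARK_DWCONV registrations below): input size, kernel size, padding,
  // stride (subsampling), dilation, and channel count.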
  const size_t input_height = state.range(0);
  const size_t input_width = state.range(1);
  const size_t kernel_height = state.range(2);
  const size_t kernel_width = state.range(3);
  const size_t padding_height = state.range(4);
  const size_t padding_width = state.range(5);
  const size_t subsampling = state.range(6);
  const size_t dilation = state.range(7);
  const size_t channels = state.range(8);

  const size_t kernel_size = kernel_height * kernel_width;
  if (kernel_size != primary_tile) {
    state.SkipWithError("kernel size mismatch");
    return;
  }

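  // Random FP16 inputs: uniform FP32 values in [0, 1) converted with
  // fp16_ieee_from_fp32_value and stored as raw uint16_t bits.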
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), std::ref(rng));
  auto f16rng = std::bind(fp16_ieee_from_fp32_value, f32rng);

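  // Derive output size and indirection-buffer step sizes from the effective
  // (dilated) kernel size, padding, and stride.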
  const size_t effective_kernel_height = (kernel_height - 1) * dilation + 1;
  const size_t effective_kernel_width = (kernel_width - 1) * dilation + 1;
  const size_t padding_left = padding_width / 2;
  const size_t padding_top = padding_height / 2;
  const size_t output_height = (input_height + padding_height - effective_kernel_height) / subsampling + 1;
  const size_t output_width = (input_width + padding_width - effective_kernel_width) / subsampling + 1;
  const size_t output_size = output_height * output_width;
  const size_t step_width = dilation == 1 ? subsampling : kernel_width;
  const size_t step_height = kernel_size + (output_width - 1) * step_width * kernel_height;

  const size_t c_stride = benchmark::utils::RoundUp<size_t>(channels, channel_tile);

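  // Input (a), kernel (k), bias (b), and zero-padding (z) buffers hold FP16
  // values in uint16_t storage; the XNN_EXTRA_BYTES of slack on the input and
  // zero buffers allows the micro-kernel to safely over-read.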
  std::vector<uint16_t> a(channels * input_height * input_width + XNN_EXTRA_BYTES / sizeof(uint16_t));
  std::generate(a.begin(), a.end(), std::ref(f16rng));
  std::vector<uint16_t> k(channels * kernel_height * kernel_width);
  std::generate(k.begin(), k.end(), std::ref(f16rng));
  std::vector<uint16_t> b(channels);
  std::generate(b.begin(), b.end(), std::ref(f16rng));

  std::vector<uint16_t> z(channels + XNN_EXTRA_BYTES / sizeof(uint16_t));

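  // Replicate the packed weights, indirection entries, and outputs across
  // enough buffers to exceed the last-level cache, so every iteration works
  // on cold data instead of a cached working set.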
  const size_t w_elements = (kernel_size + 1) * c_stride;
  const size_t i_elements = output_height * step_height;
  const size_t c_elements = output_size * channels;
  const size_t num_buffers = 1 +
    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(),
      sizeof(uint16_t) * (w_elements + c_elements) + sizeof(void*) * i_elements);

  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> w(w_elements * num_buffers);
  std::fill(w.begin(), w.end(), 0.0f);
  xnn_pack_f16_dwconv_ghw_w(primary_tile, kernel_height, kernel_width, channels, channel_tile,
    k.data(), b.data(), w.data(), 0 /* extra bytes */, nullptr);
  for (size_t n = 1; n < num_buffers; n++) {
    std::copy(w.cbegin(), w.cbegin() + w_elements, w.begin() + n * w_elements);
  }

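  // Build the indirection buffer by filling in only the fields of a stub
  // xnn_operator that xnn_indirection_init_dwconv2d reads; each entry points
  // at an input pixel, or at the zero buffer for padded positions.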
  std::vector<const uint16_t*> i(i_elements * num_buffers);
  xnn_operator convolution_op = { };
  convolution_op.indirection_buffer = reinterpret_cast<const void**>(i.data());
  convolution_op.input = a.data();
  convolution_op.input_pixel_stride = channels;
  convolution_op.zero_buffer = z.data();
  convolution_op.input_height = input_height;
  convolution_op.input_width = input_width;
  convolution_op.output_height = output_height;
  convolution_op.output_width = output_width;
  convolution_op.kernel_height = kernel_height;
  convolution_op.kernel_width = kernel_width;
  convolution_op.stride_height = subsampling;
  convolution_op.stride_width = subsampling;
  convolution_op.dilation_height = dilation;
  convolution_op.dilation_width = dilation;
  convolution_op.padding_top = padding_top;
  convolution_op.padding_left = padding_left;

  xnn_indirection_init_dwconv2d(&convolution_op, step_height, step_width, primary_tile, 1 /* log2(sizeof(uint16_t)) */);
  for (size_t n = 1; n < num_buffers; n++) {
    std::copy(i.cbegin(), i.cbegin() + i_elements, i.begin() + n * i_elements);
  }

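  // Pre-fill the output with FP16 NaN so stale values from a previous
  // iteration cannot be mistaken for freshly computed results.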
  std::vector<uint16_t> c(c_elements * num_buffers);
  std::fill(c.begin(), c.end(), UINT16_C(0x7E00) /* NaN */);

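  // Clamp to [-inf, +inf], i.e. the minmax parameters impose no clamping.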
  xnn_f16_minmax_params params;
  init_params(&params, UINT16_C(0xFC00) /* -inf */, UINT16_C(0x7C00) /* inf */);

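  // Hot loop: outside the timed region, prefetch the input and rotate to the
  // next replicated buffer; then invoke the micro-kernel once per output row.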
  size_t buffer_index = 0;
  for (auto _ : state) {
    state.PauseTiming();
    benchmark::utils::PrefetchToL1(a.data(), a.size() * sizeof(uint16_t));
    buffer_index = (buffer_index + 1) % num_buffers;
    state.ResumeTiming();

    for (size_t y = 0; y < output_height; y++) {
      dwconv(channels, output_width,
        reinterpret_cast<const void**>(i.data() + buffer_index * i_elements + step_height * y),
        w.data() + buffer_index * w_elements,
        c.data() + buffer_index * c_elements + y * output_width * channels,
        kernel_height * step_width * sizeof(void*), 0,
        0, z.data(), &params);
    }
  }

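  // Report the current CPU frequency, effective FLOPS (two ops per
  // multiply-accumulate), and bytes transferred per second.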
  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
  if (cpu_frequency != 0) {
    state.counters["cpufreq"] = cpu_frequency;
  }

  state.counters["FLOPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 * output_size * channels * kernel_size, benchmark::Counter::kIsRate);

  state.counters["bytes"] = benchmark::Counter(
    uint64_t(state.iterations()) * (output_size + input_height * input_width + kernel_size + 1 /* bias */) * channels * sizeof(uint16_t),
    benchmark::Counter::kIsRate);
}

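// Per-microkernel wrappers, named <channel_tile>x<primary_tile>; the "_acc2"
// suffix marks variants that use two accumulators. Each wrapper runs only if
// the CPU supports NEON FP16 arithmetic (CheckNEONFP16ARITH).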
#if XNN_ENABLE_ARM_FP16 && (XNN_ARCH_ARM || XNN_ARCH_ARM64)
  static void f16_dwconv_8x4__neonfp16arith_acc2(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up8x4__neonfp16arith_acc2,
               xnn_init_f16_minmax_neon_params,
               8, 4, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_8x4__neonfp16arith(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up8x4__neonfp16arith,
               xnn_init_f16_minmax_neon_params,
               8, 4, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_8x9__neonfp16arith_acc2(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith_acc2,
               xnn_init_f16_minmax_neon_params,
               8, 9, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_8x9__neonfp16arith(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up8x9__neonfp16arith,
               xnn_init_f16_minmax_neon_params,
               8, 9, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_8x25__neonfp16arith_acc2(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up8x25__neonfp16arith_acc2,
               xnn_init_f16_minmax_neon_params,
               8, 25, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_8x25__neonfp16arith(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up8x25__neonfp16arith,
               xnn_init_f16_minmax_neon_params,
               8, 25, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_16x4__neonfp16arith_acc2(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up16x4__neonfp16arith_acc2,
               xnn_init_f16_minmax_neon_params,
               16, 4, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_16x4__neonfp16arith(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up16x4__neonfp16arith,
               xnn_init_f16_minmax_neon_params,
               16, 4, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_16x9__neonfp16arith_acc2(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith_acc2,
               xnn_init_f16_minmax_neon_params,
               16, 9, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_16x9__neonfp16arith(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up16x9__neonfp16arith,
               xnn_init_f16_minmax_neon_params,
               16, 9, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_16x25__neonfp16arith_acc2(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up16x25__neonfp16arith_acc2,
               xnn_init_f16_minmax_neon_params,
               16, 25, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_16x25__neonfp16arith(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up16x25__neonfp16arith,
               xnn_init_f16_minmax_neon_params,
               16, 25, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_32x4__neonfp16arith_acc2(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up32x4__neonfp16arith_acc2,
               xnn_init_f16_minmax_neon_params,
               32, 4, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_32x4__neonfp16arith(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up32x4__neonfp16arith,
               xnn_init_f16_minmax_neon_params,
               32, 4, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_32x9__neonfp16arith_acc2(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up32x9__neonfp16arith_acc2,
               xnn_init_f16_minmax_neon_params,
               32, 9, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_32x9__neonfp16arith(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up32x9__neonfp16arith,
               xnn_init_f16_minmax_neon_params,
               32, 9, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_32x25__neonfp16arith_acc2(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up32x25__neonfp16arith_acc2,
               xnn_init_f16_minmax_neon_params,
               32, 25, benchmark::utils::CheckNEONFP16ARITH);
  }

  static void f16_dwconv_32x25__neonfp16arith(benchmark::State& state, const char* net) {
    f16_dwconv(state,
               xnn_f16_dwconv_minmax_ukernel_up32x25__neonfp16arith,
               xnn_init_f16_minmax_neon_params,
               32, 25, benchmark::utils::CheckNEONFP16ARITH);
  }

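  // BENCHMARK_DWCONV (defined in bench/dwconv.h) registers each wrapper above
  // over a fixed set of depthwise-convolution layer shapes (presumably the
  // same MobileNet-style layer set used by the other dwconv benchmarks).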
  BENCHMARK_DWCONV(f16_dwconv_8x4__neonfp16arith_acc2)
  BENCHMARK_DWCONV(f16_dwconv_8x4__neonfp16arith)
  BENCHMARK_DWCONV(f16_dwconv_8x9__neonfp16arith_acc2)
  BENCHMARK_DWCONV(f16_dwconv_8x9__neonfp16arith)
  BENCHMARK_DWCONV(f16_dwconv_8x25__neonfp16arith_acc2)
  BENCHMARK_DWCONV(f16_dwconv_8x25__neonfp16arith)
  BENCHMARK_DWCONV(f16_dwconv_16x4__neonfp16arith_acc2)
  BENCHMARK_DWCONV(f16_dwconv_16x4__neonfp16arith)
  BENCHMARK_DWCONV(f16_dwconv_16x9__neonfp16arith_acc2)
  BENCHMARK_DWCONV(f16_dwconv_16x9__neonfp16arith)
  BENCHMARK_DWCONV(f16_dwconv_16x25__neonfp16arith_acc2)
  BENCHMARK_DWCONV(f16_dwconv_16x25__neonfp16arith)
  BENCHMARK_DWCONV(f16_dwconv_32x4__neonfp16arith_acc2)
  BENCHMARK_DWCONV(f16_dwconv_32x4__neonfp16arith)
  BENCHMARK_DWCONV(f16_dwconv_32x9__neonfp16arith_acc2)
  BENCHMARK_DWCONV(f16_dwconv_32x9__neonfp16arith)
  BENCHMARK_DWCONV(f16_dwconv_32x25__neonfp16arith_acc2)
  BENCHMARK_DWCONV(f16_dwconv_32x25__neonfp16arith)
#endif  // XNN_ENABLE_ARM_FP16 && (XNN_ARCH_ARM || XNN_ARCH_ARM64)

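// Defining XNNPACK_BENCHMARK_NO_MAIN lets this file be linked into a larger
// benchmark binary that supplies its own main().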
#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif