/* benchmark_adler32_copy.cc -- benchmark adler32 (elided copy) variants
 * Copyright (C) 2022 Nathan Moinvaziri, Adam Stylinski
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include <stdio.h>
#include <assert.h>
#include <string.h>

#include <benchmark/benchmark.h>

extern "C" {
#  include "zbuild.h"
#  include "zutil_p.h"
#  include "cpu_features.h"
}

#define MAX_RANDOM_INTS (1024 * 1024)
#define MAX_RANDOM_INTS_SIZE (MAX_RANDOM_INTS * sizeof(uint32_t))

typedef uint32_t (*adler32_cpy_func)(uint32_t adler, unsigned char *dst, const unsigned char *buf, size_t len);

class adler32_copy: public benchmark::Fixture {
private:
    uint32_t *random_ints_src;
    uint32_t *random_ints_dst;

public:
    void SetUp(const ::benchmark::State& state) {
        /* Control the alignment so that we have the best case scenario for loads. With
         * AVX512, unaligned loads can mean we're crossing a cacheline boundary at every
         * load. While that is a realistic scenario, it makes it difficult to compare one
         * benchmark run to another, because one allocation could happen to be perfectly
         * aligned for the loads while the next one is not. This is not meant to give
         * AVX512 an advantage (indeed, all narrower SIMD implementations benefit from the
         * aligned allocation as well), but to control the _consistency_ of the results. */
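        /* zng_alloc (from zutil_p.h) is assumed here to return memory suitably
         * aligned for the widest vector loads; the alignment rationale above
         * relies on that property of the allocator. */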
        random_ints_src = (uint32_t *)zng_alloc(MAX_RANDOM_INTS_SIZE);
        random_ints_dst = (uint32_t *)zng_alloc(MAX_RANDOM_INTS_SIZE);
        assert(random_ints_src != NULL);
        assert(random_ints_dst != NULL);

        for (int32_t i = 0; i < MAX_RANDOM_INTS; i++) {
            random_ints_src[i] = rand();
        }
    }

    void Bench(benchmark::State& state, adler32_cpy_func adler32_func) {
        uint32_t hash = 0;

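        /* The previous result is fed back in as the starting checksum, so each
         * iteration depends on the last; presumably this keeps the compiler
         * from hoisting the call or overlapping iterations. */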
        for (auto _ : state) {
            hash = adler32_func(hash, (unsigned char *)random_ints_dst,
                                (const unsigned char *)random_ints_src, state.range(0));
        }

        benchmark::DoNotOptimize(hash);
    }

    void TearDown(const ::benchmark::State& state) {
        zng_free(random_ints_src);
        zng_free(random_ints_dst);
    }
};

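/* Two registration flavors: BENCHMARK_ADLER32_COPY benchmarks a fused
 * checksum-and-copy function directly, while BENCHMARK_ADLER32_BASELINE_COPY
 * wraps a plain checksum with an explicit memcpy, giving the two-pass cost
 * that the fused variants are meant to beat. */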
#define BENCHMARK_ADLER32_COPY(name, fptr, support_flag) \
    BENCHMARK_DEFINE_F(adler32_copy, name)(benchmark::State& state) { \
        if (!support_flag) { \
            state.SkipWithError("CPU does not support " #name); \
        } \
        Bench(state, fptr); \
    } \
    BENCHMARK_REGISTER_F(adler32_copy, name)->Range(8192, MAX_RANDOM_INTS_SIZE);
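/* Note: Range(8192, MAX_RANDOM_INTS_SIZE) uses Google Benchmark's default
 * range multiplier of 8, so each variant is measured at roughly 8 KiB,
 * 64 KiB, 512 KiB, and 4 MiB buffer sizes. */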

#define BENCHMARK_ADLER32_BASELINE_COPY(name, fptr, support_flag) \
    BENCHMARK_DEFINE_F(adler32_copy, name)(benchmark::State& state) { \
        if (!support_flag) { \
            state.SkipWithError("CPU does not support " #name); \
        } \
        Bench(state, [](uint32_t init_sum, unsigned char *dst, \
                        const unsigned char *buf, size_t len) -> uint32_t { \
            memcpy(dst, buf, len); \
            return fptr(init_sum, buf, len); \
        }); \
    } \
    BENCHMARK_REGISTER_F(adler32_copy, name)->Range(8192, MAX_RANDOM_INTS_SIZE);
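/* The baseline lambda copies first and then checksums the source in a second
 * pass; the *_fold_copy_* variants registered below compute the checksum
 * while copying, in a single pass over the data. */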

BENCHMARK_ADLER32_BASELINE_COPY(c, adler32_c, 1);

#ifdef ARM_NEON_ADLER32
/* If we inline this copy for neon, the function would go here */
//BENCHMARK_ADLER32_COPY(neon, adler32_neon, arm_cpu_has_neon);
BENCHMARK_ADLER32_BASELINE_COPY(neon_copy_baseline, adler32_neon, arm_cpu_has_neon);
#endif

#ifdef PPC_VMX_ADLER32
//BENCHMARK_ADLER32_COPY(vmx_inline_copy, adler32_fold_copy_vmx, power_cpu_has_altivec);
BENCHMARK_ADLER32_BASELINE_COPY(vmx_copy_baseline, adler32_vmx, power_cpu_has_altivec);
#endif
#ifdef POWER8_VSX_ADLER32
//BENCHMARK_ADLER32_COPY(power8_inline_copy, adler32_fold_copy_power8, power_cpu_has_arch_2_07);
BENCHMARK_ADLER32_BASELINE_COPY(power8, adler32_power8, power_cpu_has_arch_2_07);
#endif

#ifdef X86_SSE42_ADLER32
BENCHMARK_ADLER32_BASELINE_COPY(sse42_baseline, adler32_ssse3, x86_cpu_has_ssse3);
BENCHMARK_ADLER32_COPY(sse42, adler32_fold_copy_sse42, x86_cpu_has_sse42);
#endif
#ifdef X86_AVX2_ADLER32
BENCHMARK_ADLER32_BASELINE_COPY(avx2_baseline, adler32_avx2, x86_cpu_has_avx2);
BENCHMARK_ADLER32_COPY(avx2, adler32_fold_copy_avx2, x86_cpu_has_avx2);
#endif
#ifdef X86_AVX512_ADLER32
BENCHMARK_ADLER32_BASELINE_COPY(avx512_baseline, adler32_avx512, x86_cpu_has_avx512);
BENCHMARK_ADLER32_COPY(avx512, adler32_fold_copy_avx512, x86_cpu_has_avx512);
#endif
#ifdef X86_AVX512VNNI_ADLER32
BENCHMARK_ADLER32_BASELINE_COPY(avx512_vnni_baseline, adler32_avx512_vnni, x86_cpu_has_avx512vnni);
BENCHMARK_ADLER32_COPY(avx512_vnni, adler32_fold_copy_avx512_vnni, x86_cpu_has_avx512vnni);
#endif