1 /* benchmark_adler32.cc -- benchmark adler32 variants
2  * Copyright (C) 2022 Nathan Moinvaziri, Adam Stylinski
3  * For conditions of distribution and use, see copyright notice in zlib.h
4  */
5 
6 #include <stdio.h>
7 #include <assert.h>
8 
9 #include <benchmark/benchmark.h>
10 
11 extern "C" {
12 #  include "zbuild.h"
13 #  include "zutil_p.h"
14 #  include "cpu_features.h"
15 }
16 
/* Benchmark input: 1 MiB of random uint32_t values, i.e. 4 MiB of data.
 * MAX_RANDOM_INTS_SIZE is the buffer size in bytes and also serves as the
 * upper bound of the benchmarked length range. */
#define MAX_RANDOM_INTS (1024 * 1024)
#define MAX_RANDOM_INTS_SIZE (MAX_RANDOM_INTS * sizeof(uint32_t))
19 
20 class adler32: public benchmark::Fixture {
21 private:
22     uint32_t *random_ints;
23 
24 public:
SetUp(const::benchmark::State & state)25     void SetUp(const ::benchmark::State& state) {
26         /* Control the alignment so that we have the best case scenario for loads. With
27          * AVX512, unaligned loads can mean we're crossing a cacheline boundary at every load.
28          * And while this is a realistic scenario, it makes it difficult to compare benchmark
29          * to benchmark because one allocation could have been aligned perfectly for the loads
30          * while the subsequent one happened to not be. This is not to be advantageous to AVX512
31          * (indeed, all lesser SIMD implementations benefit from this aligned allocation), but to
32          * control the _consistency_ of the results */
33         random_ints = (uint32_t *)zng_alloc(MAX_RANDOM_INTS_SIZE);
34         assert(random_ints != NULL);
35 
36         for (int32_t i = 0; i < MAX_RANDOM_INTS; i++) {
37             random_ints[i] = rand();
38         }
39     }
40 
Bench(benchmark::State & state,adler32_func adler32)41     void Bench(benchmark::State& state, adler32_func adler32) {
42         uint32_t hash = 0;
43 
44         for (auto _ : state) {
45             hash = adler32(hash, (const unsigned char *)random_ints, state.range(0));
46         }
47 
48         benchmark::DoNotOptimize(hash);
49     }
50 
TearDown(const::benchmark::State & state)51     void TearDown(const ::benchmark::State& state) {
52         zng_free(random_ints);
53     }
54 };
55 
/* Define and register one fixture benchmark per adler32 implementation.
 * name:         suffix used for the benchmark and in the skip message.
 * fptr:         the adler32_func to benchmark.
 * support_flag: runtime CPU-feature flag; when false the benchmark is
 *               skipped with an error instead of executing.
 * Each benchmark sweeps input lengths from 2048 bytes up to the full buffer. */
#define BENCHMARK_ADLER32(name, fptr, support_flag) \
    BENCHMARK_DEFINE_F(adler32, name)(benchmark::State& state) { \
        if (!support_flag) { \
            state.SkipWithError("CPU does not support " #name); \
            return; /* SkipWithError does not exit for us; don't fall into Bench() */ \
        } \
        Bench(state, fptr); \
    } \
    BENCHMARK_REGISTER_F(adler32, name)->Range(2048, MAX_RANDOM_INTS_SIZE);
64 
/* Portable C implementation: the baseline, always available. */
BENCHMARK_ADLER32(c, adler32_c, 1);

/* ARM SIMD variant, gated on runtime NEON support. */
#ifdef ARM_NEON_ADLER32
BENCHMARK_ADLER32(neon, adler32_neon, arm_cpu_has_neon);
#endif

/* POWER variants: VMX/AltiVec, and the POWER8 (ISA 2.07) VSX path. */
#ifdef PPC_VMX_ADLER32
BENCHMARK_ADLER32(vmx, adler32_vmx, power_cpu_has_altivec);
#endif
#ifdef POWER8_VSX_ADLER32
BENCHMARK_ADLER32(power8, adler32_power8, power_cpu_has_arch_2_07);
#endif

/* x86 variants, from SSSE3 up through AVX512-VNNI; each is skipped at
 * runtime when the CPU lacks the corresponding feature. */
#ifdef X86_SSSE3_ADLER32
BENCHMARK_ADLER32(ssse3, adler32_ssse3, x86_cpu_has_ssse3);
#endif
#ifdef X86_AVX2_ADLER32
BENCHMARK_ADLER32(avx2, adler32_avx2, x86_cpu_has_avx2);
#endif
#ifdef X86_AVX512_ADLER32
BENCHMARK_ADLER32(avx512, adler32_avx512, x86_cpu_has_avx512);
#endif
#ifdef X86_AVX512VNNI_ADLER32
BENCHMARK_ADLER32(avx512_vnni, adler32_avx512_vnni, x86_cpu_has_avx512vnni);
#endif
90