// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <random>
#include <vector>

#include <fp16.h>

#include <xnnpack.h>
#include <xnnpack/microfnptr.h>
#include <xnnpack/microparams-init.h>

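// Tests raddstoreexpminusmax micro-kernels, which compute y[i] = exp(x[i] - x_max),
// store each result, and accumulate the sum of those exponentials -- the per-element
// numerators and the normalizer of a numerically stable softmax.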
class RAddStoreExpMinusMaxMicrokernelTester {
 public:
  inline RAddStoreExpMinusMaxMicrokernelTester& elements(size_t elements) {
    assert(elements != 0);
    this->elements_ = elements;
    return *this;
  }

  inline size_t elements() const {
    return this->elements_;
  }

  inline RAddStoreExpMinusMaxMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_f16_raddstoreexpminusmax_ukernel_function raddstoreexpminusmax, xnn_init_f16_expminus_params_fn init_params) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    // Choose a range such that exph(x[i]) overflows, but exph(x[i] - x_max) doesn't.
    // However, the range is still narrow enough that double-precision exp doesn't overflow.
    std::uniform_real_distribution<float> f32dist(15.0f, 20.0f);
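    // The input is over-allocated by XNN_EXTRA_BYTES so that vectorized kernels may
    // safely read (but not process) past the last element. The output and the sum are
    // pre-filled with NaN so that any value the kernel fails to write is caught by the
    // checks below.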
    std::vector<uint16_t> x(elements() + XNN_EXTRA_BYTES / sizeof(uint16_t));
    std::vector<uint16_t> y(elements());
    std::vector<float> y_ref(elements());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(x.begin(), x.end(), [&]() { return fp16_ieee_from_fp32_value(f32dist(rng)); });
      std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */);

      // Compute reference results.
      float sum_ref = 0.0f;
      float x_max_as_float = -std::numeric_limits<float>::infinity();
      for (size_t i = 0; i < elements(); i++) {
        x_max_as_float = std::max(x_max_as_float, fp16_ieee_to_fp32_value(x[i]));
      }
      const uint16_t x_max_as_half = fp16_ieee_from_fp32_value(x_max_as_float);
      for (size_t i = 0; i < elements(); i++) {
        const float y_ref_value = std::exp(fp16_ieee_to_fp32_value(x[i]) - x_max_as_float);
        y_ref[i] = y_ref_value;
        sum_ref += y_ref_value;
      }

      // Call optimized micro-kernel.
      uint16_t sum = UINT16_C(0x7E00) /* NaN */;
      xnn_f16_expminus_params params;
      init_params(&params);
      raddstoreexpminusmax(elements() * sizeof(uint16_t), x.data(), &x_max_as_half, y.data(), &sum, &params);

      // Verify results.
      for (size_t i = 0; i < elements(); i++) {
        ASSERT_NEAR(y_ref[i], fp16_ieee_to_fp32_value(y[i]), std::abs(y_ref[i]) * 5.0e-3f)
          << "element " << i << " / " << elements() << ", x_max " << x_max_as_float;
      }
      ASSERT_NEAR(sum_ref, fp16_ieee_to_fp32_value(sum), std::abs(sum_ref) * 5.0e-3f)
        << "batch " << elements() << ", x_max " << x_max_as_float;
    }
  }

  void Test(xnn_f32_raddstoreexpminusmax_ukernel_function raddstoreexpminusmax, xnn_init_f32_expminus_params_fn init_params) const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    // Choose a range such that expf(x[i]) overflows, but expf(x[i] - x_max) doesn't.
    // However, the range is still narrow enough that double-precision exp doesn't overflow.
    std::uniform_real_distribution<float> f32dist(90.0f, 100.0f);

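    // Same buffer conventions as the f16 test above: padded input, NaN-canary outputs.
    // The reference is computed in double precision and compared against the
    // single-precision kernel output with a 1.0e-6 relative tolerance.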
    std::vector<float> x(elements() + XNN_EXTRA_BYTES / sizeof(float));
    std::vector<float> y(elements());
    std::vector<double> y_ref(elements());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(x.begin(), x.end(), [&]() { return f32dist(rng); });
      std::fill(y.begin(), y.end(), std::nanf(""));

      // Compute reference results.
      double sum_ref = 0.0;
      const float x_max = *std::max_element(x.begin(), x.begin() + elements());
      for (size_t i = 0; i < elements(); i++) {
        const double y_ref_value = std::exp(double(x[i]) - double(x_max));
        y_ref[i] = y_ref_value;
        sum_ref += y_ref_value;
      }

      // Call optimized micro-kernel.
      float sum = std::nanf("");
      xnn_f32_expminus_params params;
      init_params(&params);
      raddstoreexpminusmax(elements() * sizeof(float), x.data(), &x_max, y.data(), &sum, &params);

      // Verify results.
      for (size_t i = 0; i < elements(); i++) {
        ASSERT_NEAR(y_ref[i], double(y[i]), std::abs(y_ref[i]) * 1.0e-6)
          << "element " << i << " / " << elements() << ", x_max " << x_max;
      }
      ASSERT_NEAR(sum_ref, double(sum), std::abs(sum_ref) * 1.0e-6)
        << "batch " << elements() << ", x_max " << x_max;
    }
  }

 private:
  size_t elements_{1};
  size_t iterations_{15};
};
131