// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <numeric>
#include <random>

#include <xnnpack.h>
#include <xnnpack/node-type.h>
#include <xnnpack/operator.h>
#include <xnnpack/subgraph.h>

#include <gtest/gtest.h>

class PreluTestF32 : public ::testing::Test {
protected:
  void SetUp() override
  {
    random_device = std::unique_ptr<std::random_device>(new std::random_device());
    rng = std::mt19937((*random_device)());
    dim_dist = std::uniform_int_distribution<size_t>(1, 9);
    input_dims = RandomShape(4);
    output_dims = input_dims;
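    // The random 4D input shape is treated as [N, H, W, C]: the three leading
    // dimensions are flattened into the batch and the innermost one is channels.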
    batch_size = input_dims[0] * input_dims[1] * input_dims[2];
    channels = input_dims[3];
    slope_dims = {channels};
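    // XNN_EXTRA_BYTES of padding lets vectorized kernels safely read slightly
    // past the last input element.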
    input = std::vector<float>(XNN_EXTRA_BYTES / sizeof(float) + NumElements(input_dims));
    slope = std::vector<float>(channels);
    operator_output = std::vector<float>(NumElements(output_dims));
    subgraph_output = std::vector<float>(operator_output.size());
  }

  std::vector<size_t> RandomShape(size_t num_dims)
  {
    std::vector<size_t> dims(num_dims);
    std::generate(dims.begin(), dims.end(), [&] { return dim_dist(rng); });
    return dims;
  }

  size_t NumElements(std::vector<size_t>& dims)
  {
    return std::accumulate(dims.begin(), dims.end(), size_t(1), std::multiplies<size_t>());
  }

  std::unique_ptr<std::random_device> random_device;
  std::mt19937 rng;
  std::uniform_int_distribution<size_t> dim_dist;

  std::vector<size_t> output_dims;
  std::vector<size_t> input_dims;
  std::vector<size_t> slope_dims;
  std::vector<float> input;
  std::vector<float> slope;
  std::vector<float> operator_output;
  std::vector<float> subgraph_output;
  size_t channels;
  size_t batch_size;
};

TEST_F(PreluTestF32, define)
{
  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/3, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph);

  uint32_t input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_tensor_value(
                          subgraph, xnn_datatype_fp32, input_dims.size(), input_dims.data(), nullptr,
                          /*external_id=*/0, /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  uint32_t slope_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_tensor_value(
                          subgraph, xnn_datatype_fp32, slope_dims.size(), slope_dims.data(), slope.data(),
                          /*external_id=*/1, /*flags=*/0, &slope_id));
  ASSERT_NE(slope_id, XNN_INVALID_NODE_ID);

  uint32_t output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_tensor_value(
                          subgraph, xnn_datatype_fp32, output_dims.size(), output_dims.data(), nullptr,
                          /*external_id=*/2, /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  ASSERT_EQ(xnn_status_success, xnn_define_prelu(subgraph, input_id, slope_id, output_id, /*flags=*/0));

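  // Inspect the node recorded by xnn_define_prelu to verify its type and wiring.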
  ASSERT_EQ(subgraph->num_nodes, 1);
  const struct xnn_node* node = &subgraph->nodes[0];
  ASSERT_EQ(node->type, xnn_node_type_prelu);
  ASSERT_EQ(node->compute_type, xnn_compute_type_fp32);
  ASSERT_EQ(node->num_inputs, 2);
  ASSERT_EQ(node->inputs[0], input_id);
  ASSERT_EQ(node->inputs[1], slope_id);
  ASSERT_EQ(node->num_outputs, 1);
  ASSERT_EQ(node->outputs[0], output_id);
  ASSERT_EQ(node->flags, 0);
}

TEST_F(PreluTestF32, matches_operator_api)
{
  std::uniform_real_distribution<float> f32idist(-1.0f, 1.0f);
  std::uniform_real_distribution<float> f32wdist(0.25f, 0.75f);
  std::generate(input.begin(), input.end(), [&]() { return f32idist(rng); });
  std::generate(slope.begin(), slope.end(), [&]() { return f32wdist(rng); });
  std::fill(operator_output.begin(), operator_output.end(), nanf(""));
  std::fill(subgraph_output.begin(), subgraph_output.end(), nanf(""));

  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  // Call operator API.
  xnn_operator_t op = nullptr;
  const xnn_status status =
    xnn_create_prelu_nc_f32(channels, channels, channels, slope.data(), /*flags=*/0, nullptr, &op);
  if (status == xnn_status_unsupported_hardware) {
    GTEST_SKIP();
  }

  ASSERT_EQ(xnn_status_success, status);
  ASSERT_NE(nullptr, op);
  std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_op(op, xnn_delete_operator);

  ASSERT_EQ(
    xnn_status_success,
    xnn_setup_prelu_nc_f32(op, batch_size, input.data(), operator_output.data(), /*threadpool=*/nullptr));

  ASSERT_EQ(xnn_status_success, xnn_run_operator(op, /*threadpool=*/nullptr));

  // Call subgraph API.
  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/3, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph);
  uint32_t input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_tensor_value(
                          subgraph, xnn_datatype_fp32, input_dims.size(), input_dims.data(), nullptr,
                          /*external_id=*/0, /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  uint32_t slope_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success,
    xnn_define_tensor_value(
      subgraph, xnn_datatype_fp32, slope_dims.size(), slope_dims.data(), slope.data(), /*external_id=*/1,
      /*flags=*/0, &slope_id));
  ASSERT_NE(slope_id, XNN_INVALID_NODE_ID);

  uint32_t output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success,
    xnn_define_tensor_value(
      subgraph, xnn_datatype_fp32, output_dims.size(), output_dims.data(), nullptr, /*external_id=*/2,
      /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  xnn_runtime_t runtime = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_define_prelu(subgraph, input_id, slope_id, output_id, /*flags=*/0));
  ASSERT_EQ(xnn_status_success, xnn_create_runtime_v3(subgraph, nullptr, nullptr, /*flags=*/0, &runtime));
  ASSERT_NE(nullptr, runtime);
  std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> auto_runtime(runtime, xnn_delete_runtime);
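  // The slope tensor was defined with static data, so only the input and
  // output values are passed to the runtime as external values.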
  std::array<xnn_external_value, 2> external = {
    xnn_external_value{input_id, input.data()}, xnn_external_value{output_id, subgraph_output.data()}};
  ASSERT_EQ(xnn_status_success, xnn_setup_runtime(runtime, external.size(), external.data()));
  ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(runtime));

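  // Subgraph execution should reproduce the direct operator results exactly.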
  ASSERT_EQ(subgraph_output, operator_output);
}