xref: /aosp_15_r20/external/executorch/kernels/test/op_gelu_test.cpp (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
1 /*
2  * Copyright (c) Meta Platforms, Inc. and affiliates.
3  * All rights reserved.
4  *
5  * This source code is licensed under the BSD-style license found in the
6  * LICENSE file in the root directory of this source tree.
7  */
8 
#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>

#include <gtest/gtest.h>

#include <vector>
17 
18 using namespace ::testing;
19 using exec_aten::ScalarType;
20 using exec_aten::string_view;
21 using exec_aten::Tensor;
22 using torch::executor::testing::SupportedFeatures;
23 using torch::executor::testing::TensorFactory;
24 
25 class OpGeluTest : public OperatorTest {
26  protected:
27   Tensor&
op_gelu_out(const Tensor & self,string_view approximate,Tensor & out)28   op_gelu_out(const Tensor& self, string_view approximate, Tensor& out) {
29     return torch::executor::aten::gelu_outf(context_, self, approximate, out);
30   }
31 
32   // Common testing for gelu on two floating point Tensors.
33   template <ScalarType DTYPE>
test_gelu_execution()34   void test_gelu_execution() {
35     TensorFactory<DTYPE> tf;
36 
37     const std::vector<int32_t> sizes = {3, 2};
38 
39     Tensor in = tf.make(
40         sizes, /*data=*/{-0.4775, 0.2948, -0.3984, 1.8690, -0.4048, -0.4848});
41 
42     // Destination for the gelu.
43     Tensor out = tf.zeros(sizes);
44 
45     // Run full gelu.
46     op_gelu_out(in, "none", out);
47 
48     // Check that it matches the expected output.
49     EXPECT_TENSOR_CLOSE(
50         out,
51         tf.make(
52             sizes,
53             /*data=*/
54             {-0.15113, 0.181575, -0.137515, 1.81141, -0.13877, -0.152183}));
55 
56     // Run tanh gelu appx.
57     op_gelu_out(in, "tanh", out);
58 
59     // Check that it matches the expected output.
60     EXPECT_TENSOR_CLOSE(
61         out,
62         tf.make(
63             sizes,
64             /*data=*/
65             {-0.151145, 0.181573, -0.137522, 1.8114, -0.138778, -0.152199}));
66   }
67 };
68 
TEST_F(OpGeluTest, FloatTensors) {
  // Float is always supported; run the shared exact + tanh execution check.
  test_gelu_execution<ScalarType::Float>();
}
72 
TEST_F(OpGeluTest, DoubleTensors) {
  // Double support is optional per kernel library; skip gracefully when the
  // running kernel does not provide it.
  if (SupportedFeatures::get()->op_gelu_dtype_double) {
    test_gelu_execution<ScalarType::Double>();
  } else {
    GTEST_SKIP();
  }
}
80 
TEST_F(OpGeluTest, UnhandledDtypeDies) {
  // Bool is not a supported input dtype for gelu; the kernel must report
  // failure rather than produce output.
  TensorFactory<ScalarType::Bool> tf;

  Tensor in = tf.make({2, 2}, /*data=*/{false, true, false, true});

  // Output buffer matching the (unsupported) input dtype.
  Tensor out = tf.zeros({2, 2});

  ET_EXPECT_KERNEL_FAILURE(context_, op_gelu_out(in, "none", out));
}
94 
95 // The output tensor may not have a dtype different from the inputs even if it
96 // has the same shape.
TEST_F(OpGeluTest, MismatchedOutputDtypeDies) {
  // Two different floating-point dtypes with identical shapes. The original
  // comment claimed the two types "have the same size", but Float is 4 bytes
  // and Double is 8; what this test actually demonstrates is that the
  // ScalarType of `out` must match the input's, regardless of shape.
  TensorFactory<ScalarType::Float> tf_float;
  TensorFactory<ScalarType::Double> tf_double;

  const std::vector<int32_t> sizes = {2, 2};

  Tensor a = tf_float.ones(sizes);

  // Destination with the same shape but a dtype different from the input.
  Tensor out = tf_double.zeros(sizes);

  // Running gelu into an output of a different dtype must fail the kernel.
  ET_EXPECT_KERNEL_FAILURE(context_, op_gelu_out(a, "none", out));
}
115 
TEST_F(OpGeluTest, InvalidAppxStringDies) {
  TensorFactory<ScalarType::Float> tf;

  Tensor in = tf.ones(/*sizes=*/{4});

  // Output buffer with the same shape as the input.
  Tensor out = tf.zeros(/*sizes=*/{4});

  // "foo" is not a recognized approximation mode; the kernel must fail.
  ET_EXPECT_KERNEL_FAILURE(context_, op_gelu_out(in, "foo", out));
}
127 
TEST_F(OpGeluTest,SimpleGeneratedCase)128 TEST_F(OpGeluTest, SimpleGeneratedCase) {
129   TensorFactory<ScalarType::Float> tf;
130 
131   Tensor x = tf.make(
132       {10, 10},
133       {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
134        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
135        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
136        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
137        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
138        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
139        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
140        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0});
141   Tensor expected_result = tf.make(
142       {10, 10}, {0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
143                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
144                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
145                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
146                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
147                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
148                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
149                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
150                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
151                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
152                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
153                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
154                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
155                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
156                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
157                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
158                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
159                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
160                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
161                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
162                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
163                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
164                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
165                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
166                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
167                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
168                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
169                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
170                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
171                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
172                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
173                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
174                  0.8411920070648193, 0.8411920070648193, 0.8411920070648193,
175                  0.8411920070648193});
176 
177   Tensor out = tf.zeros({10, 10});
178   Tensor ret = op_gelu_out(x, "tanh", out);
179   EXPECT_TENSOR_CLOSE(out, expected_result);
180 }
181 
TEST_F(OpGeluTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  // Input and its tanh-gelu reference values.
  Tensor input = tf.make(
      {3, 2},
      {0.9769402146339417,
       0.4728269577026367,
       0.04416435956954956,
       0.7145527601242065,
       0.7109619975090027,
       0.36388522386550903});
  Tensor expected = tf.make(
      {3, 2},
      {0.8162848949432373,
       0.3223743438720703,
       0.022860059514641762,
       0.5448282957077026,
       0.5413010716438293,
       0.23361928761005402});

  // Bound-dynamic output whose upper bound equals the expected shape.
  Tensor out =
      tf.zeros({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_gelu_out(input, "tanh", out);
  EXPECT_TENSOR_CLOSE(out, expected);
}
207 
TEST_F(OpGeluTest, DynamicShapeUpperBoundLargerThanExpected) {
  // Resizing below the upper bound is not yet supported by this kernel.
  GTEST_SKIP() << "Dynamic shape not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.make(
      {3, 2},
      {0.9769402146339417,
       0.4728269577026367,
       0.04416435956954956,
       0.7145527601242065,
       0.7109619975090027,
       0.36388522386550903});
  Tensor expected = tf.make(
      {3, 2},
      {0.8162848949432373,
       0.3223743438720703,
       0.022860059514641762,
       0.5448282957077026,
       0.5413010716438293,
       0.23361928761005402});

  // Bound-dynamic output whose upper bound exceeds the expected shape.
  Tensor out =
      tf.zeros({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_gelu_out(input, "tanh", out);
  EXPECT_TENSOR_CLOSE(out, expected);
}
234 
TEST_F(OpGeluTest, DynamicShapeUnbound) {
  // Fully unbound dynamic resizing is not yet supported by this kernel.
  GTEST_SKIP() << "Dynamic shape not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.make(
      {3, 2},
      {0.9769402146339417,
       0.4728269577026367,
       0.04416435956954956,
       0.7145527601242065,
       0.7109619975090027,
       0.36388522386550903});
  Tensor expected = tf.make(
      {3, 2},
      {0.8162848949432373,
       0.3223743438720703,
       0.022860059514641762,
       0.5448282957077026,
       0.5413010716438293,
       0.23361928761005402});

  // Unbound-dynamic output starting smaller than the expected shape.
  Tensor out =
      tf.zeros({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  Tensor ret = op_gelu_out(input, "tanh", out);
  EXPECT_TENSOR_CLOSE(out, expected);
}
261