/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <executorch/test/utils/DeathTest.h>
#include <gtest/gtest.h>
#include <cmath>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::optional;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpMeanOutTest : public OperatorTest {
 protected:
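  // Thin wrapper over the generated mean.out entry point declared via
  // FunctionHeaderWrapper.h. It forwards the runtime context (context_,
  // presumably provided by the OperatorTest fixture) so that
  // ET_EXPECT_KERNEL_FAILURE can observe kernel failures instead of aborting
  // the whole test binary.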
  Tensor& op_mean_out(
      const Tensor& self,
      optional<ArrayRef<int64_t>> dim,
      bool keepdim,
      optional<ScalarType> dtype,
      Tensor& out) {
    return torch::executor::aten::mean_outf(
        context_, self, dim, keepdim, dtype, out);
  }

  template <ScalarType IN_DTYPE, ScalarType OUT_DTYPE>
  void test_mean_dim_out_invalid_dimensions() {
    TensorFactory<IN_DTYPE> tf_in;
    TensorFactory<OUT_DTYPE> tf_out;

    // clang-format off
    Tensor self = tf_in.make(
      {2, 3, 4},
      {
        0, 1, 2, 3,
        4, 5, 6, 7,
        8, 9, 10, 11,

        12, 13, 14, 15,
        16, 17, 18, 19,
        20, 21, 22, 23,
      });
    // clang-format on
    Tensor out = tf_out.zeros({2, 3, 1});
    optional<ScalarType> dtype = OUT_DTYPE;

    // Out-of-bounds dim in the dim list: valid dims for a rank-3 input are in
    // [-3, 2].
    int64_t dims_1[1] = {3};
    optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out));

    // The same dim appears more than once in the dim list.
    int64_t dims_2[2] = {2, 2};
    optional_dim_list = ArrayRef<int64_t>{dims_2, 2};
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out));
  }

  template <ScalarType IN_DTYPE, ScalarType OUT_DTYPE>
  void test_mean_dim_out_invalid_shape() {
    TensorFactory<IN_DTYPE> tf_in;
    TensorFactory<OUT_DTYPE> tf_out;

    // clang-format off
    Tensor self = tf_in.make(
      {2, 3, 4},
      {
        0, 1, 2, 3,
        4, 5, 6, 7,
        8, 9, 10, 11,

        12, 13, 14, 15,
        16, 17, 18, 19,
        20, 21, 22, 23,
      });
    // clang-format on

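    // Reducing dim 1 of a {2, 3, 4} input should produce an out of shape
    // {2, 1, 4} with keepdim=true and {2, 4} with keepdim=false. The two cases
    // below intentionally swap those shapes, so the kernel must reject them.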
    // dimension size mismatch when keepdim is true
    Tensor out = tf_out.zeros({2, 4});
    optional<ScalarType> dtype = OUT_DTYPE;
    int64_t dims_1[1] = {1};
    optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out));

    // dimension size mismatch when keepdim is false
    out = tf_out.zeros({2, 1, 4});
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_mean_out(self, optional_dim_list, /*keepdim=*/false, dtype, out));
  }

  template <ScalarType IN_DTYPE, ScalarType OUT_DTYPE>
  void test_mean_dim_out_dtype() {
    TensorFactory<IN_DTYPE> tf_in;
    TensorFactory<OUT_DTYPE> tf_out;
    // clang-format off
    Tensor self = tf_in.make(
      {2, 3, 4},
      {
        0, 1, 2, 3,
        4, 5, 6, 7,
        8, 9, 10, 11,

        12, 13, 14, 15,
        16, 17, 18, 19,
        20, 21, 22, 23,
      });
    // clang-format on
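    // Worked values for the checks below: self is filled with 0..23, so
    //   * mean over dim 2 averages each row of 4 entries, e.g.
    //     (0+1+2+3)/4 = 1.5 and (20+21+22+23)/4 = 21.5;
    //   * mean over dims {0, 1} averages the 6 entries of each column, e.g.
    //     (0+4+8+12+16+20)/6 = 10;
    //   * mean over dim -2 (i.e. dim 1) averages 3 entries, e.g. (0+4+8)/3 = 4;
    //   * a null or empty dim list reduces all 24 entries:
    //     (0+1+...+23)/24 = 11.5.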

    // keepdim=true should work
    Tensor out = tf_out.zeros({2, 3, 1});
    int64_t dims_1[1] = {2};
    optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
    optional<ScalarType> dtype = OUT_DTYPE;
    op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
    // clang-format off
    EXPECT_TENSOR_CLOSE(out, tf_out.make(
      {2, 3, 1},
      {
        1.5,
        5.5,
        9.5,

        13.5,
        17.5,
        21.5
      }));
    // clang-format on

    // keepdim=false should work
    out = tf_out.zeros({2, 3});
    op_mean_out(self, optional_dim_list, /*keepdim=*/false, dtype, out);
    // clang-format off
    EXPECT_TENSOR_CLOSE(out, tf_out.make(
      {2, 3},
      {
        1.5, 5.5, 9.5,
        13.5, 17.5, 21.5
      }));
    // clang-format on

    // dim list with multiple dimensions should work
    out = tf_out.zeros({1, 1, 4});
    int64_t dims_2[2] = {0, 1};
    optional_dim_list = ArrayRef<int64_t>{dims_2, 2};
    op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 4}, {10, 11, 12, 13}));

    out = tf_out.zeros({4});
    op_mean_out(self, optional_dim_list, /*keepdim=*/false, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({4}, {10, 11, 12, 13}));

    // dim list with negative dimensions should work
    out = tf_out.zeros({2, 1, 4});
    int64_t dims_3[1] = {-2};
    optional_dim_list = ArrayRef<int64_t>{dims_3, 1};
    op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
    // clang-format off
    EXPECT_TENSOR_CLOSE(out, tf_out.make(
      {2, 1, 4},
      {
        4, 5, 6, 7,

        16, 17, 18, 19,
      }));
    // clang-format on

    // empty/null dim list should work
    out = tf_out.zeros({1, 1, 1});
    optional<ArrayRef<int64_t>> null_dim_list;
    op_mean_out(self, null_dim_list, /*keepdim=*/true, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {11.5}));

    optional<ArrayRef<int64_t>> empty_dim_list{ArrayRef<int64_t>{}};
    op_mean_out(self, empty_dim_list, /*keepdim=*/true, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {11.5}));

    out = tf_out.zeros({});
    op_mean_out(self, null_dim_list, /*keepdim=*/false, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({}, {11.5}));

    op_mean_out(self, empty_dim_list, /*keepdim=*/false, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({}, {11.5}));
  }

  template <ScalarType OUT_DTYPE>
  void test_mean_dim_out_bool() {
    TensorFactory<ScalarType::Bool> tf_bool;
    TensorFactory<OUT_DTYPE> tf_float;
    // clang-format off
    Tensor self = tf_bool.make(
      {2, 3, 4},
      {
        true, false, true, false,
        false, false, false, false,
        false, true, true, false,

        false, false, true, false,
        false, false, false, true,
        true, true, true, true,
      });
    // clang-format on

    Tensor out = tf_float.zeros({1, 1, 4});
    int64_t dims[2] = {0, 1};
    optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims, 2}};
    optional<ScalarType> dtype = OUT_DTYPE;
    op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
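    // Each output entry averages the 6 boolean entries of one column across
    // dims {0, 1}: columns 0, 1 and 3 contain two trues (2/6 = 0.333...),
    // column 2 contains four (4/6 = 0.666...).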
    EXPECT_TENSOR_CLOSE(
        out,
        tf_float.make({1, 1, 4}, {0.333333, 0.333333, 0.666667, 0.333333}));
  }
};

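// Bool inputs get explicit specializations, presumably because the generic
// test_mean_dim_out_dtype fills the input with 0..23, which would collapse to
// booleans and no longer produce the expected means; the Bool instantiations
// delegate to test_mean_dim_out_bool instead.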
template <>
void OpMeanOutTest::
    test_mean_dim_out_dtype<ScalarType::Bool, ScalarType::Float>() {
  test_mean_dim_out_bool<ScalarType::Float>();
}

template <>
void OpMeanOutTest::
    test_mean_dim_out_dtype<ScalarType::Bool, ScalarType::Double>() {
  test_mean_dim_out_bool<ScalarType::Double>();
}

TEST_F(OpMeanOutTest, InvalidDimensionListDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
  // Use a two-layer switch to handle each possible input/output dtype pair.
#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_mean_dim_out_invalid_dimensions< \
      ScalarType::INPUT_DTYPE, \
      ScalarType::OUTPUT_DTYPE>();

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_FLOAT_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

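  // Sketch of the expansion (assuming the standard ExecuTorch dtype macros):
  // ET_FORALL_REAL_TYPES(TEST_ENTRY) stamps out TEST_ENTRY once per real dtype
  // (Byte, Char, Short, Int, Long, Float, Double), and each TEST_ENTRY expands
  // ET_FORALL_FLOAT_TYPES_WITH2 to invoke TEST_KERNEL with Float and Double as
  // the output dtype, so every real-input/float-output pair is exercised.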
  ET_FORALL_REAL_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
#undef TEST_KERNEL
}

TEST_F(OpMeanOutTest, InvalidShapeDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
  // Use a two-layer switch to handle each possible input/output dtype pair.
#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_mean_dim_out_invalid_shape< \
      ScalarType::INPUT_DTYPE, \
      ScalarType::OUTPUT_DTYPE>();

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_FLOAT_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

  ET_FORALL_REAL_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
#undef TEST_KERNEL
}

TEST_F(OpMeanOutTest, MismatchedDTypesDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
  TensorFactory<ScalarType::Float> tf_float;
  TensorFactory<ScalarType::Int> tf_int;

  // clang-format off
  Tensor self = tf_int.make(
    {2, 3, 4},
    {
      0, 1, 2, 3,
      4, 5, 6, 7,
      8, 9, 10, 11,

      12, 13, 14, 15,
      16, 17, 18, 19,
      20, 21, 22, 23,
    });
  // clang-format on

  // Reduce the Int input into a Float out tensor over dim 2.
  Tensor out = tf_float.zeros({2, 3, 1});
  int64_t dims_1[1] = {2};
  optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
  optional<ScalarType> dtype;

  // When dtype is not specified, the self tensor must already have a
  // floating-point dtype, so an Int input must be rejected.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out));

  dtype = ScalarType::Double;
  // When dtype is specified, the out tensor must have that same dtype, so a
  // Float out with dtype=Double must be rejected.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out));
}

TEST_F(OpMeanOutTest, AllRealInputFloatOutputPasses) {
  // Use a two-layer switch to handle each possible input/output dtype pair.
#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_mean_dim_out_dtype<ScalarType::INPUT_DTYPE, ScalarType::OUTPUT_DTYPE>();

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_FLOAT_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
#undef TEST_KERNEL
}

TEST_F(OpMeanOutTest, HalfSupport) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "Test Half support only for ExecuTorch mode";
  }
#define TEST_ENTRY(ctype, dtype) \
  test_mean_dim_out_dtype<ScalarType::dtype, ScalarType::Half>();
  ET_FORALL_REALH_TYPES(TEST_ENTRY);
#undef TEST_ENTRY

#define TEST_ENTRY(ctype, dtype) \
  test_mean_dim_out_dtype<ScalarType::Half, ScalarType::dtype>();
  ET_FORALL_FLOATH_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpMeanOutTest, InfinityAndNANTest) {
  TensorFactory<ScalarType::Float> tf_float;
  // clang-format off
  Tensor self = tf_float.make(
    {2, 3, 4},
    {
      0, 1, 2, INFINITY,
      INFINITY, -INFINITY, 1, 0,
      NAN, INFINITY, -INFINITY, 2,

      NAN, NAN, 1, 0,
      0, INFINITY, NAN, 4,
      1, NAN, 3.14, 2,
    });
  // clang-format on

  Tensor out = tf_float.zeros({2, 3, 1});
  int64_t dims[1] = {-1};
  optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims, 1}};
  optional<ScalarType> dtype;
  op_mean_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
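  // Expected IEEE-754 behavior when averaging over the last dim: row 0 sums to
  // +inf, so its mean is INFINITY; row 1 adds +inf and -inf, which yields NaN;
  // every remaining row contains at least one NaN, and NaN propagates through
  // the mean.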
  // clang-format off
  EXPECT_TENSOR_CLOSE(out, tf_float.make(
    {2, 3, 1},
    {
      INFINITY,
      NAN,
      NAN,

      NAN,
      NAN,
      NAN
    }));
  // clang-format on
}

TEST_F(OpMeanOutTest, SimpleGeneratedCase) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {10, 10},
      {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0});
  Tensor expected_result =
      tf.make({10}, {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0});

  Tensor out = tf.zeros({10});
  Tensor ret =
      op_mean_out(x, ArrayRef<int64_t>{1}, false, ScalarType::Float, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpMeanOutTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.49627798795700073,
       0.40115922689437866,
       0.5627331733703613,
       0.3858276605606079,
       0.4964867830276489,
       0.5637965202331543});
  Tensor expected_result = tf.make(
      {3}, {0.4487186074256897, 0.4742804169654846, 0.5301416516304016});

  Tensor out =
      tf.zeros({3}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret =
      op_mean_out(x, ArrayRef<int64_t>{1}, false, ScalarType::Float, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpMeanOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.49627798795700073,
       0.40115922689437866,
       0.5627331733703613,
       0.3858276605606079,
       0.4964867830276489,
       0.5637965202331543});
  Tensor expected_result = tf.make(
      {3}, {0.4487186074256897, 0.4742804169654846, 0.5301416516304016});

  Tensor out =
      tf.zeros({10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
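  // The out tensor is allocated with an upper-bound capacity of {10}; with
  // DYNAMIC_BOUND dynamism the comparison below only passes if the kernel
  // resizes out down to the actual result shape {3}.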
  Tensor ret =
      op_mean_out(x, ArrayRef<int64_t>{1}, false, ScalarType::Float, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpMeanOutTest, DynamicShapeUnbound) {
  GTEST_SKIP() << "Dynamic shape unbound not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.49627798795700073,
       0.40115922689437866,
       0.5627331733703613,
       0.3858276605606079,
       0.4964867830276489,
       0.5637965202331543});
  Tensor expected_result = tf.make(
      {3}, {0.4487186074256897, 0.4742804169654846, 0.5301416516304016});

  Tensor out =
      tf.zeros({1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  Tensor ret =
      op_mean_out(x, ArrayRef<int64_t>{1}, false, ScalarType::Float, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}