/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <executorch/test/utils/DeathTest.h>
#include <gtest/gtest.h>
#include <cmath>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::optional;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpSumOutTest : public OperatorTest {
 protected:
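  // Wrapper for the dim-list out variant of sum under test: forwards the
  // fixture's runtime context (context_) along with the arguments to the
  // generated sum_outf entry point.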
  Tensor& op_sum_intlist_out(
      const Tensor& self,
      optional<ArrayRef<int64_t>> dim,
      bool keepdim,
      optional<ScalarType> dtype,
      Tensor& out) {
    return torch::executor::aten::sum_outf(
        context_, self, dim, keepdim, dtype, out);
  }

  template <ScalarType IN_DTYPE, ScalarType OUT_DTYPE>
  void test_sum_dim_out_invalid_dimensions() {
    TensorFactory<IN_DTYPE> tf_in;
    TensorFactory<OUT_DTYPE> tf_out;

    // clang-format off
    Tensor self = tf_in.make(
      {2, 3, 4},
      {
        0, 1, 2, 3,
        4, 5, 6, 7,
        8, 9, 10, 11,

        12, 13, 14, 15,
        16, 17, 18, 19,
        20, 21, 22, 23,
      });
    // clang-format on
    Tensor out = tf_out.zeros({2, 3, 1});
    optional<ScalarType> dtype = OUT_DTYPE;

    // out-of-bound dim in dim list
    int64_t dims_1[1] = {3};
    optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_sum_intlist_out(
            self, optional_dim_list, /*keepdim=*/true, dtype, out));

    // the same dim appears multiple times in list of dims
    int64_t dims_2[2] = {2, 2};
    optional_dim_list = ArrayRef<int64_t>{dims_2, 2};
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_sum_intlist_out(
            self, optional_dim_list, /*keepdim=*/true, dtype, out));
  }

  template <ScalarType IN_DTYPE, ScalarType OUT_DTYPE>
  void test_sum_dim_out_invalid_shape() {
    TensorFactory<IN_DTYPE> tf_in;
    TensorFactory<OUT_DTYPE> tf_out;

    // clang-format off
    Tensor self = tf_in.make(
      {2, 3, 4},
      {
        0, 1, 2, 3,
        4, 5, 6, 7,
        8, 9, 10, 11,

        12, 13, 14, 15,
        16, 17, 18, 19,
        20, 21, 22, 23,
      });
    // clang-format on

    // dimension size mismatch when keepdim is true
    Tensor out = tf_out.zeros({2, 4});
    optional<ScalarType> dtype = OUT_DTYPE;
    int64_t dims_1[1] = {1};
    optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_sum_intlist_out(
            self, optional_dim_list, /*keepdim=*/true, dtype, out));

    // dimension size mismatch when keepdim is false
    out = tf_out.zeros({2, 1, 4});
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_sum_intlist_out(
            self, optional_dim_list, /*keepdim=*/false, dtype, out));
  }

  template <ScalarType IN_DTYPE, ScalarType OUT_DTYPE>
  void test_sum_dim_out_dtype() {
    TensorFactory<IN_DTYPE> tf_in;
    TensorFactory<OUT_DTYPE> tf_out;
    // clang-format off
    Tensor self = tf_in.make(
      {2, 3, 4},
      {
        0, 1, 2, 3,
        4, 5, 6, 7,
        8, 9, 10, 11,

        12, 13, 14, 15,
        16, 17, 18, 19,
        20, 21, 22, 23,
      });
    // clang-format on

    // keepdim=true should work
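    // Each output element is the sum over the last dim (size 4):
    // 0+1+2+3 = 6, 4+5+6+7 = 22, 8+9+10+11 = 38, and so on.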
    Tensor out = tf_out.zeros({2, 3, 1});
    int64_t dims_1[1] = {2};
    optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
    optional<ScalarType> dtype = OUT_DTYPE;
    op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
    // clang-format off
    EXPECT_TENSOR_CLOSE(out, tf_out.make(
      {2, 3, 1},
      {
        6,
        22,
        38,

        54,
        70,
        86
      }));
    // clang-format on

    // keepdim=false should work
    out = tf_out.zeros({2, 3});
    op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/false, dtype, out);
    // clang-format off
    EXPECT_TENSOR_CLOSE(out, tf_out.make(
      {2, 3},
      {
        6, 22, 38,
        54, 70, 86
      }));
    // clang-format on

    // dim list with multiple dimensions should work
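    // Reducing over dims {0, 1} leaves only the last axis: each of the four
    // columns sums to 60, 66, 72, and 78 across the six rows.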
    out = tf_out.zeros({1, 1, 4});
    int64_t dims_01[2] = {0, 1};
    optional_dim_list = ArrayRef<int64_t>{dims_01, 2};
    op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 4}, {60, 66, 72, 78}));

    out = tf_out.zeros({4});
    op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/false, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({4}, {60, 66, 72, 78}));

    out = tf_out.zeros({1, 3, 1});
    int64_t dims_02[2] = {0, 2};
    optional_dim_list = ArrayRef<int64_t>{dims_02, 2};
    op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 3, 1}, {60, 92, 124}));

    out = tf_out.zeros({3});
    op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/false, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({3}, {60, 92, 124}));

    // dim list with negative dimensions should work
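    // -2 resolves to dim 1 of the 3-D input, so each output element sums a
    // column of a 3x4 slice: 0+4+8 = 12, 1+5+9 = 15, ..., 15+19+23 = 57.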
    out = tf_out.zeros({2, 1, 4});
    int64_t dims_3[1] = {-2};
    optional_dim_list = ArrayRef<int64_t>{dims_3, 1};
    op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
    // clang-format off
    EXPECT_TENSOR_CLOSE(out, tf_out.make(
      {2, 1, 4},
      {
        12, 15, 18, 21,

        48, 51, 54, 57,
      }));
    // clang-format on

    // empty/null dim list should work
    // clang-format off
    self = tf_in.make(
      {2, 2, 4},
      {
        0, 1, 2, 3,
        4, 5, 6, 7,

        0, 1, 2, 3,
        4, 5, 6, 7,
      });
    // clang-format on
    out = tf_out.zeros({1, 1, 1});
    optional<ArrayRef<int64_t>> null_dim_list;
    op_sum_intlist_out(self, null_dim_list, /*keepdim=*/true, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {56}));

    optional<ArrayRef<int64_t>> empty_dim_list{ArrayRef<int64_t>{}};
    op_sum_intlist_out(self, empty_dim_list, /*keepdim=*/true, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 1, 1}, {56}));

    out = tf_out.zeros({});
    op_sum_intlist_out(self, null_dim_list, /*keepdim=*/false, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({}, {56}));

    op_sum_intlist_out(self, empty_dim_list, /*keepdim=*/false, dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({}, {56}));
  }
};

TEST_F(OpSumOutTest, InvalidDimensionListDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
  // Use a two-layer switch to handle each possible (input, output) dtype pair
#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_sum_dim_out_invalid_dimensions<                                    \
      ScalarType::INPUT_DTYPE,                                            \
      ScalarType::OUTPUT_DTYPE>();

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_REAL_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
#undef TEST_KERNEL
}

TEST_F(OpSumOutTest, InvalidShapeDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
  // Use a two-layer switch to handle each possible (input, output) dtype pair
#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_sum_dim_out_invalid_shape<                                         \
      ScalarType::INPUT_DTYPE,                                            \
      ScalarType::OUTPUT_DTYPE>();

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_REAL_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

  ET_FORALL_REAL_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
#undef TEST_KERNEL
}

TEST_F(OpSumOutTest, MismatchedDTypesDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
  TensorFactory<ScalarType::Float> tf_float;
  TensorFactory<ScalarType::Int> tf_int;

  // clang-format off
  Tensor self = tf_int.make(
    {2, 3, 4},
    {
      0, 1, 2, 3,
      4, 5, 6, 7,
      8, 9, 10, 11,

      12, 13, 14, 15,
      16, 17, 18, 19,
      20, 21, 22, 23,
    });
  // clang-format on

  Tensor out = tf_float.zeros({2, 3, 1});
  int64_t dims_1[1] = {2};
  optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
  optional<ScalarType> dtype = ScalarType::Double;

  // When dtype is explicitly specified, the out tensor must have that same
  // dtype; a Float out tensor with dtype=Double must fail.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_sum_intlist_out(
          self, optional_dim_list, /*keepdim=*/true, dtype, out));
}

TEST_F(OpSumOutTest, AllRealInputRealOutputPasses) {
  // Use a two-layer switch to handle each possible (input, output) dtype pair
#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_sum_dim_out_dtype<ScalarType::INPUT_DTYPE, ScalarType::OUTPUT_DTYPE>();

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_REAL_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

  ET_FORALL_REAL_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
#undef TEST_KERNEL
}

TEST_F(OpSumOutTest, TypeConversionTest) {
  TensorFactory<ScalarType::Byte> tf_byte;
  TensorFactory<ScalarType::Bool> tf_bool;
  TensorFactory<ScalarType::Int> tf_int;
  // clang-format off
  Tensor self = tf_int.make(
    {2, 3, 4},
    {
      0, 0, 0, 0,
      2, 2, 2, 2,
      4, 4, 4, 4,

      8, 8, 8, 8,
      16, 16, 16, 16,
      64, 64, 64, 64,
    });
  // clang-format on

  int64_t dims_1[1] = {2};
  optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims_1, 1}};
  optional<ScalarType> dtype;

  // int -> bool conversion should work
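  // Every row sum is nonzero except the all-zero first row, so the expected
  // bool output is {false, true, true, true, true, true}.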
  Tensor out = tf_bool.zeros({2, 3, 1});
  op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
  // clang-format off
  EXPECT_TENSOR_CLOSE(out, tf_bool.make(
    {2, 3, 1},
    {
      false,
      true,
      true,

      true,
      true,
      true
    }));
  // clang-format on

  // int -> byte conversion should work
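  // The row sums along dim 2 are 0, 8, 16, 32, 64, and 256; 256 does not fit
  // in uint8 and wraps around to 0.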
  out = tf_byte.zeros({2, 3, 1});
  op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
  // clang-format off
  EXPECT_TENSOR_CLOSE(out, tf_byte.make(
    {2, 3, 1},
    {
      0,
      8,
      16,

      32,
      64,
      0,
    }));
  // clang-format on
}

TEST_F(OpSumOutTest, InfinityAndNANTest) {
  TensorFactory<ScalarType::Float> tf_float;
  // clang-format off
  Tensor self = tf_float.make(
    {2, 3, 4},
    {
      0, 1, 2, INFINITY,
      INFINITY, -INFINITY, 1, 0,
      NAN, INFINITY, -INFINITY, 2,

      NAN, NAN, 1, 0,
      0, INFINITY, NAN, 4,
      1, NAN, 3.14, 2,
    });
  // clang-format on

  Tensor out = tf_float.zeros({2, 3, 1});
  int64_t dims[1] = {-1};
  optional<ArrayRef<int64_t>> optional_dim_list{ArrayRef<int64_t>{dims, 1}};
  optional<ScalarType> dtype;
  op_sum_intlist_out(self, optional_dim_list, /*keepdim=*/true, dtype, out);
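  // Any row containing NAN, or both INFINITY and -INFINITY, reduces to NAN;
  // the first row (0 + 1 + 2 + INFINITY) stays INFINITY.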
  // clang-format off
  EXPECT_TENSOR_CLOSE(out, tf_float.make(
    {2, 3, 1},
    {
      INFINITY,
      NAN,
      NAN,

      NAN,
      NAN,
      NAN
    }));
  // clang-format on
}