/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
#include <gtest/gtest.h>
#include <cmath>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::optional;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpCumSumOutTest : public OperatorTest {
 protected:
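  // Thin wrapper around the generated cumsum.out entry point; it forwards the
  // test fixture's runtime context along with the operator arguments.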
  Tensor& op_cumsum_out(
      const Tensor& self,
      int64_t dim,
      optional<ScalarType> enforced_dtype,
      Tensor& out) {
    return torch::executor::aten::cumsum_outf(
        context_, self, dim, enforced_dtype, out);
  }

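  // Exercises cumsum over the last dim, its negative alias, and dim 0 for a
  // given (input dtype, enforced output dtype) pair.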
  template <ScalarType IN_DTYPE, ScalarType OUT_DTYPE>
  void test_cumsum_out_dtype() {
    TensorFactory<IN_DTYPE> tf_in;
    TensorFactory<OUT_DTYPE> tf_out;
    // clang-format off
    Tensor in = tf_in.make(
        {2, 4},
        {
          0, 1, 2, 4,
          8, 16, 32, 64
        });
    // clang-format on

    Tensor out = tf_out.zeros({2, 4});
    optional<ScalarType> enforced_dtype = OUT_DTYPE;
    op_cumsum_out(in, /*dim=*/1, enforced_dtype, out);

    // clang-format off
    Tensor expected = tf_out.make(
        {2, 4},
        {
          0, 1, 3, 7,
          8, 24, 56, 120
        });
    // clang-format on

    EXPECT_TENSOR_CLOSE(out, expected);

    // negative dim should work
    op_cumsum_out(in, /*dim=*/-1, enforced_dtype, out);
    EXPECT_TENSOR_CLOSE(out, expected);

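    // dim 0 accumulates down the columns: the first row is unchanged and the
    // second row becomes the elementwise sum of both rows.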
    op_cumsum_out(in, /*dim=*/0, enforced_dtype, out);
    // clang-format off
    expected = tf_out.make(
        {2, 4},
        {
          0, 1, 2, 4,
          8, 17, 34, 68
        });
    // clang-format on
    EXPECT_TENSOR_CLOSE(out, expected);
  }

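  // Exercises IEEE-754 corner cases: +/-Inf and NaN propagate through the
  // running sum, and -Inf + Inf yields NaN.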
  template <ScalarType OUT_DTYPE>
  void test_cumsum_out_float() {
    TensorFactory<ScalarType::Float> tf_float;
    TensorFactory<OUT_DTYPE> tf_out;

    Tensor in = tf_float.make({1, 2}, {1, INFINITY});
    Tensor out = tf_out.zeros({1, 2});
    optional<ScalarType> enforced_dtype = OUT_DTYPE;
    op_cumsum_out(in, /*dim=*/1, enforced_dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 2}, {1, INFINITY}));

    in = tf_float.make({1, 2}, {1, -INFINITY});
    op_cumsum_out(in, /*dim=*/1, enforced_dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 2}, {1, -INFINITY}));

    in = tf_float.make({1, 2}, {1, NAN});
    op_cumsum_out(in, /*dim=*/1, enforced_dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 2}, {1, NAN}));

    in = tf_float.make({1, 2}, {-INFINITY, INFINITY});
    op_cumsum_out(in, /*dim=*/1, enforced_dtype, out);
    EXPECT_TENSOR_CLOSE(out, tf_out.make({1, 2}, {-INFINITY, NAN}));
  }
};

TEST_F(OpCumSumOutTest, MismatchedDimensionsDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched dimensions";
  }
  TensorFactory<ScalarType::Float> tff;

  Tensor in = tff.make({1, 3}, {0, 1, 2});

  // Output shape should be (1, 3)
  Tensor out = tff.zeros({1, 3});

  // Dim out of bounds
  optional<ScalarType> enforced_dtype;
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_cumsum_out(in, /*dim=*/3, enforced_dtype, out));

  // wrong_out has an incompatible number of dims
  Tensor wrong_out = tff.zeros({2, 10, 4});
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_cumsum_out(in, /*dim=*/1, enforced_dtype, wrong_out));
}

/* A generic smoke test that works for the supported dtypes with
 * enforced_dtype specified.
 */
TEST_F(OpCumSumOutTest, EnforcedDtypePasses) {
  // Use a two-layer macro expansion to handle each possible dtype pair
#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_cumsum_out_dtype<ScalarType::INPUT_DTYPE, ScalarType::OUTPUT_DTYPE>();

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_REAL_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

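  // Together these macros instantiate test_cumsum_out_dtype for every ordered
  // pair of real dtypes, e.g. <ScalarType::Byte, ScalarType::Int>.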
  ET_FORALL_REAL_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
#undef TEST_KERNEL
}

TEST_F(OpCumSumOutTest, TypeCastCornerCases) {
  TensorFactory<ScalarType::Int> tf_int;
  TensorFactory<ScalarType::Float> tf_float;
  TensorFactory<ScalarType::Byte> tf_byte;

  // Cast floating point to int
  Tensor in = tf_float.make({1, 2}, {1.1, 2.2});
  Tensor out = tf_int.zeros({1, 2});
  optional<ScalarType> enforced_dtype = ScalarType::Int;
  op_cumsum_out(in, /*dim=*/1, enforced_dtype, out);
  EXPECT_TENSOR_CLOSE(out, tf_int.make({1, 2}, {1, 3}));

  // Cast negative values to an unsigned type: values wrap modulo 256, so
  // -1 -> 255 and -1 + -2 = -3 -> 253
  in = tf_int.make({1, 2}, {-1, -2});
  out = tf_byte.zeros({1, 2});
  enforced_dtype = ScalarType::Byte;
  op_cumsum_out(in, /*dim=*/1, enforced_dtype, out);
  EXPECT_TENSOR_CLOSE(out, tf_byte.make({1, 2}, {255, 253}));

  // Cast negative float values to int: conversion truncates toward zero, so
  // the expected cumulative sums are those of the truncated values (-1, -2)
  in = tf_float.make({1, 2}, {-1.9, -2.9});
  out = tf_int.zeros({1, 2});
  enforced_dtype = ScalarType::Int;
  op_cumsum_out(in, /*dim=*/1, enforced_dtype, out);
  EXPECT_TENSOR_CLOSE(out, tf_int.make({1, 2}, {-1, -3}));
}

/* Float/double specific smoke test covering +/-Inf and NaN, run for every
 * supported floating-point output dtype.
 */
TEST_F(OpCumSumOutTest, FloatSpecificTest) {
  // Float/double specific +/-Inf and NAN test
#define TEST_ENTRY_FLOAT_SPECIFIC_CASES(ctype, dtype) \
  test_cumsum_out_float<ScalarType::dtype>();
  ET_FORALL_FLOAT_TYPES(TEST_ENTRY_FLOAT_SPECIFIC_CASES);
#undef TEST_ENTRY_FLOAT_SPECIFIC_CASES
}

TEST_F(OpCumSumOutTest, SimpleGeneratedCase) {
  TensorFactory<ScalarType::Float> tf;

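  // cumsum of an all-ones 10x10 tensor along dim 1 yields 1..10 in every row.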
  Tensor x = tf.make(
      {10, 10},
      {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0});
  Tensor expected_result = tf.make(
      {10, 10},
      {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 1.0, 2.0, 3.0,
       4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
       7.0, 8.0, 9.0, 10.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
       10.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 1.0, 2.0,
       3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 1.0, 2.0, 3.0, 4.0, 5.0,
       6.0, 7.0, 8.0, 9.0, 10.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
       9.0, 10.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 1.0,
       2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0});

  Tensor out = tf.zeros({10, 10});
  Tensor ret = op_cumsum_out(x, 1, ScalarType::Float, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpCumSumOutTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.816348671913147,
       0.44230276346206665,
       0.2767965793609619,
       0.8998266458511353,
       0.09595239162445068});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.8651182055473328,
       0.44230276346206665,
       0.7190993428230286,
       0.8998266458511353,
       0.9957790374755859});

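  // The output uses an upper-bounded dynamic shape that already matches the
  // expected result, so no resize is needed.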
  Tensor out =
      tf.zeros({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_cumsum_out(x, 1, ScalarType::Float, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpCumSumOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.816348671913147,
       0.44230276346206665,
       0.2767965793609619,
       0.8998266458511353,
       0.09595239162445068});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.8651182055473328,
       0.44230276346206665,
       0.7190993428230286,
       0.8998266458511353,
       0.9957790374755859});

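  // The output's upper bound is larger than the result; the kernel is
  // expected to resize it down to the input's {3, 2} shape.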
  Tensor out =
      tf.zeros({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_cumsum_out(x, 1, ScalarType::Float, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpCumSumOutTest, DynamicShapeUnbound) {
  GTEST_SKIP() << "Dynamic shape unbound not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.816348671913147,
       0.44230276346206665,
       0.2767965793609619,
       0.8998266458511353,
       0.09595239162445068});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.8651182055473328,
       0.44230276346206665,
       0.7190993428230286,
       0.8998266458511353,
       0.9957790374755859});

  Tensor out =
      tf.zeros({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  Tensor ret = op_cumsum_out(x, 1, ScalarType::Float, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}