/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>

#include <gtest/gtest.h>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using exec_aten::TensorList;
using torch::executor::testing::TensorFactory;

class OpCatOutTest : public OperatorTest {
 protected:
  Tensor& op_cat_out(TensorList tensors, int64_t dim, Tensor& out) {
    return torch::executor::aten::cat_outf(context_, tensors, dim, out);
  }

  template <class CTYPE, exec_aten::ScalarType DTYPE>
  void test_dtype() {
    TensorFactory<DTYPE> tf;

    // Will be concatenated along dim[1]. Use different input values so we can
    // see where each output value came from.
    Tensor x = tf.ones({2, 1});
    Tensor y = tf.zeros({2, 1});
    std::vector<Tensor> inputs = {x, y};

    Tensor out = tf.ones({2, 2});
    op_cat_out(ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/1, out);

    // clang-format off
    Tensor expected = tf.make(
        {2, 2},
        {
          1, 0,
          1, 0,
        });
    // clang-format on

    EXPECT_TENSOR_EQ(out, expected);
  }
};

TEST_F(OpCatOutTest, SmokeDim1) {
  TensorFactory<ScalarType::Int> tf;

  // Two tensors with the same number of dimensions and the same dim[0]
  // size, but different dim[1] sizes. These will be concatenated along dim[1].
  // clang-format off
  Tensor x = tf.make(
      {2, 3},
      {
        1, 2, 3,
        4, 5, 6,
      });
  Tensor y = tf.make(
      {2, 1},
      {
        10,
        20,
      });
  // clang-format on

  std::vector<Tensor> inputs = {x, y};

  // Output tensor with the shape of the two input tensors concatenated along
  // dim[1].
  // - It should have the same number of dimensions as each input.
  // - For non-cat dimensions (dim[0]), it should have the same size as the
  //   input tensors.
  // - For the cat dimension (dim[1]), its size should be the sum of the cat
  //   dimensions of the inputs: in this case, 3 + 1.
  Tensor out = tf.zeros({2, 4});

  // Concatenate along dim[1].
  Tensor ret = op_cat_out(
      ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/1, out);

  // Should always return the provided out Tensor.
  EXPECT_TENSOR_EQ(ret, out);

  // clang-format off
  Tensor expected = tf.make(
      {2, 4},
      {
        1, 2, 3, 10,
        4, 5, 6, 20,
      });
  // clang-format on

  EXPECT_TENSOR_EQ(out, expected);
}
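
// For reference, a minimal sketch (illustrative only; this helper is not part
// of the kernel under test or of the test utilities) of the shape rule
// exercised above: non-cat dimensions keep their size, and the cat dimension
// is the sum of the inputs' sizes along that dimension.
/*
std::vector<int32_t> expected_cat_sizes(
    const std::vector<Tensor>& tensors, size_t dim) {
  // Start from the first input's sizes, then accumulate the cat dimension.
  std::vector<int32_t> sizes(
      tensors[0].sizes().begin(), tensors[0].sizes().end());
  sizes[dim] = 0;
  for (const Tensor& t : tensors) {
    sizes[dim] += t.size(dim);
  }
  return sizes;
}
*/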

TEST_F(OpCatOutTest, HalfSupport) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "Test Half support only for ExecuTorch mode";
  }
  TensorFactory<ScalarType::Half> tf;

  Tensor x = tf.make({2, 3}, {1.5, -2.0, 3.25, 4.0, -5.5, 6.5});
  Tensor y = tf.make({2, 1}, {10.0, 20.0});

  std::vector<Tensor> inputs = {x, y};

  Tensor out = tf.zeros({2, 4});

  // Concatenate along dim[1].
  Tensor ret = op_cat_out(
      ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/1, out);

  Tensor expected =
      tf.make({2, 4}, {1.5, -2.0, 3.25, 10.0, 4.0, -5.5, 6.5, 20.0});
  EXPECT_TENSOR_EQ(out, expected);
}

TEST_F(OpCatOutTest, NegativeDims) {
  TensorFactory<ScalarType::Int> tf;

  // Symmetrical input tensors can be concatenated along any dimension.
  // clang-format off
  Tensor x = tf.make(
      {2, 2},
      {
        1, 2,
        3, 4,
      });
  Tensor y = tf.make(
      {2, 2},
      {
        10, 20,
        30, 40,
      });
  // clang-format on

  std::vector<Tensor> inputs = {x, y};

  // Cat along dim[-1], which should be the same as dim[1].
  Tensor out_neg1 = tf.zeros({2, 4});
  op_cat_out(
      ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/-1, out_neg1);

  Tensor out_1 = tf.zeros({2, 4});
  op_cat_out(ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/1, out_1);

  EXPECT_TENSOR_EQ(out_neg1, out_1);

  // Cat along dim[-2], which should be the same as dim[0].
  Tensor out_neg2 = tf.zeros({4, 2});
  op_cat_out(
      ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/-2, out_neg2);

  Tensor out_0 = tf.zeros({4, 2});
  op_cat_out(ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/0, out_0);

  EXPECT_TENSOR_EQ(out_neg2, out_0);
}
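
// For reference: a negative dim counts from the end, so for these rank-2
// inputs dim = -1 maps to 1 and dim = -2 maps to 0. A minimal sketch of the
// usual normalization (illustrative only, not the kernel's actual code):
/*
int64_t normalize_cat_dim(int64_t dim, int64_t ndim) {
  // Valid values lie in [-ndim, ndim - 1]; anything else is a kernel error.
  return dim < 0 ? dim + ndim : dim;
}
*/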

/// A generic smoke test that works for any dtype that supports ones() and
/// zeros().
TEST_F(OpCatOutTest, AllDtypesSupported) {
#define TEST_ENTRY(ctype, dtype) test_dtype<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
  // TODO: Also add tests for half, complex, quantized, and other types. Easiest
  // way to do that would be to make TensorFactory support zeros() and ones()
  // for those types.
}

TEST_F(OpCatOutTest, EmptyInputTensorShapeIgnored) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel doesn't ignore empty input tensor shape";
  }
  TensorFactory<ScalarType::Int> tf;

  // An empty tensor with a shape totally different from the non-empty inputs.
  Tensor empty = tf.make({0, 10, 3}, {});
  EXPECT_EQ(empty.numel(), 0);

  Tensor x = tf.ones({2, 2});

  std::vector<Tensor> inputs = {x, empty, x};

  // Output whose shape is appropriate for concatenating along dim[0].
  Tensor out = tf.zeros({4, 2});

  op_cat_out(ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/0, out);
  // Success if it doesn't assert on the weird-shaped empty input.
}
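
// The behavior above implies that the ExecuTorch kernel skips inputs with
// zero elements without validating their shape. A hypothetical sketch of such
// a check (not the actual kernel implementation):
/*
for (const Tensor& t : tensors) {
  if (t.numel() == 0) {
    continue; // Contributes nothing to the output; its shape is not checked.
  }
  // ... copy t's data into the output along the cat dimension ...
}
*/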

TEST_F(OpCatOutTest, DimBounds) {
  TensorFactory<ScalarType::Int> tf;

  // Cat a single tensor, which can be done across any dimension and still
  // produces the same output shape.
  Tensor x = tf.ones({2, 2});
  ArrayRef<Tensor> inputs(&x, 1);

  Tensor out = tf.zeros({2, 2});

  // Some valid dim values.
  // Negative values work like python indices: -1 is the rightmost element,
  // -2 the second-from-rightmost, etc.
  const std::vector<int64_t> valid_dims = {0, 1, -1, -2};
  for (int64_t dim : valid_dims) {
    op_cat_out(inputs, dim, out);
    // Success if it doesn't assert.
  }

  // Some invalid dim values.
  const std::vector<int64_t> invalid_dims = {2, -3};
  for (int64_t dim : invalid_dims) {
    ET_EXPECT_KERNEL_FAILURE(context_, op_cat_out(inputs, dim, out));
  }
}

TEST_F(OpCatOutTest, NoInputTensorsWithNonEmptyOutputDies) {
  TensorFactory<ScalarType::Int> tf;
  Tensor out = tf.ones({1});

  // Providing an empty list of input tensors should
  // cause an assertion and kill the test process.
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_cat_out(ArrayRef<Tensor>(), /*dim=*/0, out));
}

TEST_F(OpCatOutTest, NoInputTensorsWithEmptyOutputDies) {
  TensorFactory<ScalarType::Int> tf;

  // Make an empty out tensor and demonstrate that it's empty.
  Tensor out = tf.make({0}, {});
  EXPECT_EQ(out.numel(), 0);

  // Providing an empty list of input tensors should
  // cause an assertion and kill the test process.
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_cat_out(ArrayRef<Tensor>(), /*dim=*/0, out));
}

TEST_F(OpCatOutTest, MismatchedDtypesDies) {
  TensorFactory<ScalarType::Int> tf_int;
  TensorFactory<ScalarType::Float> tf_float;
  Tensor out = tf_int.zeros({4, 2});

  // An input with a different dtype than the output.
  std::vector<Tensor> inputs = {tf_float.ones({2, 2})};

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_cat_out(
          ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/0, out));
}

TEST_F(OpCatOutTest, MismatchedDimensionsDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched dimensions";
  }
  TensorFactory<ScalarType::Int> tf;
  Tensor out = tf.zeros({2, 2});

  // Same dtype and numel as the output, but a different number of dimensions.
  std::vector<Tensor> inputs = {tf.ones({1, 1, 1, 1})};

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_cat_out(
          ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/0, out));
}

TEST_F(OpCatOutTest, MismatchedDimensionSizeDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched dimension size";
  }
  TensorFactory<ScalarType::Int> tf;
  Tensor out = tf.zeros({2, 2});

  // Same dtype and number of dimensions as the output, but dim[1] has a
  // different size.
  std::vector<Tensor> inputs = {tf.ones({2, 3})};

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_cat_out(
          ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/0, out));
}

TEST_F(OpCatOutTest, WrongOutShapeDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle wrong out shape";
  }
  TensorFactory<ScalarType::Int> tf;

  // Should be {4, 3} to match the inputs when calling cat() with dim 0.
  Tensor out = tf.zeros({4, 5});

  std::vector<Tensor> inputs = {
      tf.ones({2, 3}),
      tf.ones({2, 3}),
  };

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_cat_out(
          ArrayRef<Tensor>(inputs.data(), inputs.size()), /*dim=*/0, out));
}

/* %python
import torch
torch.manual_seed(0)
x = [torch.randint(10, (2, 3)),
     torch.randint(10, (2, 3)),
     torch.randint(10, (2, 3)),
     torch.randint(10, (2, 3))]
res = torch.cat(x, 0)
op = "op_cat_out"
opt_extra_params = "0,"
dtype = "ScalarType::Int"
check = "EXPECT_TENSOR_EQ" */

TEST_F(OpCatOutTest, DynamicShapeUpperBoundSameAsExpected) {
  /* %python
  out_args = "{8, 3}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op_tensor_list_in) */

  TensorFactory<ScalarType::Int> tf;

  std::vector<Tensor> xv = {
      tf.make({2, 3}, {4, 9, 3, 0, 3, 9}),
      tf.make({2, 3}, {7, 3, 7, 3, 1, 6}),
      tf.make({2, 3}, {6, 9, 8, 6, 6, 8}),
      tf.make({2, 3}, {4, 3, 6, 9, 1, 4})};
  TensorList x(xv.data(), xv.size());
  Tensor expected = tf.make({8, 3}, {4, 9, 3, 0, 3, 9, 7, 3, 7, 3, 1, 6,
                                     6, 9, 8, 6, 6, 8, 4, 3, 6, 9, 1, 4});

  Tensor out =
      tf.zeros({8, 3}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_cat_out(x, 0, out);
  EXPECT_TENSOR_EQ(out, expected);
}

TEST_F(OpCatOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  /* %python
  out_args = "{10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op_tensor_list_in) */

  TensorFactory<ScalarType::Int> tf;

  std::vector<Tensor> xv = {
      tf.make({2, 3}, {4, 9, 3, 0, 3, 9}),
      tf.make({2, 3}, {7, 3, 7, 3, 1, 6}),
      tf.make({2, 3}, {6, 9, 8, 6, 6, 8}),
      tf.make({2, 3}, {4, 3, 6, 9, 1, 4})};
  TensorList x(xv.data(), xv.size());
  Tensor expected = tf.make({8, 3}, {4, 9, 3, 0, 3, 9, 7, 3, 7, 3, 1, 6,
                                     6, 9, 8, 6, 6, 8, 4, 3, 6, 9, 1, 4});

  Tensor out =
      tf.zeros({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_cat_out(x, 0, out);
  EXPECT_TENSOR_EQ(out, expected);
}

TEST_F(OpCatOutTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape unbound not supported";
  }
  /* %python
  out_args = "{1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND"
  %rewrite(unary_op_tensor_list_in) */

  TensorFactory<ScalarType::Int> tf;

  std::vector<Tensor> xv = {
      tf.make({2, 3}, {4, 9, 3, 0, 3, 9}),
      tf.make({2, 3}, {7, 3, 7, 3, 1, 6}),
      tf.make({2, 3}, {6, 9, 8, 6, 6, 8}),
      tf.make({2, 3}, {4, 3, 6, 9, 1, 4})};
  TensorList x(xv.data(), xv.size());
  Tensor expected = tf.make({8, 3}, {4, 9, 3, 0, 3, 9, 7, 3, 7, 3, 1, 6,
                                     6, 9, 8, 6, 6, 8, 4, 3, 6, 9, 1, 4});

  Tensor out =
      tf.zeros({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  op_cat_out(x, 0, out);
  EXPECT_TENSOR_EQ(out, expected);
}