/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <cstdint>
#include <map>
#include <typeindex>
#include <variant>

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>

#include <gtest/gtest.h>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::optional;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

// To further verify the accuracy of the _to_dim_order_copy op, we test the
// conversion from floating-point types to signed int types against test cases
// generated directly by core PyTorch. That data is randomly generated in
// [-5, 5].

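// These maps are keyed by std::type_index so that test_runner_hardcode_data
// below can look up the hardcoded data vector matching a given C++ type,
// e.g. floating_point_data[typeid(float)] holds the std::vector<float>
// alternative of the variant.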
// clang-format off
typedef std::map<
    std::type_index,
    std::variant<
        std::vector<float>,
        std::vector<double>>>
    FloatingTypeToDataMap;

typedef std::map<
    std::type_index,
    std::variant<
        std::vector<int64_t>,
        std::vector<int32_t>,
        std::vector<int16_t>,
        std::vector<int8_t>,
        std::vector<uint8_t>>>
    IntTypeToDataMap;
// clang-format on

class OpToDimOrderCopyTest : public OperatorTest {
 protected:
  Tensor& op__to_dim_order_copy_out(
      const Tensor& self,
      bool non_blocking,
      exec_aten::optional<ArrayRef<int64_t>> dim_order,
      Tensor& out) {
    return torch::executor::dim_order_ops::_to_dim_order_copy_outf(
        context_, self, non_blocking, dim_order, out);
  }
  // Cast a vector of INPUT_CTYPE values into a vector of OUTPUT_CTYPE values.
  template <typename INPUT_CTYPE, typename OUTPUT_CTYPE>
  std::vector<OUTPUT_CTYPE> vector_type_cast(std::vector<INPUT_CTYPE> input) {
    std::vector<OUTPUT_CTYPE> output(input.size());
    std::transform(
        input.begin(), input.end(), output.begin(), [](INPUT_CTYPE x) {
          return static_cast<OUTPUT_CTYPE>(x);
        });
    return output;
  }

  template <typename INPUT_CTYPE, typename OUTPUT_CTYPE>
  struct ToTestCase {
    const std::vector<int32_t> sizes;
    const std::vector<INPUT_CTYPE> data_in;
    const std::vector<OUTPUT_CTYPE> data_out;
  };

  // Each test uses a different combination of input and output types, so
  // creating templated test cases with custom data types for both the input
  // and the output would get messy. Instead, the data for every test case is
  // expressed in double and cast to the desired type when it is passed to the
  // tf.make function. In our experiments, core PyTorch's type casting matches
  // C++ static_cast within the representable range, so we believe using
  // static_cast to generate the ground truth is reasonable.
  template <
      typename INPUT_CTYPE,
      ScalarType INPUT_DTYPE,
      typename OUTPUT_CTYPE,
      ScalarType OUTPUT_DTYPE>
  void test_runner_static_cast(
      std::vector<ToTestCase<double, double>> test_cases) {
    TensorFactory<INPUT_DTYPE> tf_in;
    TensorFactory<OUTPUT_DTYPE> tf_out;

    for (const auto& test_case : test_cases) {
      auto data_in = vector_type_cast<double, INPUT_CTYPE>(test_case.data_in);
      auto data_out = vector_type_cast<INPUT_CTYPE, OUTPUT_CTYPE>(data_in);

      Tensor input = tf_in.make(test_case.sizes, data_in);
      Tensor output = tf_out.zeros_like(input);

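      // Build the identity dim order {0, 1, ..., dim - 1}, i.e. the default
      // contiguous layout, so this runner only exercises the dtype conversion.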
      std::vector<int64_t> dim_order_vec;
      for (int64_t i = 0; i < input.dim(); i++) {
        dim_order_vec.push_back(i);
      }
      ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());

      Tensor ret = op__to_dim_order_copy_out(
          /*self=*/input,
          /*non_blocking=*/false,
          dim_order,
          output);

      Tensor expected = tf_out.make(test_case.sizes, data_out);

      // Both the out argument and the returned tensor should hold the
      // expected converted values.
      EXPECT_TENSOR_EQ(ret, output);
      EXPECT_TENSOR_EQ(ret, expected);
    }
  }

  template <typename INPUT_CTYPE, ScalarType INPUT_DTYPE>
  void test_runner_to_bool(
      std::vector<double> test_case,
      std::vector<uint8_t> data_out) {
    TensorFactory<INPUT_DTYPE> tf_in;
    TensorFactory<ScalarType::Bool> tf_out;

    auto data_in = vector_type_cast<double, INPUT_CTYPE>(test_case);

    Tensor input = tf_in.make({(int)test_case.size()}, data_in);
    Tensor output = tf_out.zeros_like(input);

    std::vector<int64_t> dim_order_vec;
    for (int i = 0; i < input.dim(); i++) {
      dim_order_vec.push_back(i);
    }
    ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());

    Tensor ret = op__to_dim_order_copy_out(
        /*self=*/input,
        /*non_blocking=*/false,
        dim_order,
        output);

    Tensor expected = tf_out.make({(int)data_out.size()}, data_out);

    // The return value of op__to_dim_order_copy_out and the values written to
    // output should be the same.
    EXPECT_TENSOR_EQ(ret, output);
    // The return value of op__to_dim_order_copy_out and the reference values
    // in expected should be the same.
    EXPECT_TENSOR_EQ(ret, expected);
  }

  template <typename OUT_CTYPE, ScalarType OUT_DTYPE>
  void test_runner_from_bool(
      std::vector<uint8_t> test_case,
      std::vector<double> out) {
    TensorFactory<ScalarType::Bool> tf_in;
    TensorFactory<OUT_DTYPE> tf_out;

    auto data_out = vector_type_cast<double, OUT_CTYPE>(out);

    Tensor input = tf_in.make({(int)test_case.size()}, test_case);
    Tensor output = tf_out.zeros_like(input);

    std::vector<int64_t> dim_order_vec;
    for (int64_t i = 0; i < input.dim(); i++) {
      dim_order_vec.push_back(i);
    }
    ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());

    Tensor ret = op__to_dim_order_copy_out(
        /*self=*/input,
        /*non_blocking=*/false,
        dim_order,
        output);

    Tensor expected = tf_out.make({(int)data_out.size()}, data_out);

    // The return value of op__to_dim_order_copy_out and the values written to
    // output should be the same.
    EXPECT_TENSOR_EQ(ret, output);
    // The return value of op__to_dim_order_copy_out and the reference values
    // in expected should be the same.
    EXPECT_TENSOR_EQ(ret, expected);
  }

  /* %python
  import torch
  torch.manual_seed(0)
  x = torch.rand(2, 3)
  res = x.to(non_blocking = False, memory_format = torch.preserve_format)
  op = "op__to_dim_order_copy_out"
  opt_setup_params = """
    bool non_blocking = false;
    optional<MemoryFormat> memory_format;
  """
  opt_extra_params = "non_blocking, memory_format,"
  out_args = "out_shape, dynamism"
  dtype = "ScalarType::Float"
  check = "EXPECT_TENSOR_EQ" */

  void test_dynamic_shape(
      const std::vector<int32_t>& out_shape,
      enum torch::executor::TensorShapeDynamism dynamism) {
    /* %python
    %rewrite(unary_op) */

    TensorFactory<ScalarType::Float> tf;

    Tensor x = tf.make(
        {2, 3},
        {0.49625658988952637,
         0.7682217955589294,
         0.08847743272781372,
         0.13203048706054688,
         0.30742281675338745,
         0.6340786814689636});
    Tensor expected = tf.make(
        {2, 3},
        {0.49625658988952637,
         0.7682217955589294,
         0.08847743272781372,
         0.13203048706054688,
         0.30742281675338745,
         0.6340786814689636});

    bool non_blocking = false;

    Tensor out = tf.zeros(out_shape, dynamism);

    std::vector<int64_t> dim_order_vec;
    for (int64_t i = 0; i < x.dim(); i++) {
      dim_order_vec.push_back(i);
    }
    ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());

    Tensor ret = op__to_dim_order_copy_out(
        /*self=*/x, non_blocking, dim_order, out);

    EXPECT_TENSOR_EQ(out, expected);
    EXPECT_TENSOR_EQ(ret, expected);
  }

  template <
      typename INPUT_CTYPE,
      ScalarType INPUT_DTYPE,
      typename OUTPUT_CTYPE,
      ScalarType OUTPUT_DTYPE>
  void test_runner_hardcode_data(
      FloatingTypeToDataMap floating_point_data,
      IntTypeToDataMap int_data) {
    TensorFactory<INPUT_DTYPE> tf_in;
    TensorFactory<OUTPUT_DTYPE> tf_out;

    if (typeid(OUTPUT_CTYPE) == typeid(uint8_t)) {
      // Would cause underflow when testing uint8_t.
      return;
    }

    ToTestCase<INPUT_CTYPE, OUTPUT_CTYPE> test_case = {
        /*sizes=*/{3, 5}, /*data_in=*/
        std::get<std::vector<INPUT_CTYPE>>(
            floating_point_data[typeid(INPUT_CTYPE)]),
        /*data_out=*/
        std::get<std::vector<OUTPUT_CTYPE>>(int_data[typeid(OUTPUT_CTYPE)])};

    Tensor input = tf_in.make(test_case.sizes, test_case.data_in);
    Tensor output = tf_out.zeros_like(input);

    std::vector<int64_t> dim_order_vec;
    for (int64_t i = 0; i < input.dim(); i++) {
      dim_order_vec.push_back(i);
    }
    ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());

    Tensor ret = op__to_dim_order_copy_out(
        /*self=*/input,
        /*non_blocking=*/false,
        dim_order,
        output);

    Tensor expected = tf_out.make(test_case.sizes, test_case.data_out);

    // Both the out argument and the returned tensor should hold the expected
    // converted values.
    EXPECT_TENSOR_EQ(ret, output);
    EXPECT_TENSOR_EQ(ret, expected);
  }
};

/* For now we do not implement or test the behavior of casting a value that
 * cannot be represented in the destination type (e.g. inf to int32_t, nan to
 * int64_t, or 2147483648 to int32_t), because:
 * - a. The result of such a cast is undefined according to the C++
 *      standard;
 * - b. Core PyTorch defines no explicit rules for such conversions (they do
 *      not match static_cast or any other C++ casting function);
 * - c. Users who cast an unrepresentable value to a given type do so at
 *      their own risk;
 * - d. Although we could add if/switch statements to cover these boundary
 *      cases, the resulting code would be lengthy and hard to follow, and it
 *      still could not cover every case of undefined behavior.
 */

// Regular test for _to_dim_order_copy.out.
// Tests that the op works well for all kinds of input/output dtype pairs.
TEST_F(OpToDimOrderCopyTest, AllDtypesSupported) {
  std::vector<ToTestCase<double, double>> test_cases = {
      {
          /*sizes=*/{2, 4}, /*data_in=*/
          {2.11, 3.2, 2.3, 4.0, 1.1, 5.2, 1.1, 6.3}, /*data_out=*/
          {}, // data_out shouldn't be used in test_runner_static_cast
      },
      {
          /*sizes=*/{3, 4, 0, 5},
          /*data_in=*/{},
          /*data_out=*/{},
      },
      {
          /*sizes=*/{},
          /*data_in=*/{10.0},
          /*data_out=*/{}, // data_out shouldn't be used in
                           // test_runner_static_cast
      },
  };

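// The nested macros below run test_runner_static_cast for every (input,
// output) pair of real dtypes: ET_FORALL_REAL_TYPES expands TEST_ENTRY once
// per input dtype, and each TEST_ENTRY uses ET_FORALL_REAL_TYPES_WITH2 to
// expand TEST_KERNEL once per output dtype.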
#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_runner_static_cast<                                                \
      INPUT_CTYPE,                                                        \
      ScalarType::INPUT_DTYPE,                                            \
      OUTPUT_CTYPE,                                                       \
      ScalarType::OUTPUT_DTYPE>(test_cases);

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_REAL_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

  ET_FORALL_REAL_TYPES(TEST_ENTRY);

#undef TEST_ENTRY
#undef TEST_KERNEL
}

TEST_F(OpToDimOrderCopyTest, BoolTests) {
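  // Casting to bool follows the usual conversion semantics: any nonzero
  // input value maps to true and zero maps to false.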
  std::vector<double> test_case_to_bool = {1.1, 2.2, 0};
  std::vector<uint8_t> result_to_bool = {true, true, false};
#define TEST_TO_BOOL(INPUT_CTYPE, INPUT_DTYPE)               \
  test_runner_to_bool<INPUT_CTYPE, ScalarType::INPUT_DTYPE>( \
      test_case_to_bool, result_to_bool);
  ET_FORALL_REAL_TYPES(TEST_TO_BOOL);

  std::vector<uint8_t> test_case_from_bool = {true, true, false};
  std::vector<double> result_from_bool = {1.0, 1.0, 0};
#define TEST_FROM_BOOL(OUTPUT_CTYPE, OUTPUT_DTYPE)               \
  test_runner_from_bool<OUTPUT_CTYPE, ScalarType::OUTPUT_DTYPE>( \
      test_case_from_bool, result_from_bool);
  ET_FORALL_REAL_TYPES(TEST_FROM_BOOL);
}

TEST_F(OpToDimOrderCopyTest, NanInfSupported) {
  constexpr auto floatInfinity = std::numeric_limits<float>::infinity();
  std::vector<ToTestCase<double, double>> test_cases = {{
      /*sizes=*/{2, 4},
      /*data_in=*/{2, 3, NAN, 4, floatInfinity, 5, -floatInfinity, 6},
      /*data_out=*/{2, 3, NAN, 4, floatInfinity, 5, -floatInfinity, 6},
  }};

#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_runner_static_cast<                                                \
      INPUT_CTYPE,                                                        \
      ScalarType::INPUT_DTYPE,                                            \
      OUTPUT_CTYPE,                                                       \
      ScalarType::OUTPUT_DTYPE>(test_cases);

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_FLOAT_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

  ET_FORALL_FLOAT_TYPES(TEST_ENTRY);

#undef TEST_ENTRY
#undef TEST_KERNEL
}

TEST_F(OpToDimOrderCopyTest, HardcodeFloatConvertInt) {
  // Hardcode input and output generated from core PyTorch
  // clang-format off
  std::vector<float> float_data = {
      -1.47900056838989257812, -4.59277725219726562500,
      2.15365791320800781250, -2.55494546890258789062,
      3.06999135017395019531, 3.27460670471191406250,
      -3.98865103721618652344, -4.81065988540649414062,
      3.67902207374572753906, 3.72226405143737792969,
      0.80567771196365356445, 2.23788332939147949219,
      -0.52035576105117797852, -1.58493483066558837891,
      -0.30919688940048217773};

  std::vector<double> double_data = {
      -1.47900053955270172068, -4.59277735274143061872,
      2.15365796963871947156, -2.55494554556038755422,
      3.06999137834642255029, 3.27460679459944969949,
      -3.98865109243288795682, -4.81065977167646074975,
      3.67902198302105531980, 3.72226414774102742911,
      0.80567768667100203572, 2.23788335717029518435,
      -0.52035578832931150828, -1.58493480710766210251,
      -0.30919688936285893988};
  // clang-format on

  std::vector<int64_t> int64_data = {
      -1, -4, 2, -2, 3, 3, -3, -4, 3, 3, 0, 2, 0, -1, 0};
  std::vector<int32_t> int32_data = {
      -1, -4, 2, -2, 3, 3, -3, -4, 3, 3, 0, 2, 0, -1, 0};
  std::vector<int16_t> int16_data = {
      -1, -4, 2, -2, 3, 3, -3, -4, 3, 3, 0, 2, 0, -1, 0};
  std::vector<int8_t> int8_data = {
      -1, -4, 2, -2, 3, 3, -3, -4, 3, 3, 0, 2, 0, -1, 0};

  // Gather all floating-point data together for easier traversal.
  FloatingTypeToDataMap floating_point_data;
  floating_point_data[typeid(float)] = float_data;
  floating_point_data[typeid(double)] = double_data;

  // Gather all int data together for easier traversal.
  IntTypeToDataMap int_data;
  int_data[typeid(int64_t)] = int64_data;
  int_data[typeid(int32_t)] = int32_data;
  int_data[typeid(int16_t)] = int16_data;
  int_data[typeid(int8_t)] = int8_data;

#define TEST_KERNEL(INPUT_CTYPE, INPUT_DTYPE, OUTPUT_CTYPE, OUTPUT_DTYPE) \
  test_runner_hardcode_data<                                              \
      INPUT_CTYPE,                                                        \
      ScalarType::INPUT_DTYPE,                                            \
      OUTPUT_CTYPE,                                                       \
      ScalarType::OUTPUT_DTYPE>(floating_point_data, int_data);

#define TEST_ENTRY(INPUT_CTYPE, INPUT_DTYPE) \
  ET_FORALL_INT_TYPES_WITH2(INPUT_CTYPE, INPUT_DTYPE, TEST_KERNEL);

  ET_FORALL_FLOAT_TYPES(TEST_ENTRY);
}

TEST_F(OpToDimOrderCopyTest, MismatchedSizesDie) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched sizes";
  }
  TensorFactory<ScalarType::Int> tf;
  Tensor input = tf.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf.zeros({3, 2, 1, 1});
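  // out has the same number of elements as input but a different shape, so
  // the kernel under test is expected to report a failure.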
  std::vector<int64_t> dim_order_vec;
  for (int64_t i = 0; i < input.dim(); i++) {
    dim_order_vec.push_back(i);
  }
  ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op__to_dim_order_copy_out(
          /*self=*/input,
          /*non_blocking=*/false,
          dim_order,
          out));
}

// Only supported dim orders (e.g. contiguous and channels-last) are allowed.
// The kernel is expected to fail when given an illegal dim order.
TEST_F(OpToDimOrderCopyTest, MismatchedMemoryFormatDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle non contiguous memory formats";
  }
  TensorFactory<ScalarType::Float> tf_in;
  TensorFactory<ScalarType::Float> tf_out;
  Tensor input =
      tf_in.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf_out.zeros({3, 1, 1, 2});

  std::vector<int64_t> dim_order_vec;
  for (int64_t i = 0; i < input.dim(); i++) {
    dim_order_vec.push_back(i);
  }

  // Mutate dim_order_vec to create an illegal dim order ({0, 3, 2, 1}), which
  // is neither contiguous nor channels-last.
  dim_order_vec[1] = 3;
  dim_order_vec[3] = 1;
  ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op__to_dim_order_copy_out(
          /*self=*/input,
          /*non_blocking=*/false,
          dim_order,
          out));
}

// Only blocking data transfer is supported.
TEST_F(OpToDimOrderCopyTest, MismatchedBlockingDie) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle non blocking data transfer";
  }
  TensorFactory<ScalarType::Int> tf;
  Tensor input = tf.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf.zeros(/*sizes=*/{3, 1, 1, 2});

  std::vector<int64_t> dim_order_vec;
  for (int64_t i = 0; i < input.dim(); i++) {
    dim_order_vec.push_back(i);
  }
  ArrayRef<int64_t> dim_order(dim_order_vec.data(), dim_order_vec.size());

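  // non_blocking=true requests a non-blocking transfer, which the kernel
  // under test does not support, so the call is expected to fail.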
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op__to_dim_order_copy_out(
          /*self=*/input,
          /*non_blocking=*/true,
          dim_order,
          out));
}

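// For the dynamic-shape tests below, test_dynamic_shape allocates out with
// the given shape and dynamism, and the op is expected to resize out to the
// {2, 3} shape of the result.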
TEST_F(OpToDimOrderCopyTest, DynamicShapeUpperBoundSameAsExpected) {
  test_dynamic_shape(
      {2, 3}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}

TEST_F(OpToDimOrderCopyTest, DynamicShapeUpperBoundLargerThanExpected) {
  test_dynamic_shape(
      {10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}

TEST_F(OpToDimOrderCopyTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape unbound not supported";
  }
  test_dynamic_shape(
      {1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
}

TEST_F(OpToDimOrderCopyTest, ContiguousToChannelsLast) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make_with_dimorder(
      {3, 5, 2, 2},
      {0.2432, 0.5248, 0.5361, 0.8513, 0.8184, 0.8206, 0.7357, 0.9655, 0.6138,
       0.1112, 0.2799, 0.1079, 0.9680, 0.2548, 0.0393, 0.6002, 0.2257, 0.8766,
       0.2715, 0.1595, 0.2029, 0.7026, 0.6982, 0.8529, 0.4405, 0.6560, 0.9217,
       0.6372, 0.2446, 0.6590, 0.3866, 0.7185, 0.4439, 0.5346, 0.3179, 0.4492,
       0.3491, 0.6970, 0.8456, 0.2516, 0.2345, 0.2924, 0.7695, 0.0911, 0.8530,
       0.8560, 0.6909, 0.7719, 0.8923, 0.5546, 0.6978, 0.8151, 0.3007, 0.3961,
       0.8416, 0.4296, 0.7203, 0.8963, 0.3597, 0.5552});

  Tensor out = tf.full_channels_last({3, 5, 2, 2}, 0.0);
  Tensor expected = tf.make_with_dimorder(
      {3, 5, 2, 2},
      {0.2432, 0.8184, 0.6138, 0.9680, 0.2257, 0.5248, 0.8206, 0.1112, 0.2548,
       0.8766, 0.5361, 0.7357, 0.2799, 0.0393, 0.2715, 0.8513, 0.9655, 0.1079,
       0.6002, 0.1595, 0.2029, 0.4405, 0.2446, 0.4439, 0.3491, 0.7026, 0.6560,
       0.6590, 0.5346, 0.6970, 0.6982, 0.9217, 0.3866, 0.3179, 0.8456, 0.8529,
       0.6372, 0.7185, 0.4492, 0.2516, 0.2345, 0.8530, 0.8923, 0.3007, 0.7203,
       0.2924, 0.8560, 0.5546, 0.3961, 0.8963, 0.7695, 0.6909, 0.6978, 0.8416,
       0.3597, 0.0911, 0.7719, 0.8151, 0.4296, 0.5552},
      /*dim_order=*/{0, 2, 3, 1});

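  // {0, 2, 3, 1} is the channels-last dim order for an NCHW tensor: the
  // channel dimension becomes the innermost (fastest-varying) one in memory.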
  std::vector<int64_t> dim_order_vec = {0, 2, 3, 1};
  exec_aten::ArrayRef<int64_t> dim_order(
      dim_order_vec.data(), dim_order_vec.size());
  Tensor ret = op__to_dim_order_copy_out(
      /*self*/ x, /*non_blocking*/ false, /*dim_order*/ dim_order, out);

  EXPECT_TENSOR_EQ(out, expected);
  EXPECT_TENSOR_EQ(ret, expected);
}

TEST_F(OpToDimOrderCopyTest, ChannelsLastToContiguous) {
  TensorFactory<ScalarType::Float> tf;

  Tensor out = tf.full({3, 5, 2, 2}, 0.0);
  Tensor x = tf.make_with_dimorder(
      {3, 5, 2, 2},
      {0.2432, 0.8184, 0.6138, 0.9680, 0.2257, 0.5248, 0.8206, 0.1112, 0.2548,
       0.8766, 0.5361, 0.7357, 0.2799, 0.0393, 0.2715, 0.8513, 0.9655, 0.1079,
       0.6002, 0.1595, 0.2029, 0.4405, 0.2446, 0.4439, 0.3491, 0.7026, 0.6560,
       0.6590, 0.5346, 0.6970, 0.6982, 0.9217, 0.3866, 0.3179, 0.8456, 0.8529,
       0.6372, 0.7185, 0.4492, 0.2516, 0.2345, 0.8530, 0.8923, 0.3007, 0.7203,
       0.2924, 0.8560, 0.5546, 0.3961, 0.8963, 0.7695, 0.6909, 0.6978, 0.8416,
       0.3597, 0.0911, 0.7719, 0.8151, 0.4296, 0.5552},
      /*dim_order=*/{0, 2, 3, 1});

  Tensor expected = tf.make_with_dimorder(
      {3, 5, 2, 2},
      {0.2432, 0.5248, 0.5361, 0.8513, 0.8184, 0.8206, 0.7357, 0.9655, 0.6138,
       0.1112, 0.2799, 0.1079, 0.9680, 0.2548, 0.0393, 0.6002, 0.2257, 0.8766,
       0.2715, 0.1595, 0.2029, 0.7026, 0.6982, 0.8529, 0.4405, 0.6560, 0.9217,
       0.6372, 0.2446, 0.6590, 0.3866, 0.7185, 0.4439, 0.5346, 0.3179, 0.4492,
       0.3491, 0.6970, 0.8456, 0.2516, 0.2345, 0.2924, 0.7695, 0.0911, 0.8530,
       0.8560, 0.6909, 0.7719, 0.8923, 0.5546, 0.6978, 0.8151, 0.3007, 0.3961,
       0.8416, 0.4296, 0.7203, 0.8963, 0.3597, 0.5552});

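  // {0, 1, 2, 3} is the contiguous dim order, so the channels-last input is
  // repacked into the default NCHW layout.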
  std::vector<int64_t> dim_order_vec = {0, 1, 2, 3};
  exec_aten::ArrayRef<int64_t> dim_order(
      dim_order_vec.data(), dim_order_vec.size());
  Tensor ret = op__to_dim_order_copy_out(
      /*self*/ x, /*non_blocking*/ false, /*dim_order*/ dim_order, out);

  EXPECT_TENSOR_EQ(out, expected);
  EXPECT_TENSOR_EQ(ret, expected);
}

TEST_F(OpToDimOrderCopyTest, PreserveChanneslLast) {
  TensorFactory<ScalarType::Float> tf;

  Tensor out = tf.full_channels_last({3, 5, 2, 2}, 0.0);
  Tensor x = tf.make_with_dimorder(
      {3, 5, 2, 2},
      {0.2432, 0.8184, 0.6138, 0.9680, 0.2257, 0.5248, 0.8206, 0.1112, 0.2548,
       0.8766, 0.5361, 0.7357, 0.2799, 0.0393, 0.2715, 0.8513, 0.9655, 0.1079,
       0.6002, 0.1595, 0.2029, 0.4405, 0.2446, 0.4439, 0.3491, 0.7026, 0.6560,
       0.6590, 0.5346, 0.6970, 0.6982, 0.9217, 0.3866, 0.3179, 0.8456, 0.8529,
       0.6372, 0.7185, 0.4492, 0.2516, 0.2345, 0.8530, 0.8923, 0.3007, 0.7203,
       0.2924, 0.8560, 0.5546, 0.3961, 0.8963, 0.7695, 0.6909, 0.6978, 0.8416,
       0.3597, 0.0911, 0.7719, 0.8151, 0.4296, 0.5552},
      /*dim_order=*/{0, 2, 3, 1});

  Tensor expected = tf.make_with_dimorder(
      {3, 5, 2, 2},
      {0.2432, 0.8184, 0.6138, 0.9680, 0.2257, 0.5248, 0.8206, 0.1112, 0.2548,
       0.8766, 0.5361, 0.7357, 0.2799, 0.0393, 0.2715, 0.8513, 0.9655, 0.1079,
       0.6002, 0.1595, 0.2029, 0.4405, 0.2446, 0.4439, 0.3491, 0.7026, 0.6560,
       0.6590, 0.5346, 0.6970, 0.6982, 0.9217, 0.3866, 0.3179, 0.8456, 0.8529,
       0.6372, 0.7185, 0.4492, 0.2516, 0.2345, 0.8530, 0.8923, 0.3007, 0.7203,
       0.2924, 0.8560, 0.5546, 0.3961, 0.8963, 0.7695, 0.6909, 0.6978, 0.8416,
       0.3597, 0.0911, 0.7719, 0.8151, 0.4296, 0.5552},
      /*dim_order=*/{0, 2, 3, 1});

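  // Passing nullopt for dim_order preserves the input's dim order, so the
  // channels-last layout is kept as-is.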
  Tensor ret = op__to_dim_order_copy_out(
      /*self*/ x,
      /*non_blocking*/ false,
      /*dim_order*/ exec_aten::nullopt,
      out);

  EXPECT_TENSOR_EQ(out, expected);
  EXPECT_TENSOR_EQ(ret, expected);
}