/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>

#include <gtest/gtest.h>

using namespace ::testing;
using exec_aten::MemoryFormat;
using exec_aten::optional;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpCloneTest : public OperatorTest {
 protected:
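  // Thin wrapper around the generated clone.out entry point so that every
  // test below can invoke the operator with the shared kernel runtime
  // context.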
  Tensor& op_clone_out(
      const Tensor& self,
      optional<MemoryFormat> memory_format,
      Tensor& out) {
    return torch::executor::aten::clone_outf(
        context_, self, memory_format, out);
  }

  // Test that clone.out works correctly for every supported input dtype.
  template <class CTYPE, exec_aten::ScalarType DTYPE>
  void test_dtype() {
    TensorFactory<DTYPE> tf;
    Tensor input = tf.make(/*sizes=*/{2, 4}, /*data=*/{2, 3, 2, 4, 1, 5, 1, 6});
    Tensor out_nullopt = tf.zeros(/*sizes=*/{2, 4});
    Tensor out_contiguous = tf.zeros(/*sizes=*/{2, 4});

    // Only contiguous memory is supported, so the memory format must be
    // either nullopt or MemoryFormat::Contiguous.
    Tensor out_nullopt_ret = op_clone_out(
        /*self=*/input,
        /*memory_format=*/exec_aten::nullopt,
        /*out=*/out_nullopt);
    Tensor out_contiguous_ret = op_clone_out(
        /*self=*/input,
        /*memory_format=*/exec_aten::MemoryFormat::Contiguous,
        /*out=*/out_contiguous);

    // The original tensor should hold the same values as both the out
    // arguments and the tensors returned by the clone function.
    EXPECT_TENSOR_EQ(input, out_nullopt);
    EXPECT_TENSOR_EQ(input, out_nullopt_ret);

    EXPECT_TENSOR_EQ(input, out_contiguous);
    EXPECT_TENSOR_EQ(input, out_contiguous_ret);
  }

  template <class CTYPE, ScalarType DTYPE>
  void test_empty_input() {
    TensorFactory<DTYPE> tf;
    Tensor input = tf.make(/*sizes=*/{3, 0, 1, 2}, /*data=*/{});
    Tensor out = tf.zeros({3, 0, 1, 2});
    op_clone_out(input, /*memory_format=*/exec_aten::nullopt, out);
    // Check that input and out share the same values while being distinct
    // tensor objects.
    EXPECT_TENSOR_EQ(input, out);
  }
};

// Regular test for clone.out across all real dtypes plus Bool.
TEST_F(OpCloneTest, AllDtypesSupported) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel test fails";
  }
#define TEST_ENTRY(ctype, dtype) test_dtype<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}

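// clone.out should also handle inputs that contain zero-sized dimensions.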
TEST_F(OpCloneTest, EmptyInputSupported) {
#define TEST_ENTRY(ctype, dtype) test_empty_input<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}

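// The out tensor must have the same sizes as the input; a shape mismatch is
// expected to fail (the ATen kernel, which can handle mismatched sizes, is
// skipped here).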
TEST_F(OpCloneTest, MismatchedSizesDie) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched sizes";
  }
  TensorFactory<ScalarType::Int> tf;
  Tensor input = tf.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf.zeros({3, 2, 1, 1});
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_clone_out(input, /*memory_format=*/exec_aten::nullopt, out));
}

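// The input and out tensors must share a dtype; cloning an Int input into a
// Float out tensor is expected to fail.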
TEST_F(OpCloneTest, MismatchedTypesDie) {
  TensorFactory<ScalarType::Int> tf_in;
  TensorFactory<ScalarType::Float> tf_out;
  Tensor input =
      tf_in.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf_out.zeros({3, 1, 1, 2});
  ET_EXPECT_KERNEL_FAILURE(
      context_, op_clone_out(input, /*memory_format=*/exec_aten::nullopt, out));
}

// Only contiguous memory is supported; any memory format other than nullopt
// or MemoryFormat::Contiguous is not allowed. The kernel is expected to fail
// when given an illegal memory format.
TEST_F(OpCloneTest, MismatchedMemoryFormatDie) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle non contiguous memory formats";
  }
  TensorFactory<ScalarType::Float> tf_in;
  TensorFactory<ScalarType::Float> tf_out;
  Tensor input =
      tf_in.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf_out.zeros({3, 1, 1, 2});
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_clone_out(input, static_cast<exec_aten::MemoryFormat>(55), out));
}

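// Generated smoke test: cloning a 10x10 tensor of ones should reproduce it
// exactly.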
TEST_F(OpCloneTest, SimpleGeneratedCase) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {10, 10},
      {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0});
  Tensor expected_result = tf.make(
      {10, 10},
      {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0});

  Tensor out = tf.zeros({10, 10});
  Tensor ret = op_clone_out(x, exec_aten::MemoryFormat::Contiguous, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

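// Dynamic-shape tests: out is allocated with an upper-bounded dynamic shape,
// and the kernel is expected to resize it to match the input before copying.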
TEST_F(OpCloneTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.816348671913147,
       0.44230276346206665,
       0.2767965793609619,
       0.8998266458511353,
       0.09595239162445068});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.816348671913147,
       0.44230276346206665,
       0.2767965793609619,
       0.8998266458511353,
       0.09595239162445068});

  Tensor out =
      tf.zeros({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_clone_out(x, exec_aten::MemoryFormat::Contiguous, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

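// Same as above, but out's upper-bound shape {10, 10} is larger than the
// input's; the kernel is expected to shrink out to {3, 2}.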
TEST_F(OpCloneTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.816348671913147,
       0.44230276346206665,
       0.2767965793609619,
       0.8998266458511353,
       0.09595239162445068});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.816348671913147,
       0.44230276346206665,
       0.2767965793609619,
       0.8998266458511353,
       0.09595239162445068});

  Tensor out =
      tf.zeros({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  Tensor ret = op_clone_out(x, exec_aten::MemoryFormat::Contiguous, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}

TEST_F(OpCloneTest, DynamicShapeUnbound) {
  GTEST_SKIP() << "Dynamic shape unbound not supported";
  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.816348671913147,
       0.44230276346206665,
       0.2767965793609619,
       0.8998266458511353,
       0.09595239162445068});
  Tensor expected_result = tf.make(
      {3, 2},
      {0.04876953363418579,
       0.816348671913147,
       0.44230276346206665,
       0.2767965793609619,
       0.8998266458511353,
       0.09595239162445068});

  Tensor out =
      tf.zeros({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  Tensor ret = op_clone_out(x, exec_aten::MemoryFormat::Contiguous, out);
  EXPECT_TENSOR_CLOSE(out, expected_result);
}