/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>

#include <gtest/gtest.h>

using namespace ::testing;
using exec_aten::IntArrayRef;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpViewTest : public OperatorTest {
 protected:
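  // Thin wrapper around the view_copy.out kernel under test, which is declared
  // by FunctionHeaderWrapper.h.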
  Tensor& op_view_copy_out(const Tensor& self, IntArrayRef size, Tensor& out) {
    return torch::executor::aten::view_copy_outf(context_, self, size, out);
  }

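  // Views `input` into each shape in `out_shapes` via op_view_copy_out,
  // checking that the returned tensor is `out` and that its data matches the
  // input's data.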
  template <class CTYPE, exec_aten::ScalarType DTYPE>
  void run_view_test_cases(
      const Tensor& input,
      const std::vector<std::vector<int32_t>>& out_shapes) {
    TensorFactory<DTYPE> tf;
    for (std::vector<int32_t> size : out_shapes) {
      Tensor out = tf.ones(size);

      // op_view_copy_out takes the target size as int64_t values, while the
      // tensor factory uses int32_t, so convert the size vector to int64_t
      // before passing it to op_view_copy_out.
      std::vector<int64_t> size_int64_t(size.size());
      std::transform(
          size.begin(), size.end(), size_int64_t.begin(), [](int32_t x) {
            return (int64_t)x;
          });

      Tensor ret = op_view_copy_out(
          input,
          exec_aten::ArrayRef<int64_t>(
              size_int64_t.data(), size_int64_t.size()),
          out);
      EXPECT_TENSOR_EQ(out, ret);
      EXPECT_TENSOR_DATA_EQ(input, out);
    }
  }

  // Test that op_view_copy_out works for all supported input dtypes.
  template <class CTYPE, exec_aten::ScalarType DTYPE>
  void test_dtype() {
    TensorFactory<DTYPE> tf;
    Tensor input = tf.make(/*sizes=*/{2, 4}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});

    // Different output shapes that meet the requirement (same numel as the
    // input).
    std::vector<std::vector<int32_t>> out_shapes = {
        {8},
        {8, 1},
        {1, 8},
        {2, 4},
        {4, 2},
        {2, 2, 2},
        {1, 2, 1, 2, 1, 2, 1},
    };

    run_view_test_cases<CTYPE, DTYPE>(input, out_shapes);
  }

  template <class CTYPE, ScalarType DTYPE>
  void test_empty_input() {
    TensorFactory<DTYPE> tf;
    Tensor input = tf.make(/*sizes=*/{3, 0, 1, 2}, /*data=*/{});
    // Different output shapes that meet the requirement (same numel as the
    // input).
    std::vector<std::vector<int32_t>> out_shapes = {
        {6, 0}, {6, 0, 0}, {3, 0, 1, 2}, {1, 0, 2, 3}};
    run_view_test_cases<CTYPE, DTYPE>(input, out_shapes);
  }

  /* %python
  import torch
  torch.manual_seed(0)
  x = torch.randint(10, (3, 4))
  res = x.view(2, 6)
  op = "op_view_copy_out"
  opt_setup_params = """
    int64_t size[] = {2, 6};
  """
  opt_extra_params = "size,"
  out_args = "out_shape, dynamism"
  dtype = "ScalarType::Int"
  check = "EXPECT_TENSOR_EQ" */

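  // Runs the x.view(2, 6) case produced by the %python block above: the out
  // tensor is created with the given shape and dynamism, then compared against
  // the expected {2, 6} result.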
  void test_dynamic_shape(
      const std::vector<int32_t>& out_shape,
      enum torch::executor::TensorShapeDynamism dynamism) {
    /* %python
    %rewrite(unary_op) */

    TensorFactory<ScalarType::Int> tf;

    Tensor x = tf.make({3, 4}, {4, 9, 3, 0, 3, 9, 7, 3, 7, 3, 1, 6});
    Tensor expected = tf.make({2, 6}, {4, 9, 3, 0, 3, 9, 7, 3, 7, 3, 1, 6});

    int64_t size[] = {2, 6};

    Tensor out = tf.zeros(out_shape, dynamism);
    op_view_copy_out(x, size, out);
    EXPECT_TENSOR_EQ(out, expected);
  }
};

namespace {
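// Converts an int32_t size vector (as used by TensorFactory) into the int64_t
// vector expected by op_view_copy_out's size argument.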
std::vector<int64_t> vector_32_to_64(std::vector<int32_t> vector_32) {
  std::vector<int64_t> vector_64(vector_32.size());
  std::transform(
      vector_32.begin(), vector_32.end(), vector_64.begin(), [](int32_t x) {
        return (int64_t)x;
      });
  return vector_64;
}

} // namespace

// Regular test for op_view_copy_out.
TEST_F(OpViewTest, AllDtypesSupported) {
#define TEST_ENTRY(ctype, dtype) test_dtype<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpViewTest, EmptyInputSupported) {
#define TEST_ENTRY(ctype, dtype) test_empty_input<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpViewTest, InputOutputMismatchedSizesDie) {
  TensorFactory<ScalarType::Int> tf;
  std::vector<int32_t> size_in = {3, 1, 1, 2};
  std::vector<int32_t> size_out = {3, 2, 1, 2};

  Tensor input = tf.make(size_in, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf.ones(size_out);

  // op_view_copy_out takes the target size as int64_t values, while the tensor
  // factory uses int32_t, so convert the size vector to int64_t before passing
  // it to op_view_copy_out.
  std::vector<int64_t> size_int64_t = vector_32_to_64(size_out);

  // The numel of the input and output tensors should be the same.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_view_copy_out(
          input,
          exec_aten::ArrayRef<int64_t>(
              size_int64_t.data(), size_int64_t.size()),
          out));
}

TEST_F(OpViewTest, SizeOutputMismatchedSizesDie) {
  TensorFactory<ScalarType::Int> tf;
  std::vector<int32_t> size = {3, 1, 1, 2};
  std::vector<int32_t> size_target = {3, 2, 1, 2};
  Tensor input = tf.make(size, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf.ones(size);

  // op_view_copy_out takes the target size as int64_t values, while the tensor
  // factory uses int32_t, so convert the size vector to int64_t before passing
  // it to op_view_copy_out.
  std::vector<int64_t> size_int64_t = vector_32_to_64(size_target);

  // The target size and out.size() should be the same.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_view_copy_out(
          input,
          exec_aten::ArrayRef<int64_t>(
              size_int64_t.data(), size_int64_t.size()),
          out));
}

TEST_F(OpViewTest, MismatchedTypesDie) {
  TensorFactory<ScalarType::Int> tf_in;
  TensorFactory<ScalarType::Float> tf_out;
  std::vector<int32_t> size = {3, 1, 1, 2};

  Tensor input = tf_in.make(size, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf_out.ones(size);

  // op_view_copy_out takes the target size as int64_t values, while the tensor
  // factory uses int32_t, so convert the size vector to int64_t before passing
  // it to op_view_copy_out.
  std::vector<int64_t> size_int64_t = vector_32_to_64(size);

  // The dtype of the input and output tensors should be the same.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_view_copy_out(
          input,
          exec_aten::ArrayRef<int64_t>(
              size_int64_t.data(), size_int64_t.size()),
          out));
}

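// In the target size, a -1 entry asks the kernel to infer that dimension from
// the input's numel; at most one dimension may be inferred.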
TEST_F(OpViewTest, SizeInfer) {
  TensorFactory<ScalarType::Float> tf_in;
  TensorFactory<ScalarType::Float> tf_out_valid, tf_out_invalid;
  std::vector<int32_t> in_size = {2, 2, 2};
  std::vector<int32_t> out_size_view = {4, 2};
  std::vector<int32_t> out_size_valid = {-1, 2};
  std::vector<int32_t> out_size_invalid = {-1, -1};

  Tensor input = tf_in.make(in_size, /*data=*/{1, 2, 3, 4, 5, 6, 7, 8});
  Tensor out = tf_out_valid.ones(out_size_view);

  // op_view_copy_out takes the target size as int64_t values, while the tensor
  // factory uses int32_t, so convert the size vectors to int64_t before
  // passing them to op_view_copy_out.
  std::vector<int64_t> valid_size_int64_t = vector_32_to_64(out_size_valid);
  std::vector<int64_t> invalid_size_int64_t = vector_32_to_64(out_size_invalid);

  // Inferring one dimension is valid.
  op_view_copy_out(
      input,
      exec_aten::ArrayRef<int64_t>(
          valid_size_int64_t.data(), valid_size_int64_t.size()),
      out);
  EXPECT_TENSOR_DATA_EQ(input, out);
  // Inferring two dimensions is invalid.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_view_copy_out(
          input,
          exec_aten::ArrayRef<int64_t>(
              invalid_size_int64_t.data(), invalid_size_int64_t.size()),
          out));
}

#if !defined(USE_ATEN_LIB)
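// Exercises op_view_copy_out with out tensors created with DYNAMIC_BOUND shape
// dynamism, including a target size that uses -1 to infer one dimension.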
TEST_F(OpViewTest, UpperBoundOutTensor) {
  TensorFactory<ScalarType::Float> tf;
  Tensor input = tf.make(/*sizes=*/{2, 4}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});
  Tensor output = tf.zeros(
      /*sizes=*/{2, 2, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);

  std::vector<int32_t> size = {2, 2, 2};
  Tensor ref_output = tf.make(size, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});
  std::vector<int64_t> size_int64_t(size.size());
  std::transform(size.begin(), size.end(), size_int64_t.begin(), [](int32_t x) {
    return (int64_t)x;
  });

  op_view_copy_out(
      input,
      exec_aten::ArrayRef<int64_t>(size_int64_t.data(), size_int64_t.size()),
      output);
  EXPECT_TENSOR_EQ(ref_output, output);

  output = tf.zeros(
      /*sizes=*/{1, 4, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  size = std::vector<int32_t>({1, 4, 2});
  ref_output = tf.make(size, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});
  size_int64_t = std::vector<int64_t>(size.size());
  std::transform(size.begin(), size.end(), size_int64_t.begin(), [](int32_t x) {
    return (int64_t)x;
  });
  size_int64_t[1] = -1;

  op_view_copy_out(
      input,
      exec_aten::ArrayRef<int64_t>(size_int64_t.data(), size_int64_t.size()),
      output);
  EXPECT_TENSOR_EQ(ref_output, output);
}
#endif

TEST_F(OpViewTest, DynamicShapeUpperBoundSameAsExpected) {
  test_dynamic_shape(
      {2, 6}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}

TEST_F(OpViewTest, DynamicShapeUpperBoundLargerThanExpected) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  test_dynamic_shape(
      {10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}

TEST_F(OpViewTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  test_dynamic_shape(
      {1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
}