/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>

#include <gtest/gtest.h>
#include <cmath>
#include <cstdint>

using namespace ::testing;
using exec_aten::ArrayRef;
using exec_aten::optional;
using exec_aten::Scalar;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

28 class OpAsStridedCopyOutTest : public OperatorTest {
29 protected:
op_as_strided_copy_out(const Tensor & self,ArrayRef<int64_t> size,ArrayRef<int64_t> stride,optional<int64_t> storage_offset,Tensor & out)30 Tensor& op_as_strided_copy_out(
31 const Tensor& self,
32 ArrayRef<int64_t> size,
33 ArrayRef<int64_t> stride,
34 optional<int64_t> storage_offset,
35 Tensor& out) {
36 return torch::executor::aten::as_strided_copy_outf(
37 context_, self, size, stride, storage_offset, out);
38 }
39
40 // Common testing for eq operator
41 template <ScalarType DTYPE>
test_detach_copy_out()42 void test_detach_copy_out() {
43 TensorFactory<DTYPE> tf;
44 const std::vector<int32_t> in_sizes = {3, 3};
45 const std::vector<int32_t> out_sizes = {2, 2, 2};
46
47 Tensor in = tf.make(in_sizes, {1, 2, 3, 4, 5, 6, 7, 8, 9});
48 Tensor out = tf.zeros(out_sizes);
49
50 // Valid input should give the expected output
51 optional<int64_t> storage_offset;
52 int64_t sizes[3] = {2, 2, 2};
53 int64_t stride[3] = {1, 2, 3};
54 op_as_strided_copy_out(
55 /*self=*/in,
56 /*size=*/ArrayRef<int64_t>{sizes, 3},
57 /*stride=*/ArrayRef<int64_t>{stride, 3},
58 storage_offset,
59 out);
60 EXPECT_TENSOR_EQ(out, tf.make(out_sizes, {1, 4, 3, 6, 2, 5, 4, 7}));
61
62 // With storage offset
63 op_as_strided_copy_out(
64 /*self=*/in,
65 /*size=*/ArrayRef<int64_t>{sizes, 3},
66 /*stride=*/ArrayRef<int64_t>{stride, 3},
67 /*storage_offset=*/2,
68 out);
69 EXPECT_TENSOR_EQ(out, tf.make(out_sizes, {3, 6, 5, 8, 4, 7, 6, 9}));
70 }
71
72 template <ScalarType DTYPE>
test_as_strided_copy_out_invalid_parameters()73 void test_as_strided_copy_out_invalid_parameters() {
74 TensorFactory<DTYPE> tf;
75
76 const std::vector<int32_t> in_sizes = {3, 3};
77 const std::vector<int32_t> out_sizes = {2, 2, 2};
78
79 Tensor in = tf.ones(in_sizes);
80 Tensor out = tf.zeros(out_sizes);
81 optional<int64_t> storage_offset;
82 int64_t sizes[3] = {2, 2, 2};
83 int64_t stride[3] = {1, 2, 3};
84
85 // Mismatch strides and shape should die
86 int64_t stride_short[2] = {1, 2};
87 ET_EXPECT_KERNEL_FAILURE(
88 context_,
89 op_as_strided_copy_out(
90 /*self=*/in,
91 /*size=*/ArrayRef<int64_t>{sizes, 3},
92 /*stride=*/ArrayRef<int64_t>{stride_short, 2},
93 storage_offset,
94 out));
95
96 // Negative strides should die
97 int64_t stride_negative[3] = {1, 2, -1};
98 ET_EXPECT_KERNEL_FAILURE(
99 context_,
100 op_as_strided_copy_out(
101 /*self=*/in,
102 /*size=*/ArrayRef<int64_t>{sizes, 3},
103 /*stride=*/ArrayRef<int64_t>{stride_negative, 3},
104 storage_offset,
105 out));
106
107 // Mismatch output tensor shape and size should die
108 int64_t size_invalid[3] = {2, 2, 1};
109 ET_EXPECT_KERNEL_FAILURE(
110 context_,
111 op_as_strided_copy_out(
112 /*self=*/in,
113 /*size=*/ArrayRef<int64_t>{size_invalid, 3},
114 /*stride=*/ArrayRef<int64_t>{stride, 3},
115 storage_offset,
116 out));
117
118 // Invalid storage offset should die
119 storage_offset = -1;
120 ET_EXPECT_KERNEL_FAILURE(
121 context_,
122 op_as_strided_copy_out(
123 /*self=*/in,
124 /*size=*/ArrayRef<int64_t>{sizes, 3},
125 /*stride=*/ArrayRef<int64_t>{stride, 3},
126 storage_offset,
127 out));
128
129 // Out of bound storage access of `in` should die
130 storage_offset = 3;
131 ET_EXPECT_KERNEL_FAILURE(
132 context_,
133 op_as_strided_copy_out(
134 /*self=*/in,
135 /*size=*/ArrayRef<int64_t>{sizes, 3},
136 /*stride=*/ArrayRef<int64_t>{stride, 3},
137 storage_offset,
138 out));
139 }
140 };
141
142 template <>
test_detach_copy_out()143 void OpAsStridedCopyOutTest::test_detach_copy_out<ScalarType::Bool>() {
144 TensorFactory<ScalarType::Bool> tf;
145 const std::vector<int32_t> in_sizes = {3, 3};
146 const std::vector<int32_t> out_sizes = {2, 2, 2};
147 Tensor in = tf.make(
148 in_sizes, {false, true, false, true, false, true, false, true, false});
149 Tensor out = tf.zeros(out_sizes);
150
151 // Valid input should give the expected output
152 optional<int64_t> storage_offset = 2;
153 int64_t sizes[3] = {2, 2, 2};
154 int64_t stride[3] = {1, 2, 3};
155 op_as_strided_copy_out(
156 /*self=*/in,
157 /*size=*/ArrayRef<int64_t>{sizes, 3},
158 /*stride=*/ArrayRef<int64_t>{stride, 3},
159 storage_offset,
160 out);
161 EXPECT_TENSOR_EQ(
162 out,
163 tf.make(out_sizes, {false, true, false, true, true, false, true, false}));
164 }
165
166 template <>
test_detach_copy_out()167 void OpAsStridedCopyOutTest::test_detach_copy_out<ScalarType::Float>() {
168 TensorFactory<ScalarType::Float> tf;
169 const std::vector<int32_t> in_sizes = {3, 3};
170 const std::vector<int32_t> out_sizes = {2, 2, 2};
171
172 Tensor in = tf.make(
173 in_sizes, {3.14, 2.33, 42, INFINITY, -INFINITY, NAN, -3.14, -2.33, -42});
174 Tensor out = tf.zeros(out_sizes);
175
176 // Valid input should give the expected output
177 optional<int64_t> storage_offset = 2;
178 int64_t sizes[3] = {2, 2, 2};
179 int64_t stride[3] = {1, 2, 3};
180 op_as_strided_copy_out(
181 /*self=*/in,
182 /*size=*/ArrayRef<int64_t>{sizes, 3},
183 /*stride=*/ArrayRef<int64_t>{stride, 3},
184 storage_offset,
185 out);
186 EXPECT_TENSOR_CLOSE(
187 out,
188 tf.make(
189 out_sizes,
190 {42.0, NAN, -INFINITY, 2.33, INFINITY, -3.14, NAN, -42.0}));
191 }
192
// Runs the common happy-path test across all integer dtypes (the Bool and
// Float cases are covered by the explicit specializations above).
TEST_F(OpAsStridedCopyOutTest, AllScalarInputOutputSupport) {
#define TEST_ENTRY(ctype, dtype) test_detach_copy_out<ScalarType::dtype>();
  ET_FORALL_INT_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
}
198
// Runs the error-path test across all real dtypes plus Bool. Skipped under
// ATen, whose as_strided accepts parameters the portable kernel rejects.
TEST_F(OpAsStridedCopyOutTest, InvalidParametersDies) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle invalid parameter";
  }
#define TEST_ENTRY(ctype, dtype) \
  test_as_strided_copy_out_invalid_parameters<ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}
208
// A Byte input with a Char output must be rejected: as_strided_copy requires
// `self` and `out` to share a dtype.
TEST_F(OpAsStridedCopyOutTest, MismatchedInputDtypesDies) {
  TensorFactory<ScalarType::Byte> tf_byte;
  TensorFactory<ScalarType::Char> tf_char;
  const std::vector<int32_t> in_sizes = {3, 3};
  const std::vector<int32_t> out_sizes = {2, 2, 2};

  Tensor in = tf_byte.make(in_sizes, {1, 2, 3, 4, 5, 6, 7, 8, 9});
  Tensor out = tf_char.zeros(out_sizes);
  optional<int64_t> storage_offset;
  int64_t sizes[3] = {2, 2, 2};
  int64_t stride[3] = {1, 2, 3};

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_as_strided_copy_out(
          /*self=*/in,
          /*size=*/ArrayRef<int64_t>{sizes, 3},
          /*stride=*/ArrayRef<int64_t>{stride, 3},
          storage_offset,
          out));
}
230
/* %python
import torch
torch.manual_seed(0)
x = torch.rand(3, 3)
res = torch.as_strided(x, (2, 2, 2), (1, 2, 3))
op = "op_as_strided_copy_out"
opt_setup_params = f"""
  {declare_array_ref([2, 2, 2], "int64_t", "size")}
  {declare_array_ref([1, 2, 3], "int64_t", "stride")}
  optional<int64_t> storage_offset;
"""
opt_extra_params = "size, stride, storage_offset,"
dtype = "ScalarType::Float"
check = "EXPECT_TENSOR_EQ" */
245
// Dynamic-bound output whose upper bound exactly matches the result shape.
// Reference values were generated by the %python block above.
TEST_F(OpAsStridedCopyOutTest, DynamicShapeUpperBoundSameAsExpected) {
  /* %python
  out_args = "{2, 2, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 3},
      {0.49625658988952637,
       0.7682217955589294,
       0.08847743272781372,
       0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.4900934100151062,
       0.8964447379112244,
       0.455627977848053});
  Tensor expected = tf.make(
      {2, 2, 2},
      {0.49625658988952637,
       0.13203048706054688,
       0.08847743272781372,
       0.6340786814689636,
       0.7682217955589294,
       0.30742281675338745,
       0.13203048706054688,
       0.4900934100151062});

  std::vector<int64_t> sizev = {2, 2, 2};
  ArrayRef<int64_t> size(sizev.data(), sizev.size());
  std::vector<int64_t> stridev = {1, 2, 3};
  ArrayRef<int64_t> stride(stridev.data(), stridev.size());
  optional<int64_t> storage_offset;

  Tensor out =
      tf.zeros({2, 2, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_as_strided_copy_out(x, size, stride, storage_offset, out);
  EXPECT_TENSOR_EQ(out, expected);
}
286
// Dynamic-bound output whose 5x5x5 upper bound exceeds the 2x2x2 result: the
// kernel must resize `out` down to the requested size.
TEST_F(OpAsStridedCopyOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  /* %python
  out_args = "{5, 5, 5}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 3},
      {0.49625658988952637,
       0.7682217955589294,
       0.08847743272781372,
       0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.4900934100151062,
       0.8964447379112244,
       0.455627977848053});
  Tensor expected = tf.make(
      {2, 2, 2},
      {0.49625658988952637,
       0.13203048706054688,
       0.08847743272781372,
       0.6340786814689636,
       0.7682217955589294,
       0.30742281675338745,
       0.13203048706054688,
       0.4900934100151062});

  std::vector<int64_t> sizev = {2, 2, 2};
  ArrayRef<int64_t> size(sizev.data(), sizev.size());
  std::vector<int64_t> stridev = {1, 2, 3};
  ArrayRef<int64_t> stride(stridev.data(), stridev.size());
  optional<int64_t> storage_offset;

  Tensor out =
      tf.zeros({5, 5, 5}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_as_strided_copy_out(x, size, stride, storage_offset, out);
  EXPECT_TENSOR_EQ(out, expected);
}
327
// Fully dynamic (unbound) output starting at 1x1x1: the kernel must grow
// `out` to 2x2x2. Skipped when the kernel set can't resize outputs.
TEST_F(OpAsStridedCopyOutTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape unbound not supported";
  }
  /* %python
  out_args = "{1, 1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {3, 3},
      {0.49625658988952637,
       0.7682217955589294,
       0.08847743272781372,
       0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.4900934100151062,
       0.8964447379112244,
       0.455627977848053});
  Tensor expected = tf.make(
      {2, 2, 2},
      {0.49625658988952637,
       0.13203048706054688,
       0.08847743272781372,
       0.6340786814689636,
       0.7682217955589294,
       0.30742281675338745,
       0.13203048706054688,
       0.4900934100151062});

  std::vector<int64_t> sizev = {2, 2, 2};
  ArrayRef<int64_t> size(sizev.data(), sizev.size());
  std::vector<int64_t> stridev = {1, 2, 3};
  ArrayRef<int64_t> stride(stridev.data(), stridev.size());
  optional<int64_t> storage_offset;

  Tensor out = tf.zeros(
      {1, 1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  op_as_strided_copy_out(x, size, stride, storage_offset, out);
  EXPECT_TENSOR_EQ(out, expected);
}