/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>

#include <gtest/gtest.h>

using namespace ::testing;
using exec_aten::MemoryFormat;
using exec_aten::optional;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpCopyTest : public OperatorTest {
 protected:
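  // Thin wrapper around the copy.out entry point; it forwards the fixture's
  // runtime context (context_) along with the test arguments.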
  Tensor& op_copy_out(
      const Tensor& self,
      const Tensor& src,
      bool non_blocking,
      Tensor& out) {
    return torch::executor::aten::copy_outf(
        context_, self, src, non_blocking, out);
  }

  // Test that copy.out works correctly for every supported input dtype.
  template <class CTYPE, exec_aten::ScalarType DTYPE>
  void test_dtype() {
    TensorFactory<DTYPE> tf;
    Tensor self = tf.make(/*sizes=*/{2, 4}, /*data=*/{2, 3, 2, 4, 1, 5, 1, 6});
    Tensor src = tf.make(/*sizes=*/{2, 4}, /*data=*/{2, 3, 2, 4, 1, 5, 1, 6});
    bool non_blocking = false;
    Tensor out_nullopt = tf.zeros(/*sizes=*/{2, 4});
    Tensor out_contiguous = tf.zeros(/*sizes=*/{2, 4});

    // Only contiguous memory is supported; the memory format must be either
    // nullopt or MemoryFormat::Contiguous.
    Tensor out_nullopt_ret = op_copy_out(
        /*self=*/self,
        /*src=*/src,
        /*non_blocking=*/non_blocking,
        /*out=*/out_nullopt);
    Tensor out_contiguous_ret = op_copy_out(
        /*self=*/self,
        /*src=*/src,
        /*non_blocking=*/non_blocking,
        /*out=*/out_contiguous);

    // The src values should appear in both the out arguments and the values
    // returned by the copy function.
    EXPECT_TENSOR_EQ(src, out_nullopt);
    EXPECT_TENSOR_EQ(src, out_nullopt_ret);

    EXPECT_TENSOR_EQ(src, out_contiguous);
    EXPECT_TENSOR_EQ(src, out_contiguous_ret);
  }

  template <class CTYPE, ScalarType DTYPE>
  void test_empty_input() {
    TensorFactory<DTYPE> tf;
    Tensor self = tf.make(/*sizes=*/{3, 0, 1, 2}, /*data=*/{});
    Tensor src = tf.make(/*sizes=*/{3, 0, 1, 2}, /*data=*/{});
    bool non_blocking = false;
    Tensor out = tf.zeros({3, 0, 1, 2});
    op_copy_out(self, src, non_blocking, out);
    // Check that src and out hold the same values even though they are
    // distinct objects.
    EXPECT_TENSOR_EQ(src, out);
  }

  /* %python
  import torch
  torch.manual_seed(0)
  self = torch.randint(10, (3, 4))
  src = torch.randint(10, (3, 4))
  non_blocking = False
  expected = src
  out_args = "out_shape, dynamism"

  copy_template = f"""
    {declare_tensor_factory("ScalarType::Int", "tf")}

    {declare_tensor_make_t("self", "tf")}
    {declare_tensor_make_t("src", "tf")}
    {declare_tensor_make_t("expected", "tf")}
    {declare_tensor_zeros("out_shape, dynamism", "tf", "out")}

    op_copy_out(self, src, $non_blocking$, out);
    EXPECT_TENSOR_EQ(out, expected);""" */

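  // test_dynamic_shape below is the expansion of copy_template above: the
  // tensor literals come from the seeded torch.randint calls in the %python
  // block and are presumably regenerated via the %rewrite marker rather than
  // edited by hand.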
  void test_dynamic_shape(
      const std::vector<int32_t>& out_shape,
      enum torch::executor::TensorShapeDynamism dynamism) {
    /* %python
    %rewrite(copy_template) */

    TensorFactory<ScalarType::Int> tf;

    Tensor self = tf.make({3, 4}, {4, 9, 3, 0, 3, 9, 7, 3, 7, 3, 1, 6});
    Tensor src = tf.make({3, 4}, {6, 9, 8, 6, 6, 8, 4, 3, 6, 9, 1, 4});
    Tensor expected = tf.make({3, 4}, {6, 9, 8, 6, 6, 8, 4, 3, 6, 9, 1, 4});
    Tensor out = tf.zeros(out_shape, dynamism);

    op_copy_out(self, src, false, out);
    EXPECT_TENSOR_EQ(out, expected);
  }
};

class OpCopyInplaceTest : public OperatorTest {
 protected:
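  // Wrapper around the in-place copy_ variant, which writes src directly into
  // self.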
  Tensor& op_copy_(Tensor& self, const Tensor& src, bool non_blocking) {
    return torch::executor::aten::copy_(context_, self, src, non_blocking);
  }
};

// Regular test for copy.out: run test_dtype over every supported real dtype.
TEST_F(OpCopyTest, AllRealDtypesSupported) {
#define TEST_ENTRY(ctype, dtype) test_dtype<ctype, ScalarType::dtype>();
  ET_FORALL_REALHBF16_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpCopyTest, EmptyInputSupported) {
#define TEST_ENTRY(ctype, dtype) test_empty_input<ctype, ScalarType::dtype>();
  ET_FORALL_REALHBF16_TYPES(TEST_ENTRY);
#undef TEST_ENTRY
}

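// copy.out broadcasts src to self's shape, so a lower-rank or size-1 src can
// fill a larger out, as the next two tests show.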
TEST_F(OpCopyTest, BroadCastSrcSupported) {
  TensorFactory<ScalarType::Int> tf;
  Tensor self = tf.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 3, 4});
  Tensor src = tf.make(/*sizes=*/{1, 2}, /*data=*/{3, 3});
  bool non_blocking = false;
  Tensor out = tf.zeros({2, 2});
  op_copy_out(self, src, non_blocking, out);
  Tensor out_expected = tf.make(/*sizes=*/{2, 2}, /*data=*/{3, 3, 3, 3});
  EXPECT_TENSOR_EQ(out, out_expected);
}

TEST_F(OpCopyTest, BroadCastSrcMissingDimSupported) {
  TensorFactory<ScalarType::Int> tf;
  Tensor self = tf.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 3, 4});
  // src has one fewer dimension than self; broadcasting supplies the missing
  // leading dim.
  Tensor src = tf.make(/*sizes=*/{2}, /*data=*/{3, 3});
  bool non_blocking = false;
  Tensor out = tf.zeros({2, 2});
  op_copy_out(self, src, non_blocking, out);
  Tensor out_expected = tf.make(/*sizes=*/{2, 2}, /*data=*/{3, 3, 3, 3});
  EXPECT_TENSOR_EQ(out, out_expected);
}

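// Broadcasting in the other direction is not supported: self (and out) may not
// be smaller than src.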
TEST_F(OpCopyTest, BroadCastSelfcSupportedDie) {
  TensorFactory<ScalarType::Int> tf;
  Tensor self = tf.make(/*sizes=*/{1, 2}, /*data=*/{3, 3});
  Tensor src = tf.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 3, 4});
  bool non_blocking = false;
  Tensor out = tf.zeros({2, 2});
  ET_EXPECT_KERNEL_FAILURE(context_, op_copy_out(self, src, non_blocking, out));
}

TEST_F(OpCopyTest, MismatchSelfSrcTypeSupported) {
  TensorFactory<ScalarType::Int> tf_self;
  TensorFactory<ScalarType::Float> tf_src;
  Tensor self =
      tf_self.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor src = tf_src.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf_src.zeros({3, 0, 1, 2});
  bool non_blocking = false;
  ET_EXPECT_KERNEL_FAILURE(context_, op_copy_out(self, src, non_blocking, out));
}

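// The resize tests below exercise dynamic-bound output resizing, which only
// applies to the ExecuTorch kernels, so they are compiled out in ATen mode.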
#ifndef USE_ATEN_LIB
TEST_F(OpCopyTest, ResizeOutSupported) {
  TensorFactory<ScalarType::Int> tf;
  Tensor self = tf.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor src = tf.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf.zeros(
      {4, 2, 2, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  bool non_blocking = false;
  op_copy_out(self, src, non_blocking, out);
  Tensor out_expected =
      tf.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  EXPECT_TENSOR_EQ(out, out_expected);
}

TEST_F(OpCopyTest, ResizeOutDie) {
  TensorFactory<ScalarType::Int> tf_self;
  TensorFactory<ScalarType::Float> tf_src;
  Tensor self =
      tf_self.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor src = tf_src.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf_src.zeros(
      {3, 2, 0}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  bool non_blocking = false;
  ET_EXPECT_KERNEL_FAILURE(context_, op_copy_out(self, src, non_blocking, out));
}
#endif

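// Outside of the dynamic-shape cases, out must match self's sizes exactly; the
// ATen kernel can handle mismatched sizes, so the check is skipped there.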
TEST_F(OpCopyTest, MismatchedSizesDie) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched sizes";
  }
  TensorFactory<ScalarType::Int> tf;
  Tensor self = tf.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor src = tf.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  bool non_blocking = false;
  Tensor out = tf.zeros({3, 2, 1, 1});
  ET_EXPECT_KERNEL_FAILURE(context_, op_copy_out(self, src, non_blocking, out));
}

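// out must also share the inputs' dtype; a Float out for Int self/src is
// rejected.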
TEST_F(OpCopyTest, MismatchedSrcOutTypesDie) {
  TensorFactory<ScalarType::Int> tf_in;
  TensorFactory<ScalarType::Float> tf_out;
  Tensor self = tf_in.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor src = tf_in.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  bool non_blocking = false;
  Tensor out = tf_out.zeros({3, 1, 1, 2});
  ET_EXPECT_KERNEL_FAILURE(context_, op_copy_out(self, src, non_blocking, out));
}

// Only contiguous memory is supported (the memory format must be either
// nullopt or MemoryFormat::Contiguous), and only blocking copies are allowed:
// calling with non_blocking = true is expected to die.
TEST_F(OpCopyTest, BlockingDie) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle non-contiguous memory formats";
  }
  TensorFactory<ScalarType::Float> tf_in;
  TensorFactory<ScalarType::Float> tf_out;
  Tensor self = tf_in.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor src = tf_in.make(/*sizes=*/{3, 1, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  bool non_blocking = true;
  Tensor out = tf_out.zeros({3, 1, 1, 2});
  ET_EXPECT_KERNEL_FAILURE(context_, op_copy_out(self, src, non_blocking, out));
}

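// The dynamic-shape tests reuse test_dynamic_shape with different out shapes
// and dynamism modes; when output resizing is supported, out is resized to the
// expected {3, 4} shape before the copy.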
TEST_F(OpCopyTest, DynamicShapeUpperBoundSameAsExpected) {
  test_dynamic_shape(
      {3, 4}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}

TEST_F(OpCopyTest, DynamicShapeUpperBoundLargerThanExpected) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  test_dynamic_shape(
      {10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}

TEST_F(OpCopyTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  test_dynamic_shape(
      {1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
}

TEST_F(OpCopyInplaceTest, SmokeTest) {
  TensorFactory<ScalarType::Int> tf;
  Tensor in = tf.zeros({2, 2});
  Tensor src = tf.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 3, 4});
  bool non_blocking = false;
  op_copy_(in, src, non_blocking);
  Tensor expected = tf.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 3, 4});
  EXPECT_TENSOR_EQ(in, expected);
}

TEST_F(OpCopyInplaceTest, BroadCastSrcSupported) {
  TensorFactory<ScalarType::Int> tf;
  Tensor in = tf.make(/*sizes=*/{2, 2}, /*data=*/{1, 2, 3, 4});
  Tensor src = tf.make(/*sizes=*/{1, 2}, /*data=*/{3, 3});
  bool non_blocking = false;
  op_copy_(in, src, non_blocking);
  Tensor expected = tf.make(/*sizes=*/{2, 2}, /*data=*/{3, 3, 3, 3});
  EXPECT_TENSOR_EQ(in, expected);
}