/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>

#include <executorch/kernels/test/TestUtil.h>

#include <gtest/gtest.h>
#include <cstdio>

using namespace ::testing;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpUnsqueezeTest : public OperatorTest {
 protected:
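  // Thin wrapper around the generated unsqueeze_copy.out entry point; it
  // forwards the test fixture's kernel runtime context so tests can call the
  // op with just (input, dim, out).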
  Tensor& op_unsqueeze_copy_out(const Tensor& self, int64_t dim, Tensor& out) {
    return torch::executor::aten::unsqueeze_copy_outf(context_, self, dim, out);
  }

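  // For each dim in `dims`, unsqueezes `input` into a freshly allocated out
  // tensor of the expected shape and checks that the op copied the input data
  // unchanged.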
  template <class CTYPE, ScalarType DTYPE>
  void run_unsqueeze_test_cases(
      const Tensor& input,
      const std::vector<int64_t>& dims) {
    TensorFactory<DTYPE> tf;

    // Initialize the platform abstraction layer so runtime facilities such as
    // logging are available in these tests.
    et_pal_init();

    for (int64_t dim : dims) {
      std::vector<int32_t> size_out = generate_size_out(input.sizes(), dim);
      Tensor out = tf.ones(size_out);
      Tensor ret = op_unsqueeze_copy_out(input, dim, out);

      // The op returns `out`, so the first check is only a self-consistency
      // check; the data comparison below is the real assertion.
      EXPECT_TENSOR_EQ(out, ret);
      EXPECT_TENSOR_DATA_EQ(input, out);
    }
  }

  // Test that op_unsqueeze_copy_out works for every supported input dtype.
  template <class CTYPE, ScalarType DTYPE>
  void test_dtype() {
    TensorFactory<DTYPE> tf;
    Tensor input = tf.make(/*sizes=*/{2, 4}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});

    // All valid dims given the shape of the input
    // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()]
    // Here input.dim == 2, so the range of legal dim for unsqueeze is [-3, 2]
    std::vector<int64_t> dims = {-3, -2, -1, 0, 1, 2};
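    // For example, with this {2, 4} input:
    //   dim -3 or 0 -> out shape {1, 2, 4}
    //   dim -2 or 1 -> out shape {2, 1, 4}
    //   dim -1 or 2 -> out shape {2, 4, 1}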

    run_unsqueeze_test_cases<CTYPE, DTYPE>(input, dims);
  }

  template <class CTYPE, ScalarType DTYPE>
  void test_empty_input() {
    TensorFactory<DTYPE> tf;
    Tensor input = tf.make(/*sizes=*/{3, 0, 1, 2}, /*data=*/{});

    // All valid dims given the shape of the input
    // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()]
    // Here input.dim == 4, so the range of legal dim for unsqueeze is [-5, 4]
    std::vector<int64_t> dims = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4};
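    // For example, dim 0 (or -5) -> out shape {1, 3, 0, 1, 2};
    // dim 4 (or -1) -> out shape {3, 0, 1, 2, 1}.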

    run_unsqueeze_test_cases<CTYPE, DTYPE>(input, dims);
  }

  // Generate the output size from the input size and the dim to unsqueeze on.
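  // Illustrative example: size_in = {3, 4} with dim = 1 (or dim = -2)
  // yields size_out = {3, 1, 4}.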
  std::vector<int32_t> generate_size_out(
#ifdef USE_ATEN_LIB
      const c10::IntArrayRef& size_in,
#else
      const exec_aten::ArrayRef<int32_t>& size_in,
#endif
      int64_t dim) {
    std::vector<int32_t> size_out(size_in.size() + 1);

    // Support python-style negative indexing.
    if (dim < 0) {
      // Since we do not have out.dim() directly, calculate it from the input.
      dim += size_in.size() + 1;
    }
    EXPECT_GE(dim, 0);
    EXPECT_LT(dim, size_in.size() + 1);

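    // Copy the dims before `dim`, insert a 1 at `dim`, and shift the
    // remaining input dims one position to the right.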
    for (int32_t i = 0; i <= size_in.size(); i++) {
      if (i < dim) {
        size_out[i] = size_in[i];
      } else if (i > dim) {
        size_out[i] = size_in[i - 1];
      } else { // i == dim
        size_out[dim] = 1;
      }
    }

    return size_out;
  }
};

// regular test for op_unsqueeze_copy_out
TEST_F(OpUnsqueezeTest, AllDtypesSupported) {
#define TEST_ENTRY(ctype, dtype) test_dtype<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpUnsqueezeTest, EmptyInputSupported) {
#define TEST_ENTRY(ctype, dtype) test_empty_input<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpUnsqueezeTest, InputOutputMismatchedSizesDie) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched sizes";
  }
  TensorFactory<ScalarType::Int> tf;

  Tensor input = tf.make(/*sizes=*/{3, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  int64_t dim = 1;

  // Unsqueezing the input on dim 1 should produce a tensor of shape (3, 1, 1, 2).
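  // Neither of the out shapes below matches {3, 1, 1, 2}, so the kernel is
  // expected to fail: the first has the right rank but the wrong size, the
  // second has the wrong rank.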
  Tensor out = tf.ones(/*sizes=*/{3, 1, 1, 1});
  ET_EXPECT_KERNEL_FAILURE(context_, op_unsqueeze_copy_out(input, dim, out));
  out = tf.ones(/*sizes=*/{3, 1, 1, 2, 1});
  ET_EXPECT_KERNEL_FAILURE(context_, op_unsqueeze_copy_out(input, dim, out));
}

TEST_F(OpUnsqueezeTest, DimOutputMismatchedSizesDie) {
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched sizes";
  }
  TensorFactory<ScalarType::Int> tf;
  Tensor input = tf.make(/*sizes=*/{3, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf.ones(/*sizes=*/{3, 1, 2, 1});
  int64_t dim = 2;

  // The output size should be [3, 1, 1, 2], not [3, 1, 2, 1], since dim is 2, not 3.
  ET_EXPECT_KERNEL_FAILURE(context_, op_unsqueeze_copy_out(input, dim, out));
}

TEST_F(OpUnsqueezeTest, MismatchedTypesDie) {
  TensorFactory<ScalarType::Int> tf_in;
  TensorFactory<ScalarType::Double> tf_out;
  Tensor input = tf_in.make(/*sizes=*/{3, 1, 2}, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf_out.ones(/*sizes=*/{3, 1, 2, 1});
  int64_t dim = 3;

  ET_EXPECT_KERNEL_FAILURE(context_, op_unsqueeze_copy_out(input, dim, out));
}

TEST_F(OpUnsqueezeTest, DimOutOfRangeDies) {
  TensorFactory<ScalarType::Int> tf;
  Tensor input = tf.make(/*sizes=*/{1, 1, 1}, /*data=*/{1});
  Tensor out = tf.ones(/*sizes=*/{1, 1, 1, 1});

  // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()]
  // Here input.dim == 3, so the range of legal dim for unsqueeze is [-4, 3]
  std::vector<int64_t> illegal_dims = {
      -10, -9, -8, -7, -6, -5, 4, 5, 6, 7, 8, 9, 10};
  std::vector<int64_t> legal_dims = {-4, -3, -2, -1, 0, 1, 2, 3};

  for (auto dim : legal_dims) {
    op_unsqueeze_copy_out(input, dim, out);
  }

  for (auto dim : illegal_dims) {
    ET_LOG(Info, "Checking dim %ld", dim);
    ET_EXPECT_KERNEL_FAILURE(context_, op_unsqueeze_copy_out(input, dim, out));
  }
}

#ifndef USE_ATEN_LIB
TEST_F(OpUnsqueezeTest, UpperBoundOutTensor) {
  TensorFactory<ScalarType::Float> tf;
  Tensor input = tf.make(/*sizes=*/{2, 4}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});
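  // The out tensor is allocated with an upper-bound capacity of {3, 4, 6};
  // DYNAMIC_BOUND lets the kernel resize it down to each expected shape below.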
  Tensor out =
      tf.zeros({3, 4, 6}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);

  // All valid dims given the shape of the input
  // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()]
  // Here input.dim == 2, so the range of legal dim for unsqueeze is [-3, 2]
  Tensor ref_out =
      tf.make(/*sizes=*/{1, 2, 4}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});
  op_unsqueeze_copy_out(input, -3, out);
  EXPECT_TENSOR_EQ(out, ref_out);

  ref_out = tf.make(/*sizes=*/{2, 1, 4}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});
  op_unsqueeze_copy_out(input, -2, out);
  EXPECT_TENSOR_EQ(out, ref_out);

  ref_out = tf.make(/*sizes=*/{2, 4, 1}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});
  op_unsqueeze_copy_out(input, -1, out);
  EXPECT_TENSOR_EQ(out, ref_out);

  ref_out = tf.make(/*sizes=*/{1, 2, 4}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});
  op_unsqueeze_copy_out(input, 0, out);
  EXPECT_TENSOR_EQ(out, ref_out);

  ref_out = tf.make(/*sizes=*/{2, 1, 4}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});
  op_unsqueeze_copy_out(input, 1, out);
  EXPECT_TENSOR_EQ(out, ref_out);

  ref_out = tf.make(/*sizes=*/{2, 4, 1}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});
  op_unsqueeze_copy_out(input, 2, out);
  EXPECT_TENSOR_EQ(out, ref_out);
}
#endif

/* %python
import torch
torch.manual_seed(0)
x = torch.rand(2, 4)
res = torch.unsqueeze(x, 1)
op = "op_unsqueeze_copy_out"
opt_extra_params = "1,"
dtype = "ScalarType::Float"
check = "EXPECT_TENSOR_EQ" */

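// The %python blocks record the script used to generate the hard-coded input
// and expected tensors in the dynamic-shape tests below.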
TEST_F(OpUnsqueezeTest, DynamicShapeUpperBoundSameAsExpected) {
  /* %python
  out_args = "{2, 1, 4}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {2, 4},
      {0.49625658988952637,
       0.7682217955589294,
       0.08847743272781372,
       0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.4900934100151062,
       0.8964447379112244});
  Tensor expected = tf.make(
      {2, 1, 4},
      {0.49625658988952637,
       0.7682217955589294,
       0.08847743272781372,
       0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.4900934100151062,
       0.8964447379112244});

  Tensor out =
      tf.zeros({2, 1, 4}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_unsqueeze_copy_out(x, 1, out);
  EXPECT_TENSOR_EQ(out, expected);
}

TEST_F(OpUnsqueezeTest, DynamicShapeUpperBoundLargerThanExpected) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  /* %python
  out_args = "{5, 5, 5}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {2, 4},
      {0.49625658988952637,
       0.7682217955589294,
       0.08847743272781372,
       0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.4900934100151062,
       0.8964447379112244});
  Tensor expected = tf.make(
      {2, 1, 4},
      {0.49625658988952637,
       0.7682217955589294,
       0.08847743272781372,
       0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.4900934100151062,
       0.8964447379112244});

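  // {5, 5, 5} is only an upper-bound capacity; the kernel is expected to
  // resize `out` down to the actual output shape {2, 1, 4}.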
  Tensor out =
      tf.zeros({5, 5, 5}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_unsqueeze_copy_out(x, 1, out);
  EXPECT_TENSOR_EQ(out, expected);
}

TEST_F(OpUnsqueezeTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  /* %python
  out_args = "{1, 1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND"
  %rewrite(unary_op) */

  TensorFactory<ScalarType::Float> tf;

  Tensor x = tf.make(
      {2, 4},
      {0.49625658988952637,
       0.7682217955589294,
       0.08847743272781372,
       0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.4900934100151062,
       0.8964447379112244});
  Tensor expected = tf.make(
      {2, 1, 4},
      {0.49625658988952637,
       0.7682217955589294,
       0.08847743272781372,
       0.13203048706054688,
       0.30742281675338745,
       0.6340786814689636,
       0.4900934100151062,
       0.8964447379112244});

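  // With DYNAMIC_UNBOUND the out tensor starts at {1, 1, 1} and the kernel is
  // expected to resize it to whatever shape the op produces.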
  Tensor out = tf.zeros(
      {1, 1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  op_unsqueeze_copy_out(x, 1, out);
  EXPECT_TENSOR_EQ(out, expected);
}
339