// xref: /aosp_15_r20/external/executorch/kernels/test/op_convolution_test.cpp (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
1 /*
2  * Copyright (c) Meta Platforms, Inc. and affiliates.
3  * All rights reserved.
4  *
5  * This source code is licensed under the BSD-style license found in the
6  * LICENSE file in the root directory of this source tree.
7  */
8 
9 #include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
10 #include <executorch/kernels/test/TestUtil.h>
11 #include <executorch/kernels/test/supported_features.h>
12 #include <executorch/runtime/core/exec_aten/exec_aten.h>
13 #include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
14 #include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
15 
16 #include <gtest/gtest.h>
17 
18 using namespace ::testing;
19 using exec_aten::ArrayRef;
20 using exec_aten::optional;
21 using exec_aten::ScalarType;
22 using exec_aten::Tensor;
23 using torch::executor::testing::TensorFactory;
24 
class OpConvOutTest : public OperatorTest {
 protected:
  // Thin wrapper over the generated convolution.out entry point. Forwards
  // the fixture's KernelRuntimeContext (context_) so individual tests don't
  // have to pass it explicitly. Returns `out` (written in place).
  Tensor& op_convolution_out(
      const Tensor& input,
      const Tensor& weight,
      const optional<Tensor>& bias,
      ArrayRef<int64_t> stride,
      ArrayRef<int64_t> padding,
      ArrayRef<int64_t> dilation,
      bool transposed,
      ArrayRef<int64_t> output_padding,
      int64_t groups,
      Tensor& out) {
    return torch::executor::aten::convolution_outf(
        context_,
        input,
        weight,
        bias,
        stride,
        padding,
        dilation,
        transposed,
        output_padding,
        groups,
        out);
  }

  /* Correctness Test Template for test code generation via Python */
  /* %python
  correctness_test_template = f"""
    {declare_tensor_factory("ScalarType::$DTYPE$", "tf")}

    {declare_tensor_make_t("input", "tf")}
    {declare_tensor_make_t("weight", "tf")}
    {declare_optional_tensor_make_t("bias", "tf")}
    {declare_tensor_make_t("expected", "tf")}
    Tensor out = tf.zeros($out_size$, $dynamism$);

    {declare_array_ref_t("stride", "int64_t")}
    {declare_array_ref_t("padding", "int64_t")}
    {declare_array_ref_t("dilation", "int64_t")}
    {declare_array_ref_t("output_padding", "int64_t")}

    op_convolution_out(
        input,
        weight,
        bias,
        stride,
        padding,
        dilation,
        $transposed$,
        output_padding,
        $groups$,
        out);
    EXPECT_TENSOR_CLOSE(out, expected);"""
  */

  /* %python
  import torch
  torch.manual_seed(0)
  input = (torch.randint(10, 100, (1, 2, 5)).to(torch.double) / 10.0);
  weight = (torch.randint(10, 100, (4, 2, 3)).to(torch.double) / 10.0);
  bias = torch.ones(4).to(torch.double)
  stride = [2]
  padding = [0]
  dilation = [1]
  transposed = False
  output_padding = [0]
  groups = 1
  expected = torch.nn.functional.conv1d(
      input, weight, bias, stride, padding, dilation, groups)

  DTYPE = "Float"
  out_size = "out_shape"
  dynamism = "dynamism"
  */

  // Runs a fixed 1-D convolution (stride 2, no padding, groups 1) into an
  // `out` tensor created with the caller-provided shape and dynamism, then
  // checks the result against values generated by the %python block above
  // via torch.nn.functional.conv1d. The true output shape is {1, 4, 2}, so
  // callers exercise resize behavior by passing other shapes.
  void test_dynamic_shape(
      const std::vector<int32_t>& out_shape,
      enum torch::executor::TensorShapeDynamism dynamism) {
    /* %python
    %past-rewrite(correctness_test_template) */

    TensorFactory<ScalarType::Float> tf;

    Tensor input =
        tf.make({1, 2, 5}, {5.4, 1.9, 9.3, 7.0, 5.3, 7.9, 1.7, 8.3, 4.7, 7.3});
    Tensor weight =
        tf.make({4, 2, 3}, {8.1, 6.6, 1.6, 4.9, 3.8, 6.6, 4.6, 2.8,
                            2.4, 1.3, 3.6, 3.9, 8.1, 8.4, 5.4, 5.1,
                            8.9, 9.9, 7.9, 1.0, 1.1, 8.2, 6.3, 7.0});
    optional<Tensor> bias(tf.make({4}, {1.0, 1.0, 1.0, 1.0}));
    Tensor expected = tf.make(
        {1, 4, 2},
        {172.11, 237.72, 102.24, 132.28, 248.51, 320.18, 189.38, 236.07});
    Tensor out = tf.zeros(out_shape, dynamism);

    int64_t stride[] = {2};
    int64_t padding[] = {0};
    int64_t dilation[] = {1};
    int64_t output_padding[] = {0};

    op_convolution_out(
        input,
        weight,
        bias,
        stride,
        padding,
        dilation,
        false,
        output_padding,
        1,
        out);
    EXPECT_TENSOR_CLOSE(out, expected);
  }
};
141 
// Alias fixture: correctness tests inherit all helpers from OpConvOutTest
// but run under their own test-name prefix.
class OpConvCorrectnessTest : public OpConvOutTest {};
143 
144 //
145 // Correctness Tests
146 //
147 
// Basic non-transposed 1-D convolution on Int tensors: input (1, 2, 5),
// weight (4, 2, 3), stride 2, no padding -> output (1, 4, 2). Expected
// values were precomputed for this fixed data.
TEST_F(OpConvCorrectnessTest, GenericSmokeTest) {
  TensorFactory<ScalarType::Int> tf;

  Tensor input = tf.make({1, 2, 5}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
  Tensor weight =
      tf.make({4, 2, 3}, {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
                          12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
  Tensor bias = tf.ones({4});
  Tensor expected = tf.make({1, 4, 2}, {80, 110, 206, 308, 332, 506, 458, 704});
  Tensor out = tf.zeros({1, 4, 2});

  int64_t stride_vals[] = {2};
  int64_t padding_vals[] = {0};
  int64_t dilation_vals[] = {1};
  int64_t output_padding_vals[] = {0};

  op_convolution_out(
      input,
      weight,
      optional<Tensor>(bias),
      exec_aten::ArrayRef<int64_t>{stride_vals, 1},
      exec_aten::ArrayRef<int64_t>{padding_vals, 1},
      exec_aten::ArrayRef<int64_t>{dilation_vals, 1},
      /*transposed=*/false,
      exec_aten::ArrayRef<int64_t>{output_padding_vals, 1},
      /*groups=*/int64_t(1),
      out);
  EXPECT_TENSOR_CLOSE(out, expected);
}
177 
178 /* %python
179 import torch
180 torch.manual_seed(0)
181 input = (torch.randint(10, 100, (1, 2, 5)).to(torch.double) / 10.0);
182 weight = (torch.randint(10, 100, (4, 2, 3)).to(torch.double) / 10.0);
183 bias = torch.ones(4).to(torch.double)
184 stride = [2]
185 padding = [2]
186 dilation = [1]
187 transposed = False
188 output_padding = [0]
189 groups = 1
190 expected = torch.nn.functional.conv1d(
191     input, weight, bias, stride, padding, dilation, groups)
192 
193 DTYPE = "Float"
194 out_size = expected.size()
195 dynamism = "torch::executor::TensorShapeDynamism::STATIC"
196 */
// 1-D convolution with padding=2 and stride=2, widening the output to
// (1, 4, 4); expected values come from the %python block above
// (torch.nn.functional.conv1d).
TEST_F(OpConvCorrectnessTest, NonZeroPadding) {
  /* %python
  %past-rewrite(correctness_test_template) */

  TensorFactory<ScalarType::Float> tf;

  Tensor input =
      tf.make({1, 2, 5}, {5.4, 1.9, 9.3, 7.0, 5.3, 7.9, 1.7, 8.3, 4.7, 7.3});
  Tensor weight = tf.make(
      {4, 2, 3}, {8.1, 6.6, 1.6, 4.9, 3.8, 6.6, 4.6, 2.8, 2.4, 1.3, 3.6, 3.9,
                  8.1, 8.4, 5.4, 5.1, 8.9, 9.9, 7.9, 1.0, 1.1, 8.2, 6.3, 7.0});
  optional<Tensor> bias(tf.make({4}, {1.0, 1.0, 1.0, 1.0}));
  Tensor expected = tf.make(
      {1, 4, 4},
      {61.78,
       172.11,
       237.72,
       79.7,
       44.77,
       102.24,
       132.28,
       34.87,
       108.37,
       248.51,
       320.18,
       81.16,
       62.24,
       189.38,
       236.07,
       102.73});
  Tensor out =
      tf.zeros({1, 4, 4}, torch::executor::TensorShapeDynamism::STATIC);

  int64_t stride[] = {2};
  int64_t padding[] = {2};
  int64_t dilation[] = {1};
  int64_t output_padding[] = {0};

  op_convolution_out(
      input,
      weight,
      bias,
      stride,
      padding,
      dilation,
      false,
      output_padding,
      1,
      out);
  EXPECT_TENSOR_CLOSE(out, expected);
}
248 
249 /* %python
250 import torch
251 torch.manual_seed(0)
252 input = (torch.randint(10, 100, (3, 2, 5)).to(torch.double) / 10.0);
253 weight = (torch.randint(10, 100, (4, 2, 3)).to(torch.double) / 10.0);
254 bias = torch.ones(4).to(torch.double)
255 stride = [2]
256 padding = [2]
257 dilation = [1]
258 transposed = False
259 output_padding = [0]
260 groups = 1
261 expected = torch.nn.functional.conv1d(
262     input, weight, bias, stride, padding, dilation, groups)
263 
264 DTYPE = "Float"
265 out_size = expected.size()
266 dynamism = "torch::executor::TensorShapeDynamism::STATIC"
267 */
// Same 1-D conv parameters as NonZeroPadding (stride 2, padding 2) but with
// batch size 3; expected values come from the %python block above
// (torch.nn.functional.conv1d).
TEST_F(OpConvCorrectnessTest, MultipleInputBatches) {
  /* %python
  %past-rewrite(correctness_test_template) */

  TensorFactory<ScalarType::Float> tf;

  Tensor input =
      tf.make({3, 2, 5}, {5.4, 1.9, 9.3, 7.0, 5.3, 7.9, 1.7, 8.3, 4.7, 7.3,
                          8.1, 6.6, 1.6, 4.9, 3.8, 6.6, 4.6, 2.8, 2.4, 1.3,
                          3.6, 3.9, 8.1, 8.4, 5.4, 5.1, 8.9, 9.9, 7.9, 1.0});
  Tensor weight = tf.make(
      {4, 2, 3}, {1.1, 8.2, 6.3, 7.0, 6.5, 2.5, 9.2, 9.9, 8.1, 9.8, 4.8, 1.3,
                  2.6, 8.9, 1.1, 8.7, 2.3, 3.5, 4.2, 7.1, 5.0, 3.9, 3.3, 4.1});
  optional<Tensor> bias(tf.make({4}, {1.0, 1.0, 1.0, 1.0}));
  Tensor expected = tf.make(
      {3, 4, 4}, {54.77, 168.21, 208.92, 57.93, 55.01, 241.19, 312.18, 121.3,
                  34.59, 143.87, 201.88, 78.29, 60.39, 154.12, 194.07, 51.73,
                  68.53, 157.21, 105.33, 14.28, 75.19, 244.22, 135.66, 48.70,
                  33.01, 160.36, 87.38,  22.19, 68.56, 142.28, 85.68,  22.03,
                  36.43, 206.27, 235.96, 13.94, 36.79, 243.91, 338.66, 60.48,
                  22.81, 153.47, 210.56, 23.74, 39.91, 174.16, 190.44, 27.58});
  Tensor out =
      tf.zeros({3, 4, 4}, torch::executor::TensorShapeDynamism::STATIC);

  int64_t stride[] = {2};
  int64_t padding[] = {2};
  int64_t dilation[] = {1};
  int64_t output_padding[] = {0};

  op_convolution_out(
      input,
      weight,
      bias,
      stride,
      padding,
      dilation,
      false,
      output_padding,
      1,
      out);
  EXPECT_TENSOR_CLOSE(out, expected);
}
310 
311 /* %python
312 import torch
313 torch.manual_seed(0)
314 input = (torch.randint(10, 100, (1, 4, 8, 8)).to(torch.double) / 10.0);
315 weight = (torch.randint(10, 100, (2, 4, 3, 3)).to(torch.double) / 10.0);
316 bias = torch.ones(2).to(torch.double)
317 stride = [2, 2]
318 padding = [1, 1]
319 dilation = [1, 1]
320 transposed = False
321 output_padding = [0]
322 groups = 1
323 expected = torch.nn.functional.conv2d(
324     input, weight, bias, stride, padding, dilation, groups)
325 
326 DTYPE = "Float"
327 out_size = expected.size()
328 dynamism = "torch::executor::TensorShapeDynamism::STATIC"
329 */
// 2-D convolution: input (1, 4, 8, 8), weight (2, 4, 3, 3), stride {2, 2},
// padding {1, 1} -> output (1, 2, 4, 4). Expected values come from the
// %python block above (torch.nn.functional.conv2d).
// NOTE(review): output_padding is passed with a single element even though
// the conv is 2-D; with transposed=false it appears unused — confirm the
// kernel ignores it in that case.
TEST_F(OpConvCorrectnessTest, 2DSanityCheck) {
  /* %python
  %past-rewrite(correctness_test_template) */

  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.make(
      {1, 4, 8, 8},
      {5.4, 1.9, 9.3, 7.0, 5.3, 7.9, 1.7, 8.3, 4.7, 7.3, 8.1, 6.6, 1.6, 4.9,
       3.8, 6.6, 4.6, 2.8, 2.4, 1.3, 3.6, 3.9, 8.1, 8.4, 5.4, 5.1, 8.9, 9.9,
       7.9, 1.0, 1.1, 8.2, 6.3, 7.0, 6.5, 2.5, 9.2, 9.9, 8.1, 9.8, 4.8, 1.3,
       2.6, 8.9, 1.1, 8.7, 2.3, 3.5, 4.2, 7.1, 5.0, 3.9, 3.3, 4.1, 8.1, 6.0,
       3.3, 8.6, 6.6, 5.7, 5.9, 8.6, 7.3, 3.4, 9.5, 6.0, 6.8, 6.2, 1.8, 3.2,
       2.7, 7.5, 7.0, 8.0, 2.8, 5.1, 4.9, 8.6, 1.1, 9.0, 4.2, 9.9, 2.4, 5.3,
       4.9, 9.3, 2.9, 5.3, 8.9, 4.8, 9.5, 2.3, 9.2, 3.8, 6.5, 9.6, 2.6, 3.5,
       2.7, 9.2, 1.5, 7.6, 5.6, 8.5, 5.4, 7.0, 8.8, 5.1, 2.7, 1.8, 7.5, 4.4,
       2.4, 4.8, 1.4, 3.4, 8.9, 4.0, 4.7, 3.4, 2.5, 8.3, 8.3, 1.7, 2.3, 9.0,
       2.9, 2.9, 5.3, 7.1, 3.8, 7.1, 1.7, 9.8, 2.4, 4.1, 6.0, 8.4, 4.0, 1.4,
       7.9, 7.7, 4.0, 4.0, 9.1, 7.4, 4.9, 3.9, 3.5, 8.9, 2.2, 3.2, 8.2, 7.1,
       5.4, 2.9, 8.1, 5.1, 3.0, 9.3, 2.0, 3.6, 8.7, 6.6, 9.9, 3.1, 7.6, 3.4,
       4.1, 5.0, 8.5, 9.2, 7.5, 5.8, 6.1, 5.8, 4.1, 4.2, 9.8, 2.0, 7.3, 2.8,
       7.9, 8.2, 9.7, 9.0, 4.8, 7.8, 6.6, 5.8, 4.5, 7.8, 4.6, 8.5, 7.2, 4.4,
       1.2, 7.7, 2.2, 2.4, 2.9, 1.8, 2.5, 2.6, 3.4, 6.3, 9.3, 8.4, 3.0, 8.2,
       1.5, 2.1, 3.2, 5.8, 5.2, 6.4, 1.8, 7.3, 7.6, 1.5, 2.8, 7.8, 9.0, 5.5,
       4.1, 2.3, 3.0, 8.8, 7.1, 7.1, 9.1, 3.7, 6.2, 6.2, 2.2, 1.3, 4.3, 5.6,
       8.7, 6.8, 5.0, 9.5, 5.0, 5.3, 5.5, 4.5, 3.3, 6.6, 6.2, 8.2, 5.5, 8.5,
       2.9, 9.4, 8.3, 8.3});
  Tensor weight = tf.make(
      {2, 4, 3, 3},
      {4.7, 1.3, 7.8, 3.0, 9.7, 2.5, 3.8, 5.2, 4.4, 7.7, 2.3, 6.2,
       1.5, 9.5, 6.3, 4.9, 8.1, 9.8, 2.0, 6.6, 4.7, 2.4, 6.7, 5.6,
       2.9, 1.3, 7.8, 5.4, 2.4, 6.9, 6.4, 1.4, 8.9, 7.9, 7.5, 6.7,
       4.0, 8.3, 5.2, 4.0, 4.8, 7.6, 7.1, 5.9, 9.1, 9.6, 3.9, 6.8,
       7.6, 2.5, 8.1, 7.3, 7.5, 7.5, 9.3, 5.6, 5.2, 4.7, 4.5, 8.7,
       8.7, 1.3, 4.1, 4.5, 4.9, 6.5, 7.9, 4.6, 7.0, 8.0, 1.6, 3.5});
  optional<Tensor> bias(tf.make({2}, {1.0, 1.0}));
  Tensor expected = tf.make(
      {1, 2, 4, 4},
      {642.33, 714.6,   687.96,  717.12,  859.79, 939.27,  996.79,  1189.59,
       700.73, 1083.28, 1010.33, 1167.78, 776.33, 1138.92, 1073.43, 1140.64,
       539.83, 851.42,  754.16,  815.01,  822.66, 1191.95, 1063.46, 1330.28,
       662.97, 1240.69, 1254.52, 1281.46, 766.25, 1273.41, 1148.57, 1217.47});
  Tensor out =
      tf.zeros({1, 2, 4, 4}, torch::executor::TensorShapeDynamism::STATIC);

  int64_t stride[] = {2, 2};
  int64_t padding[] = {1, 1};
  int64_t dilation[] = {1, 1};
  int64_t output_padding[] = {0};

  op_convolution_out(
      input,
      weight,
      bias,
      stride,
      padding,
      dilation,
      false,
      output_padding,
      1,
      out);
  EXPECT_TENSOR_CLOSE(out, expected);
}
393 
// Channels-last variant of 2DSanityCheck. The same raw data blobs are fed
// through make_channels_last, so they are interpreted in NHWC element order
// and the expected numeric results differ from the contiguous test.
// NOTE(review): no %python generator block precedes this test; the expected
// values were presumably produced offline — confirm before regenerating.
TEST_F(OpConvCorrectnessTest, 2DSanityCheckChannelsLast) {
  /* %python
  %past-rewrite(correctness_test_template) */

  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.make_channels_last(
      {1, 4, 8, 8},
      {5.4, 1.9, 9.3, 7.0, 5.3, 7.9, 1.7, 8.3, 4.7, 7.3, 8.1, 6.6, 1.6, 4.9,
       3.8, 6.6, 4.6, 2.8, 2.4, 1.3, 3.6, 3.9, 8.1, 8.4, 5.4, 5.1, 8.9, 9.9,
       7.9, 1.0, 1.1, 8.2, 6.3, 7.0, 6.5, 2.5, 9.2, 9.9, 8.1, 9.8, 4.8, 1.3,
       2.6, 8.9, 1.1, 8.7, 2.3, 3.5, 4.2, 7.1, 5.0, 3.9, 3.3, 4.1, 8.1, 6.0,
       3.3, 8.6, 6.6, 5.7, 5.9, 8.6, 7.3, 3.4, 9.5, 6.0, 6.8, 6.2, 1.8, 3.2,
       2.7, 7.5, 7.0, 8.0, 2.8, 5.1, 4.9, 8.6, 1.1, 9.0, 4.2, 9.9, 2.4, 5.3,
       4.9, 9.3, 2.9, 5.3, 8.9, 4.8, 9.5, 2.3, 9.2, 3.8, 6.5, 9.6, 2.6, 3.5,
       2.7, 9.2, 1.5, 7.6, 5.6, 8.5, 5.4, 7.0, 8.8, 5.1, 2.7, 1.8, 7.5, 4.4,
       2.4, 4.8, 1.4, 3.4, 8.9, 4.0, 4.7, 3.4, 2.5, 8.3, 8.3, 1.7, 2.3, 9.0,
       2.9, 2.9, 5.3, 7.1, 3.8, 7.1, 1.7, 9.8, 2.4, 4.1, 6.0, 8.4, 4.0, 1.4,
       7.9, 7.7, 4.0, 4.0, 9.1, 7.4, 4.9, 3.9, 3.5, 8.9, 2.2, 3.2, 8.2, 7.1,
       5.4, 2.9, 8.1, 5.1, 3.0, 9.3, 2.0, 3.6, 8.7, 6.6, 9.9, 3.1, 7.6, 3.4,
       4.1, 5.0, 8.5, 9.2, 7.5, 5.8, 6.1, 5.8, 4.1, 4.2, 9.8, 2.0, 7.3, 2.8,
       7.9, 8.2, 9.7, 9.0, 4.8, 7.8, 6.6, 5.8, 4.5, 7.8, 4.6, 8.5, 7.2, 4.4,
       1.2, 7.7, 2.2, 2.4, 2.9, 1.8, 2.5, 2.6, 3.4, 6.3, 9.3, 8.4, 3.0, 8.2,
       1.5, 2.1, 3.2, 5.8, 5.2, 6.4, 1.8, 7.3, 7.6, 1.5, 2.8, 7.8, 9.0, 5.5,
       4.1, 2.3, 3.0, 8.8, 7.1, 7.1, 9.1, 3.7, 6.2, 6.2, 2.2, 1.3, 4.3, 5.6,
       8.7, 6.8, 5.0, 9.5, 5.0, 5.3, 5.5, 4.5, 3.3, 6.6, 6.2, 8.2, 5.5, 8.5,
       2.9, 9.4, 8.3, 8.3});
  Tensor weight = tf.make_channels_last(
      {2, 4, 3, 3},
      {4.7, 1.3, 7.8, 3.0, 9.7, 2.5, 3.8, 5.2, 4.4, 7.7, 2.3, 6.2,
       1.5, 9.5, 6.3, 4.9, 8.1, 9.8, 2.0, 6.6, 4.7, 2.4, 6.7, 5.6,
       2.9, 1.3, 7.8, 5.4, 2.4, 6.9, 6.4, 1.4, 8.9, 7.9, 7.5, 6.7,
       4.0, 8.3, 5.2, 4.0, 4.8, 7.6, 7.1, 5.9, 9.1, 9.6, 3.9, 6.8,
       7.6, 2.5, 8.1, 7.3, 7.5, 7.5, 9.3, 5.6, 5.2, 4.7, 4.5, 8.7,
       8.7, 1.3, 4.1, 4.5, 4.9, 6.5, 7.9, 4.6, 7.0, 8.0, 1.6, 3.5});
  optional<Tensor> bias(tf.make({2}, {1.0, 1.0}));
  Tensor expected = tf.make_channels_last(
      {1, 2, 4, 4},
      {624.92, 656.07, 710.91,  800.45,  622.48,  596.14,  831.26,  882.43,
       812.8,  947.49, 1069.65, 1155.81, 964.84,  1057.19, 1121.77, 1328.68,
       748.23, 799.7,  1090.23, 1203.45, 1043.71, 1124.75, 1140.41, 1265.35,
       688.62, 807.57, 1073.07, 1109.53, 1110,    1221.82, 1210.86, 1324.26});
  Tensor out = tf.full_channels_last({1, 2, 4, 4}, 0);

  int64_t stride[] = {2, 2};
  int64_t padding[] = {1, 1};
  int64_t dilation[] = {1, 1};
  int64_t output_padding[] = {0};

  op_convolution_out(
      input,
      weight,
      bias,
      stride,
      padding,
      dilation,
      false,
      output_padding,
      1,
      out);

  EXPECT_TENSOR_CLOSE(out, expected);
}
457 
// `out` already has the exact result shape {1, 4, 2}; with DYNAMIC_BOUND
// dynamism the kernel should succeed without resizing.
TEST_F(OpConvOutTest, DynamicShapeUpperBoundSameAsExpected) {
  test_dynamic_shape(
      {1, 4, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}
462 
// `out` is allocated larger than the result shape; with DYNAMIC_BOUND
// dynamism the kernel should shrink it to {1, 4, 2}.
TEST_F(OpConvOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  test_dynamic_shape(
      {10, 10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}
467 
// `out` starts smaller than the result shape; only kernels that support
// output resizing (DYNAMIC_UNBOUND) can grow it, so skip otherwise.
TEST_F(OpConvOutTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape unbound not supported";
  }
  test_dynamic_shape(
      {1, 1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
}
475 
// With groups=2 the weight's per-group channel count (3) implies 6 input
// channels, but the input has only 4 — the kernel must report failure for
// both the regular and the transposed path.
TEST_F(OpConvCorrectnessTest, InvalidInputShape) {
  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.ones({2, 4, 4, 5});
  Tensor weight = tf.ones({8, 3, 2, 2});
  optional<Tensor> bias;
  Tensor out = tf.zeros({2, 8, 3, 4});

  int64_t stride_vals[1] = {1};
  int64_t padding_vals[1] = {0};
  int64_t dilation_vals[1] = {1};
  int64_t output_padding_vals[1] = {0};
  const int64_t groups = 2;

  // Exercise both values of `transposed` with otherwise identical args.
  for (bool transposed : {false, true}) {
    ET_EXPECT_KERNEL_FAILURE(
        context_,
        op_convolution_out(
            input,
            weight,
            bias,
            exec_aten::ArrayRef<int64_t>{stride_vals, 1},
            exec_aten::ArrayRef<int64_t>{padding_vals, 1},
            exec_aten::ArrayRef<int64_t>{dilation_vals, 1},
            transposed,
            exec_aten::ArrayRef<int64_t>{output_padding_vals, 1},
            groups,
            out));
  }
}
518 
// Transposed grouped convolution (groups=2) with constant input (2.0) and
// weight (0.5), no bias, and default-like stride/padding/dilation. `out` is
// pre-filled with 0.7 rather than zeros before the kernel writes it.
TEST_F(OpConvCorrectnessTest, TransposedDefaultParams) {
  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.full({2, 4, 3, 2}, 2.0);
  Tensor weight = tf.full({4, 1, 2, 2}, 0.5);
  optional<Tensor> bias;  // intentionally absent
  Tensor out = tf.full({2, 2, 4, 3}, 0.7);
  Tensor expected =
      tf.make({2, 2, 4, 3}, {2, 4, 2, 4, 8, 4, 4, 8, 4, 2, 4, 2, 2, 4, 2, 4,
                             8, 4, 4, 8, 4, 2, 4, 2, 2, 4, 2, 4, 8, 4, 4, 8,
                             4, 2, 4, 2, 2, 4, 2, 4, 8, 4, 4, 8, 4, 2, 4, 2});

  int64_t ones[1] = {1};
  int64_t zeros[1] = {0};

  op_convolution_out(
      input,
      weight,
      bias,
      /*stride=*/exec_aten::ArrayRef<int64_t>{ones, 1},
      /*padding=*/exec_aten::ArrayRef<int64_t>{zeros, 1},
      /*dilation=*/exec_aten::ArrayRef<int64_t>{ones, 1},
      /*transposed=*/true,
      /*output_padding=*/exec_aten::ArrayRef<int64_t>{zeros, 1},
      /*groups=*/2,
      out);

  EXPECT_TENSOR_CLOSE(out, expected);
}
552 
// Transposed grouped convolution with non-default stride/padding/dilation
// and a nonzero output_padding. bias has 3 entries; for transposed conv the
// output channel count is weight.size(1) * groups = 1 * 3 = 3 — TODO
// confirm against the kernel's shape computation.
TEST_F(OpConvCorrectnessTest, TransposedNonDefaultParams) {
  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.full({2, 6, 4, 5}, 2.0);
  Tensor weight = tf.full({6, 1, 2, 2}, 0.5);
  Tensor bias = tf.make({3}, {1, 2, 3});
  // Pre-filled with 0.7 (not zeros) before the kernel writes it.
  Tensor out = tf.full({2, 3, 3, 6}, 0.7);
  Tensor expected = tf.make(
      {2, 3, 3, 6},
      {1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 3, 3, 1, 3, 3, 1, 3, 3, 2, 2, 2, 2,
       2, 2, 2, 4, 4, 2, 4, 4, 2, 4, 4, 2, 4, 4, 3, 3, 3, 3, 3, 3, 3, 5,
       5, 3, 5, 5, 3, 5, 5, 3, 5, 5, 1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 3, 3,
       1, 3, 3, 1, 3, 3, 2, 2, 2, 2, 2, 2, 2, 4, 4, 2, 4, 4, 2, 4, 4, 2,
       4, 4, 3, 3, 3, 3, 3, 3, 3, 5, 5, 3, 5, 5, 3, 5, 5, 3, 5, 5});

  int64_t stride[1] = {3};
  int64_t padding[1] = {7};
  int64_t dilation[1] = {5};
  bool transposed = true;
  int64_t output_padding[1] = {2};
  int64_t groups = 3;

  op_convolution_out(
      input,
      weight,
      exec_aten::optional<Tensor>(bias),
      exec_aten::ArrayRef<int64_t>{stride, 1},
      exec_aten::ArrayRef<int64_t>{padding, 1},
      exec_aten::ArrayRef<int64_t>{dilation, 1},
      transposed,
      exec_aten::ArrayRef<int64_t>{output_padding, 1},
      groups,
      out);

  EXPECT_TENSOR_CLOSE(out, expected);
}
589 
590 template <typename T>
get_channels_last_data(const Tensor & t)591 std::vector<T> get_channels_last_data(const Tensor& t) {
592   const std::vector<int32_t> sizes(t.sizes().begin(), t.sizes().end());
593   std::vector<T> contiguous_data(
594       t.const_data_ptr<T>(), t.const_data_ptr<T>() + t.numel());
595   std::vector<T> channels_last_data(t.numel());
596   int32_t N = sizes[0];
597   int32_t C = sizes[1];
598   int32_t H = sizes[2];
599   int32_t W = sizes[3];
600   for (int32_t n = 0; n < N; ++n) {
601     for (int32_t c = 0; c < C; ++c) {
602       for (int32_t h = 0; h < H; ++h) {
603         for (int32_t w = 0; w < W; ++w) {
604           // Calculate the index in the original blob
605           int32_t old_index = ((n * C + c) * H + h) * W + w;
606           // Calculate the index in the new blob
607           int32_t new_index = ((n * H + h) * W + w) * C + c;
608           // Copy the data
609           channels_last_data[new_index] = contiguous_data[old_index];
610         }
611       }
612     }
613   }
614   return channels_last_data;
615 }
616 
// Channels-last variant of TransposedDefaultParams: same constant inputs
// and parameters, with the contiguous expected values permuted into NHWC
// order via get_channels_last_data().
TEST_F(OpConvCorrectnessTest, TransposedDefaultParamsChannelsLast) {
  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.full_channels_last({2, 4, 3, 2}, 2.0);
  Tensor weight = tf.full_channels_last({4, 1, 2, 2}, 0.5);
  optional<Tensor> bias;  // intentionally absent
  Tensor out = tf.full_channels_last({2, 2, 4, 3}, 0.7);
  Tensor expected =
      tf.make({2, 2, 4, 3}, {2, 4, 2, 4, 8, 4, 4, 8, 4, 2, 4, 2, 2, 4, 2, 4,
                             8, 4, 4, 8, 4, 2, 4, 2, 2, 4, 2, 4, 8, 4, 4, 8,
                             4, 2, 4, 2, 2, 4, 2, 4, 8, 4, 4, 8, 4, 2, 4, 2});

  const std::vector<int32_t> expected_sizes(
      expected.sizes().begin(), expected.sizes().end());
  Tensor expected_channels_last = tf.make_channels_last(
      expected_sizes, get_channels_last_data<float>(expected));

  int64_t ones[1] = {1};
  int64_t zeros[1] = {0};

  op_convolution_out(
      input,
      weight,
      bias,
      /*stride=*/exec_aten::ArrayRef<int64_t>{ones, 1},
      /*padding=*/exec_aten::ArrayRef<int64_t>{zeros, 1},
      /*dilation=*/exec_aten::ArrayRef<int64_t>{ones, 1},
      /*transposed=*/true,
      /*output_padding=*/exec_aten::ArrayRef<int64_t>{zeros, 1},
      /*groups=*/2,
      out);

  EXPECT_TENSOR_CLOSE(out, expected_channels_last);
}
657 
// Channels-last variant of TransposedNonDefaultParams: same data and
// parameters, with the contiguous expected values permuted into NHWC order
// via get_channels_last_data().
TEST_F(OpConvCorrectnessTest, TransposedNonDefaultParamsChannelsLast) {
  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.full_channels_last({2, 6, 4, 5}, 2.0);
  Tensor weight = tf.full_channels_last({6, 1, 2, 2}, 0.5);
  Tensor bias = tf.make({3}, {1, 2, 3});
  // Pre-filled with 0.7 (not zeros) before the kernel writes it.
  Tensor out = tf.full_channels_last({2, 3, 3, 6}, 0.7);
  Tensor expected = tf.make(
      {2, 3, 3, 6},
      {1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 3, 3, 1, 3, 3, 1, 3, 3, 2, 2, 2, 2,
       2, 2, 2, 4, 4, 2, 4, 4, 2, 4, 4, 2, 4, 4, 3, 3, 3, 3, 3, 3, 3, 5,
       5, 3, 5, 5, 3, 5, 5, 3, 5, 5, 1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 3, 3,
       1, 3, 3, 1, 3, 3, 2, 2, 2, 2, 2, 2, 2, 4, 4, 2, 4, 4, 2, 4, 4, 2,
       4, 4, 3, 3, 3, 3, 3, 3, 3, 5, 5, 3, 5, 5, 3, 5, 5, 3, 5, 5});

  const std::vector<int32_t> sizes(
      expected.sizes().begin(), expected.sizes().end());
  std::vector<float> channels_last_data =
      get_channels_last_data<float>(expected);
  Tensor expected_channels_last =
      tf.make_channels_last(sizes, channels_last_data);

  int64_t stride[1] = {3};
  int64_t padding[1] = {7};
  int64_t dilation[1] = {5};
  bool transposed = true;
  int64_t output_padding[1] = {2};
  int64_t groups = 3;

  op_convolution_out(
      input,
      weight,
      exec_aten::optional<Tensor>(bias),
      exec_aten::ArrayRef<int64_t>{stride, 1},
      exec_aten::ArrayRef<int64_t>{padding, 1},
      exec_aten::ArrayRef<int64_t>{dilation, 1},
      transposed,
      exec_aten::ArrayRef<int64_t>{output_padding, 1},
      groups,
      out);

  EXPECT_TENSOR_CLOSE(out, expected_channels_last);
}
701 
// NOTE(review): for a transposed convolution, output_padding must be
// strictly smaller than the stride (or dilation); here output_padding=5
// with stride=3, so the kernel is expected to report failure.
TEST_F(OpConvCorrectnessTest, InvalidOutputPadding) {
  TensorFactory<ScalarType::Float> tf;

  Tensor input = tf.full({2, 6, 4, 5}, 2.0);
  Tensor weight = tf.full({6, 1, 2, 2}, 0.5);
  Tensor bias = tf.make({3}, {1, 2, 3});
  Tensor out = tf.zeros({2, 3, 6, 9});

  int64_t stride_vals[1] = {3};
  int64_t padding_vals[1] = {7};
  int64_t dilation_vals[1] = {5};
  int64_t output_padding_vals[1] = {5};  // >= stride -> invalid

  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_convolution_out(
          input,
          weight,
          optional<Tensor>(bias),
          exec_aten::ArrayRef<int64_t>{stride_vals, 1},
          exec_aten::ArrayRef<int64_t>{padding_vals, 1},
          exec_aten::ArrayRef<int64_t>{dilation_vals, 1},
          /*transposed=*/true,
          exec_aten::ArrayRef<int64_t>{output_padding_vals, 1},
          /*groups=*/3,
          out));
}
731