xref: /aosp_15_r20/external/executorch/backends/arm/test/ops/test_conv2d.py (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
# Copyright 2024 Arm Limited and/or its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import unittest

from typing import List, Optional, Tuple, Union

import torch
from executorch.backends.arm.test import common

from executorch.backends.arm.test.tester.arm_tester import ArmTester
from executorch.exir.backend.compile_spec_schema import CompileSpec
from parameterized import parameterized


class Conv2d(torch.nn.Module):
    """
    Creates one or many chained 2D-convolutions. For multiple convolutions, the
    respective parameters are provided as lists; see the illustrative example
    after this class definition.
    """

    def __init__(
        self,
        inputs: Optional[torch.Tensor] = None,
        height=8,
        width=8,
        nbr_conv=1,  # Number of chained convs
        in_channels: Union[List, int, None] = None,
        out_channels: Union[List, int, None] = None,
        kernel_size: Union[List, Tuple, None] = None,
        stride: Union[List, Tuple, None] = None,
        padding: Union[List, Tuple, None] = None,
        dilation: Union[List, Tuple, None] = None,
        groups: Union[List, int, None] = None,
        bias: Union[List, bool, None] = None,
        padding_mode: Union[List, str, None] = None,
        batches=1,
        dtype=torch.float,
    ):
        super().__init__()
        self.nbr_convs = nbr_conv

        # Handle default values
        in_channels = [2] * nbr_conv if in_channels is None else in_channels
        out_channels = [1] * nbr_conv if out_channels is None else out_channels
        kernel_size = [(3, 3)] * nbr_conv if kernel_size is None else kernel_size
        stride = [(2, 2)] * nbr_conv if stride is None else stride
        padding = [(1, 1)] * nbr_conv if padding is None else padding
        dilation = [(1, 1)] * nbr_conv if dilation is None else dilation
        groups = [1] * nbr_conv if groups is None else groups
        bias = [True] * nbr_conv if bias is None else bias
        padding_mode = ["zeros"] * nbr_conv if padding_mode is None else padding_mode

        # This allows the input parameters to be either a single value or a list,
        # as the type hints imply
        if not isinstance(in_channels, List):
            in_channels = [in_channels]
        if not isinstance(out_channels, List):
            out_channels = [out_channels]
        if not isinstance(kernel_size, List):
            kernel_size = [kernel_size]
        if not isinstance(stride, List):
            stride = [stride]
        if not isinstance(padding, List):
            padding = [padding]
        if not isinstance(dilation, List):
            dilation = [dilation]
        if not isinstance(groups, List):
            groups = [groups]
        if not isinstance(bias, List):
            bias = [bias]
        if not isinstance(padding_mode, List):
            padding_mode = [padding_mode]

        # Generate test data if not provided
        if inputs is None:
            self.inputs = (
                torch.randn(batches, in_channels[0], height, width).to(dtype),
            )
        else:
            self.inputs = (inputs,)

        # Build chain of convs
        for i in range(self.nbr_convs):
            setattr(
                self,
                f"conv_{i}",
                torch.nn.Conv2d(
                    in_channels=in_channels[i],
                    out_channels=out_channels[i],
                    kernel_size=kernel_size[i],
                    stride=stride[i],
                    padding=padding[i],
                    dilation=dilation[i],
                    groups=groups[i],
                    bias=bias[i],
                    padding_mode=padding_mode[i],
                ).to(dtype),
            )

    def get_inputs(self):
        return self.inputs

    def forward(self, x):
        for i in range(self.nbr_convs):
            conv = getattr(self, f"conv_{i}")
            x = conv(x)
        return x

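# Illustrative usage (a sketch only, not used by the tests below; the name
# `example_chain` is hypothetical): list entry i configures the i-th
# convolution in the chain, so a two-conv chain can be built as
#
#   example_chain = Conv2d(
#       nbr_conv=2,
#       in_channels=[3, 8],
#       out_channels=[8, 4],
#       kernel_size=[(3, 3), (3, 3)],
#       stride=[1, 1],
#       padding=[1, 1],
#   )
#   output = example_chain(example_chain.get_inputs()[0])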

conv2d_2x2_3x2x40x40_nobias = Conv2d(
    in_channels=2,
    out_channels=3,
    kernel_size=(2, 2),
    stride=1,
    bias=False,
    padding=0,
    width=40,
    height=40,
    batches=3,
)

conv2d_3x3_1x3x256x256_st1 = Conv2d(
    in_channels=3,
    out_channels=10,
    kernel_size=(3, 3),
    stride=1,
    padding=0,
    width=256,
    height=256,
    batches=1,
)

conv2d_3x3_1x3x12x12_st2_pd1 = Conv2d(
    in_channels=3,
    out_channels=4,
    kernel_size=(3, 3),
    stride=2,
    padding=1,
    width=12,
    height=12,
    batches=1,
)

conv2d_1x1_1x2x128x128_st1 = Conv2d(
    in_channels=2,
    out_channels=1,
    kernel_size=(1, 1),
    stride=1,
    padding=0,
    width=128,
    height=128,
    batches=1,
)

conv2d_2x2_1x1x14x13_st2 = Conv2d(
    in_channels=1,
    out_channels=1,
    kernel_size=(2, 2),
    stride=2,
    padding=0,
    width=14,
    height=13,
    batches=1,
)

conv2d_5x5_3x2x128x128_st1 = Conv2d(
    in_channels=2,
    out_channels=3,
    kernel_size=(5, 5),
    stride=1,
    padding=0,
    width=128,
    height=128,
    batches=3,
)

conv2d_3x3_1x3x224x224_st2_pd1 = Conv2d(
    in_channels=3,
    out_channels=16,
    kernel_size=(3, 3),
    stride=2,
    padding=1,
    width=224,
    height=224,
    batches=1,
)

conv2d_5x5_1x3x14x15_st3_pd1 = Conv2d(
    in_channels=3,
    out_channels=16,
    kernel_size=(5, 5),
    stride=3,
    padding=1,
    width=14,
    height=15,
    batches=1,
)


two_conv2d_nobias = Conv2d(
    nbr_conv=2,
    width=256,
    height=256,
    in_channels=[3, 10],
    out_channels=[10, 15],
    kernel_size=[(5, 5), (5, 5)],
    stride=[1, 1],
    padding=[0, 0],
    bias=[False, False],
    batches=1,
)

two_conv2d = Conv2d(
    nbr_conv=2,
    width=256,
    height=256,
    in_channels=[3, 10],
    out_channels=[10, 15],
    kernel_size=[(5, 5), (5, 5)],
    stride=[1, 1],
    padding=[0, 0],
    bias=[True, True],
    batches=1,
)

# Shenanigan to get a nicer output when a test fails. With unittest it looks like:
# FAIL: test_conv2d_tosa_BI_2_3x3_1x3x12x12_st2_pd1
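# The test names encode the kernel size, the input shape (NxCxHxW) and, where
# relevant, stride/padding or other properties, e.g. "3x3_1x3x12x12_st2_pd1"
# is a 3x3 kernel applied to a 1x3x12x12 input with stride 2 and padding 1.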
testsuite = [
    ("2x2_3x2x40x40_nobias", conv2d_2x2_3x2x40x40_nobias),
    ("3x3_1x3x256x256_st1", conv2d_3x3_1x3x256x256_st1),
    ("3x3_1x3x12x12_st2_pd1", conv2d_3x3_1x3x12x12_st2_pd1),
    ("1x1_1x2x128x128_st1", conv2d_1x1_1x2x128x128_st1),
    ("2x2_1x1x14x13_st2_needs_adjust_pass", conv2d_2x2_1x1x14x13_st2),
    ("conv2d_5x5_1x3x14x15_st3_pd1_needs_adjust_pass", conv2d_5x5_1x3x14x15_st3_pd1),
    ("5x5_3x2x128x128_st1", conv2d_5x5_3x2x128x128_st1),
    ("3x3_1x3x224x224_st2_pd1", conv2d_3x3_1x3x224x224_st2_pd1),
    ("two_conv2d_nobias", two_conv2d_nobias),
    ("two_conv2d", two_conv2d),
]


class TestConv2D(unittest.TestCase):
    """Tests Conv2D, both single ops and multiple convolutions in series."""

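    # The helpers below cover three lowering flows, all of which check that the
    # convolution is fully delegated: the TOSA MI pipeline lowers the float
    # model and compares the lowered outputs against a reference run; the TOSA
    # BI pipeline quantizes first and compares with qtol=1; the Ethos-U pipeline
    # (U55/U85) quantizes and lowers with the given compile spec but stops after
    # to_executorch() without comparing outputs.
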
    def _test_conv2d_tosa_MI_pipeline(
        self, module: torch.nn.Module, test_data: Tuple[torch.Tensor]
    ):
        (
            ArmTester(
                module,
                example_inputs=test_data,
                compile_spec=common.get_tosa_compile_spec(
                    "TOSA-0.80.0+MI", permute_memory_to_nhwc=True
                ),
            )
            .export()
            .to_edge()
            .partition()
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"])
            .to_executorch()
            .run_method_and_compare_outputs(inputs=test_data)
        )

    def _test_conv2d_tosa_BI_pipeline(
        self,
        module: torch.nn.Module,
        test_data: Tuple[torch.Tensor],
    ):
        (
            ArmTester(
                module,
                example_inputs=test_data,
                compile_spec=common.get_tosa_compile_spec(
                    "TOSA-0.80.0+BI", permute_memory_to_nhwc=True
                ),
            )
            .quantize()
            .export()
            .to_edge()
            .partition()
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"])
            .to_executorch()
            .run_method_and_compare_outputs(inputs=test_data, qtol=1)
        )

    def _test_conv2d_ethosu_BI_pipeline(
        self,
        compile_spec: CompileSpec,
        module: torch.nn.Module,
        test_data: Tuple[torch.Tensor],
    ):
        (
            ArmTester(
                module,
                example_inputs=test_data,
                compile_spec=compile_spec,
            )
            .quantize()
            .export()
            .to_edge()
            .partition()
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"])
            .to_executorch()
        )

    @parameterized.expand(testsuite)
    def test_conv2d_tosa_MI(self, test_name, model):
        self._test_conv2d_tosa_MI_pipeline(model, model.get_inputs())

    @parameterized.expand(testsuite)
    def test_conv2d_tosa_BI(self, test_name, model):
        self._test_conv2d_tosa_BI_pipeline(model, model.get_inputs())

    @parameterized.expand(testsuite)
    def test_conv2d_u55_BI(self, test_name, model):
        self._test_conv2d_ethosu_BI_pipeline(
            common.get_u55_compile_spec(permute_memory_to_nhwc=True),
            model,
            model.get_inputs(),
        )

    @parameterized.expand(testsuite)
    def test_conv2d_u85_BI(self, test_name, model):
        self._test_conv2d_ethosu_BI_pipeline(
            common.get_u85_compile_spec(permute_memory_to_nhwc=True),
            model,
            model.get_inputs(),
        )
