# Copyright (c) Qualcomm Innovation Center, Inc.
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import cast, Dict, List

import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper

import numpy as np
import torch
from executorch.backends.qualcomm.utils.constants import QCOM_AXIS_ORDER, QCOM_DATA

from .node_visitor import NodeVisitor, QNN_TENSOR_TYPE_MAP, register_node_visitor
from .qnn_constants import OpPad, QNN_OP_PACKAGE_NAME_QTI_AISW


@register_node_visitor
class Pad(NodeVisitor):
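    """
    Lowers aten.constant_pad_nd.default to the QNN Pad op, e.g. for a node
    such as torch.ops.aten.constant_pad_nd.default(x, [1, 2], 0.0).
    """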
    target = ["aten.constant_pad_nd.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
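        # wrap the tensor being padded as the op's single input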
        input_node = node.args[0]
        input_tensor = self.get_tensor(input_node, node)
        pad_inp_tensor_wrapper = self.define_tensor(
            input_node,
            input_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
            is_input_tensor=True,
        )
        pad_input_tensors = [pad_inp_tensor_wrapper]

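        # wrap the node's own result as the op's output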
        output_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            output_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
            is_input_tensor=False,
        )
        pad_output_tensors = [output_tensor_wrapper]

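        # QNN expects one (pad_before, pad_after) pair per input dimension,
        # i.e. a pad_amount tensor of shape [rank, 2]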
        pad_amount_shape = [input_tensor.dim(), 2]
        # PyTorch pad values start from the last dimension, so reverse the
        # (begin, end) pairs to put them in dimension order
        pad_amount = np.reshape(cast(List[int], node.args[1]), (-1, 2))[::-1].astype(
            np.uint32
        )
        # fill in (0, 0) for any leading dimensions the pad list does not cover
        if zero_amounts := pad_amount_shape[0] - pad_amount.shape[0]:
            pad_amount = np.concatenate(
                (np.array([(0, 0)] * zero_amounts), pad_amount)
            ).astype(np.uint32)
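        # For example, a rank-4 input with torch pads [1, 2] (last dim only)
        # reshapes/reverses to [[1, 2]] above, and the zero-fill extends it to
        # [[0, 0], [0, 0], [0, 0], [1, 2]].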

        if QCOM_AXIS_ORDER in node.meta:
            # reorder the per-dimension pad pairs to follow the annotated
            # QNN axis order
            pad_amount = pad_amount[list(node.meta[QCOM_AXIS_ORDER])]
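        # node.args[2] is the constant value used to fill the padded region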
        pad_amount_val = node.args[2]

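        # assemble the QNN Pad op and attach its inputs, outputs, and parameters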
        pad_op = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpPad.op_name,
        )
        pad_op.AddInputTensors(pad_input_tensors)
        pad_op.AddOutputTensors(pad_output_tensors)

        # aten.constant_pad_nd only performs constant padding, so the CONSTANT
        # pad scheme is the only one supported here
        pad_op.AddScalarParam(
            OpPad.param_scheme,
            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_UINT_32,
            {QCOM_DATA: np.uint32(OpPad.Scheme.CONSTANT)},
        )

        pad_op.AddScalarParam(
            OpPad.param_pad_constant_value,
            QNN_TENSOR_TYPE_MAP[type(pad_amount_val)],
            {QCOM_DATA: pad_amount_val},
        )

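        # pass the per-dimension pad amounts as a rank-2 tensor parameter
        # of shape [rank, 2]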
        pad_op.AddTensorParam(
            OpPad.param_pad_amount,
            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_UINT_32,
            len(pad_amount_shape),
            pad_amount_shape,
            pad_amount,
            True,
        )

        return pad_op