# Copyright (c) Qualcomm Innovation Center, Inc.
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from typing import cast, Dict

import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper

import numpy as np
import torch
from executorch.backends.qualcomm.utils.constants import QCOM_DATA

from .node_visitor import NodeVisitor, register_node_visitor
from .qnn_constants import OpReluMinMax, QNN_OP_PACKAGE_NAME_QTI_AISW


@register_node_visitor
class HardTanhVisitor(NodeVisitor):
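    """Lower aten.hardtanh.default to the QNN ReluMinMax op, forwarding the
    clamp bounds as its min_value/max_value scalar parameters."""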
    target = ["aten.hardtanh.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        input_node = node.args[0]
        input_tensor = self.get_tensor(input_node, node)
        input_tensor_wrapper = self.define_tensor(
            input_node,
            input_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
            is_input_tensor=True,
        )

        # aten.hardtanh clamps to [-1, 1] by default (torch.nn.Hardtanh min_val/max_val)
        output_min = -1
        output_max = 1

        if len(node.args) > 1:
            # explicit min/max bounds were provided, so use them instead of the defaults
            output_min = cast(float, node.args[1])
            output_max = cast(float, node.args[2])

        output_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            output_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
            is_input_tensor=False,
        )

        hardtanh_op = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpReluMinMax.op_name,
        )
        hardtanh_op.AddInputTensors([input_tensor_wrapper])
        hardtanh_op.AddOutputTensors([output_tensor_wrapper])
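        # attach the clamp bounds as FLOAT_32 scalar parameters on the ReluMinMax op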
        hardtanh_op.AddScalarParam(
            OpReluMinMax.param_max_value,
            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_FLOAT_32,
            {QCOM_DATA: np.float32(output_max)},
        )
        hardtanh_op.AddScalarParam(
            OpReluMinMax.param_min_value,
            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_FLOAT_32,
            {QCOM_DATA: np.float32(output_min)},
        )

        return hardtanh_op