xref: /aosp_15_r20/external/pytorch/torch/ao/quantization/quantizer/utils.py (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
# mypy: allow-untyped-defs
from typing import List

from torch.ao.quantization.pt2e.utils import _is_sym_size_node
from torch.ao.quantization.quantizer.quantizer import QuantizationAnnotation
from torch.fx import Node


def _annotate_input_qspec_map(node: Node, input_node: Node, qspec):
    """Annotate the edge from input_node to node with the given qspec,
    creating the node's QuantizationAnnotation if it does not exist yet.
    """
    quantization_annotation = node.meta.get(
        "quantization_annotation", QuantizationAnnotation()
    )
    if quantization_annotation.input_qspec_map is None:
        quantization_annotation.input_qspec_map = {}
    quantization_annotation.input_qspec_map[input_node] = qspec
    node.meta["quantization_annotation"] = quantization_annotation
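# Example usage of _annotate_input_qspec_map (illustrative sketch, not part of
# this module): inside a quantizer's annotation pass, each input edge of an op
# can be given its own QuantizationSpec. `linear_node`, `act_node`,
# `weight_node`, `act_qspec` and `weight_qspec` are hypothetical placeholders
# supplied by the caller:
#
#   _annotate_input_qspec_map(linear_node, act_node, act_qspec)
#   _annotate_input_qspec_map(linear_node, weight_node, weight_qspec)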


def _annotate_output_qspec(node: Node, qspec):
    """Annotate the output of node with the given qspec, creating the node's
    QuantizationAnnotation if it does not exist yet.
    """
    quantization_annotation = node.meta.get(
        "quantization_annotation", QuantizationAnnotation()
    )
    quantization_annotation.output_qspec = qspec
    node.meta["quantization_annotation"] = quantization_annotation
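# Example usage of _annotate_output_qspec (illustrative sketch): marking the
# output of the same hypothetical `linear_node` for quantization with an output
# QuantizationSpec:
#
#   _annotate_output_qspec(linear_node, output_act_qspec)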


def _node_only_used_for_sym_size(node: Node, partition_nodes: List[Node]):
    """
    This utility handles cases where tracing with dynamic shapes enabled leads
    to symint nodes in the pattern of a linear module. In those cases, we need to
    distinguish the inputs that are used only to extract the value of some
    dimensions (and the symint nodes themselves) from the input that is the
    actual activation.
    For example:
    graph(x, y, weight):
       size_0 = torch.ops.aten.sym_size([x], [0])
       size_1 = torch.ops.aten.sym_size([y], [1])
       view_size = size_0 * size_1
       size_3 = torch.ops.aten.sym_size([x], [2])
       view_out = torch.ops.aten.view(x, [view_size, size_3])
       return mm(view_out, weight)
    In the example above, the y node is not an actual input; it exists only to
    extract size_1.
    """
    if _is_sym_size_node(node):
        return True

    return all(
        ((user not in partition_nodes) or _is_sym_size_node(user))
        for user in node.users
    )
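# Example usage of _node_only_used_for_sym_size (illustrative sketch): when
# annotating a linear partition traced with dynamic shapes, inputs that only
# feed sym_size nodes should not be treated as activations. `partition` is a
# hypothetical SourcePartition-like object with `nodes` and `input_nodes`
# attributes:
#
#   act_inputs = [
#       n for n in partition.input_nodes
#       if not _node_only_used_for_sym_size(n, partition.nodes)
#   ]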


def _get_module_name_filter(module_name: str):
    """Get the module_name_filter function for a given module name. The filter
    accepts a node and checks whether the node comes from a module with that
    module name.

    For example:
        node: linear_op = call_function[...](...)  # comes from a module with name blocks.sub.linear1

    >> module_name_filter = _get_module_name_filter("blocks.sub")
    >> print(module_name_filter(node))
    True  # the node is from "blocks.sub" based on the fully qualified name "blocks.sub.linear1"
    """

    def module_name_filter(n: Node) -> bool:
        # example nn_module_stack: {
        #    'L__self___sub': ("L['self'].sub", <class '....Sub'>),
        #    'L__self___sub_linear': ("L['self'].sub.linear", <class 'torch.nn.modules.linear.Linear'>)
        # }
        # get_attr nodes don't have nn_module_stack?
        nn_module_stack = n.meta.get("nn_module_stack", {})

        def _normalize_path(n):
            prefix = 0
            # TODO: This is non-standard behavior and should be removed when we migrate off capture_pre_autograd_graph.
            if n.startswith("L['self']."):
                prefix = len("L['self'].")
            return n[prefix:]

        names = [_normalize_path(n) for n, _ in nn_module_stack.values()]
        return module_name in names

    return module_name_filter
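# Example usage of _get_module_name_filter (illustrative sketch): restricting an
# annotation pass to nodes that come from one submodule. `graph_module` is a
# hypothetical torch.fx.GraphModule produced by tracing the model:
#
#   filter_fn = _get_module_name_filter("blocks.sub")
#   nodes_to_annotate = [n for n in graph_module.graph.nodes if filter_fn(n)]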