xref: /aosp_15_r20/external/pytorch/torch/ao/quantization/fx/lower_to_qnnpack.py (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
1from typing import Dict, Tuple
2
3from torch.ao.quantization.qconfig import QConfigAny
4from torch.fx import GraphModule
5
6from ._lower_to_native_backend import _lower_to_native_backend
7
8
9__all__ = ["lower_to_qnnpack"]
10
11
12def lower_to_qnnpack(
13    model: GraphModule,
14    qconfig_map: Dict[str, QConfigAny],
15    node_name_to_scope: Dict[str, Tuple[str, type]],
16) -> GraphModule:
17    """Lower a quantized reference model (with reference quantized operator patterns)
18    to qnnpack
19    """
20    return _lower_to_native_backend(model, qconfig_map, node_name_to_scope)
21