load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
load("@fbsource//xplat/executorch/codegen:codegen.bzl", "et_operator_library", "executorch_generated_lib", "exir_custom_ops_aot_lib")

def define_common_targets():
    runtime.export_file(
        name = "quantized.yaml",
        visibility = [
            "@EXECUTORCH_CLIENTS",
        ],
    )

    # Excluding the embedding_byte ops because we define them separately in
    # Python, mostly to make them easy to share with OSS.
    et_operator_library(
        name = "quantized_ops_need_aot_registration",
        ops = [
            "quantized_decomposed::add.out",
            "quantized_decomposed::choose_qparams.Tensor_out",
            "quantized_decomposed::choose_qparams_per_token_asymmetric.out",
            "quantized_decomposed::dequantize_per_channel.out",
            "quantized_decomposed::dequantize_per_tensor.out",
            "quantized_decomposed::dequantize_per_tensor.Tensor_out",
            "quantized_decomposed::dequantize_per_token.out",
            "quantized_decomposed::mixed_linear.out",
            "quantized_decomposed::mixed_mm.out",
            "quantized_decomposed::quantize_per_channel.out",
            "quantized_decomposed::quantize_per_tensor.out",
            "quantized_decomposed::quantize_per_tensor.Tensor_out",
            "quantized_decomposed::quantize_per_token.out",
        ],
        define_static_targets = True,
    )
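
    # The ops listed above get their AOT (export-time) registration from the
    # exir_custom_ops_aot_lib targets below, which pull this list in via deps.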

    # lib used to register quantized ops into EXIR
    exir_custom_ops_aot_lib(
        name = "custom_ops_generated_lib",
        yaml_target = ":quantized.yaml",
        visibility = ["//executorch/...", "@EXECUTORCH_CLIENTS"],
        kernels = [":quantized_operators_aten"],
        deps = [
            ":quantized_ops_need_aot_registration",
        ],
    )

    # lib used to register quantized ops into EXIR
    # TODO: merge this with custom_ops_generated_lib
    exir_custom_ops_aot_lib(
        name = "aot_lib",
        yaml_target = ":quantized.yaml",
        visibility = ["//executorch/..."],
        kernels = [":quantized_operators_aten"],
        deps = [
            ":quantized_ops_need_aot_registration",
        ],
    )

    et_operator_library(
        name = "all_quantized_ops",
        ops_schema_yaml_target = ":quantized.yaml",
        define_static_targets = True,
    )
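
    # Unlike the explicit list above, this target covers every op schema in
    # quantized.yaml; it feeds the generated_lib targets defined below.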

    # On Windows we can currently only compile these two ops (quantize/dequantize
    # per-tensor), so add a separate target for them.
    et_operator_library(
        name = "q_dq_ops",
        ops = [
            "quantized_decomposed::dequantize_per_tensor.out",
            "quantized_decomposed::dequantize_per_tensor.Tensor_out",
            "quantized_decomposed::quantize_per_tensor.out",
            "quantized_decomposed::quantize_per_tensor.Tensor_out",
        ],
    )
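
    # This subset is what the q_dq_ops_generated_lib targets in the loop below
    # are built from.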

    for aten_mode in (True, False):
        aten_suffix = "_aten" if aten_mode else ""
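
        # aten_suffix is "" for the portable flavor and "_aten" for the ATen
        # flavor; each iteration defines quantized_operators, generated_lib,
        # and q_dq_ops_generated_lib with that suffix.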

        runtime.cxx_library(
            name = "quantized_operators" + aten_suffix,
            srcs = [],
            visibility = [
                "//executorch/...",
                "@EXECUTORCH_CLIENTS",
            ],
            exported_deps = [
                "//executorch/kernels/quantized/cpu:quantized_cpu" + aten_suffix,
            ],
        )
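
        # This wrapper has no sources of its own; it just re-exports the CPU
        # kernel implementations for the selected flavor.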

        executorch_generated_lib(
            name = "generated_lib" + aten_suffix,
            deps = [
                ":quantized_operators" + aten_suffix,
                ":all_quantized_ops",
            ],
            custom_ops_yaml_target = ":quantized.yaml",
            custom_ops_aten_kernel_deps = [":quantized_operators_aten"] if aten_mode else [],
            custom_ops_requires_aot_registration = False,
            aten_mode = aten_mode,
            visibility = [
                "//executorch/...",
                "@EXECUTORCH_CLIENTS",
            ],
            define_static_targets = True,
        )
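
        # A minimal consumption sketch (hypothetical client target; the name
        # "my_model_runner" is illustrative): a client that needs these kernels
        # linked in can simply depend on the generated lib, e.g.
        #
        #     runtime.cxx_library(
        #         name = "my_model_runner",
        #         deps = ["//executorch/kernels/quantized:generated_lib"],
        #     )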

        # On Windows we can currently only compile these two ops (quantize/dequantize
        # per-tensor), so add a separate target for them.
        executorch_generated_lib(
            name = "q_dq_ops_generated_lib" + aten_suffix,
            custom_ops_yaml_target = ":quantized.yaml",
            kernel_deps = [
                "//executorch/kernels/quantized/cpu:op_quantize" + aten_suffix,
                "//executorch/kernels/quantized/cpu:op_dequantize" + aten_suffix,
            ],
            aten_mode = aten_mode,
            deps = [
                ":q_dq_ops",
            ],
            visibility = [
                "//executorch/...",
                "@EXECUTORCH_CLIENTS",
            ],
        )

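    # Python-side op definitions live in __init__.py (which depends on torch);
    # per the comment at the top of this file, this presumably includes the
    # embedding_byte ops that are excluded from the AOT registration list.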
    runtime.python_library(
        name = "quantized_ops_lib",
        srcs = ["__init__.py"],
        deps = [
            "//caffe2:torch",
        ],
        visibility = [
            "//executorch/kernels/quantized/...",
            "@EXECUTORCH_CLIENTS",
        ],
    )