# xref: /aosp_15_r20/external/executorch/runtime/kernel/targets.bzl (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")

def _operator_registry_preprocessor_flags():
    """Returns preprocessor flags that cap the size of the kernel registry.

    Precedence:
    1. An explicit `executorch.max_kernel_num` buckconfig value wins and is
       passed through verbatim as `-DMAX_KERNEL_NUM=<value>`.
    2. With no config value, OSS builds get no flag (no cap override).
    3. Internal (non-OSS) builds select a cap from build constraints,
       defaulting to no flag when neither constraint is set.
    """
    max_kernel_num = native.read_config("executorch", "max_kernel_num", None)

    # Starlark has no `is`; `!= None` is the conventional sentinel check here.
    if max_kernel_num != None:
        return ["-DMAX_KERNEL_NUM=" + max_kernel_num]

    if runtime.is_oss:
        return []

    return select({
        "DEFAULT": [],
        "fbsource//xplat/executorch/build/constraints:executorch-max-kernel-num-256": ["-DMAX_KERNEL_NUM=256"],
        "fbsource//xplat/executorch/build/constraints:executorch-max-kernel-num-64": ["-DMAX_KERNEL_NUM=64"],
    })
def define_common_targets():
    """Defines targets that should be shared between fbcode and xplat.

    The directory containing this targets.bzl file should also contain both
    TARGETS and BUCK files that call this function.
    """

    # Core operator registry. The kernel-count cap (if any) comes from
    # buckconfig / build constraints; see _operator_registry_preprocessor_flags.
    runtime.cxx_library(
        name = "operator_registry",
        srcs = ["operator_registry.cpp"],
        exported_headers = ["operator_registry.h"],
        visibility = [
            "//executorch/...",
            "@EXECUTORCH_CLIENTS",
        ],
        exported_deps = [
            "//executorch/runtime/core:core",
            "//executorch/runtime/core:evalue",
        ],
        preprocessor_flags = _operator_registry_preprocessor_flags(),
    )

    # Same library pinned to a registry capacity of exactly one kernel, so
    # tests can deterministically exercise the registry-full behavior.
    runtime.cxx_library(
        name = "operator_registry_MAX_NUM_KERNELS_TEST_ONLY",
        srcs = ["operator_registry.cpp"],
        exported_headers = ["operator_registry.h"],
        visibility = [
            "//executorch/...",
            "@EXECUTORCH_CLIENTS",
        ],
        exported_deps = [
            "//executorch/runtime/core:core",
            "//executorch/runtime/core:evalue",
        ],
        preprocessor_flags = ["-DMAX_KERNEL_NUM=1"],
    )

    # Emit an ATen-mode and a non-ATen ("lean") variant of each target below;
    # ATen variants carry an "_aten" suffix and depend on "_aten" deps.
    for aten_mode in (True, False):
        aten_suffix = "_aten" if aten_mode else ""

        runtime.cxx_library(
            name = "kernel_runtime_context" + aten_suffix,
            exported_headers = [
                "kernel_runtime_context.h",
            ],
            visibility = [
                "//executorch/kernels/...",
                "//executorch/runtime/executor/...",
                "//executorch/runtime/kernel/...",
                "@EXECUTORCH_CLIENTS",
            ],
            exported_deps = [
                "//executorch/runtime/core:core",
                "//executorch/runtime/platform:platform",
                "//executorch/runtime/core:memory_allocator",
                "//executorch/runtime/core:event_tracer" + aten_suffix,
                # TODO(T147221312): This will eventually depend on exec_aten
                # once KernelRuntimeContext support tensor resizing, which is
                # why this target supports aten mode.
            ],
        )

        # Umbrella header target pulling in everything a kernel implementation
        # needs (runtime context, exec_aten types, scalar/tensor utilities).
        runtime.cxx_library(
            name = "kernel_includes" + aten_suffix,
            exported_headers = [
                "kernel_includes.h",
            ],
            visibility = [
                "//executorch/runtime/kernel/...",
                "//executorch/kernels/...",
                "//executorch/kernels/prim_ops/...",  # Prim kernels
                "@EXECUTORCH_CLIENTS",
            ],
            exported_deps = [
                ":kernel_runtime_context" + aten_suffix,
                "//executorch/runtime/core/exec_aten:lib" + aten_suffix,
                "//executorch/runtime/core/exec_aten/util:scalar_type_util" + aten_suffix,
                "//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix,
            ],
        )