load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")

def _get_operator_lib(aten = False):
    """Return the operator/kernel libraries to link for this build flavor.

    ATen builds link the generated ATen kernels; non-ATen builds link the
    optimized native CPU ops plus the LLM custom ops, with a separate
    target for OSS builds.
    """
    if aten:
        return ["//executorch/kernels/aten:generated_lib"]

    if runtime.is_oss:
        # TODO(T183193812): delete this path after optimized-oss.yaml is no more.
        return [
            "//executorch/configurations:optimized_native_cpu_ops_oss",
            "//executorch/extension/llm/custom_ops:custom_ops",
        ]

    return [
        "//executorch/configurations:optimized_native_cpu_ops",
        "//executorch/extension/llm/custom_ops:custom_ops",
    ]

def get_qnn_dependency():
    """Return the QNN runtime dependency when QNN support is enabled.

    Enable with:
    buck build -c executorch.enable_qnn=true //executorch/examples/models/llama/runner:runner
    """
    # Guard clause: without the config flag, contribute no extra deps.
    if native.read_config("executorch", "enable_qnn", "false") != "true":
        return []

    # //executorch/backends/qualcomm:qnn_executorch_backend doesn't work,
    # likely because it is an empty library that only carries dependencies.
    return [
        "//executorch/backends/qualcomm/runtime:runtime",
    ]

def define_common_targets():
    """Register the llama runner library in both ATen and non-ATen flavors."""
    for aten in (True, False):
        suffix = "_aten" if aten else ""

        # Vulkan API currently cannot build on some platforms (e.g. Apple, FBCODE).
        # Therefore enable it explicitly for now to avoid failing tests.
        vulkan_deps = []
        if native.read_config("llama", "use_vulkan", "0") == "1":
            vulkan_deps = ["//executorch/backends/vulkan:vulkan_backend_lib"]

        base_deps = [
            "//executorch/backends/xnnpack:xnnpack_backend",
            "//executorch/extension/llm/runner:irunner",
            "//executorch/extension/llm/runner:stats",
            "//executorch/extension/llm/runner:text_decoder_runner" + suffix,
            "//executorch/extension/llm/runner:text_prefiller" + suffix,
            "//executorch/extension/llm/runner:text_token_generator" + suffix,
            "//executorch/extension/evalue_util:print_evalue" + suffix,
            "//executorch/extension/module:module" + suffix,
            "//executorch/extension/tensor:tensor" + suffix,
            "//executorch/kernels/quantized:generated_lib" + suffix,
            "//executorch/runtime/core/exec_aten:lib" + suffix,
            "//executorch/runtime/core/exec_aten/util:tensor_util" + suffix,
            "//executorch/examples/models/llama/tokenizer:tiktoken",
            "//executorch/extension/llm/tokenizer:bpe_tokenizer",
        ]

        runtime.cxx_library(
            name = "runner" + suffix,
            srcs = [
                "runner.cpp",
            ],
            exported_headers = [
                "runner.h",
            ],
            # USE_ATEN_LIB switches the sources onto the ATen tensor types.
            preprocessor_flags = [
                "-DUSE_ATEN_LIB",
            ] if aten else [],
            visibility = [
                "@EXECUTORCH_CLIENTS",
            ],
            # Keep concatenation order: base deps, operator libs, Vulkan, QNN.
            exported_deps = base_deps + _get_operator_lib(aten) + vulkan_deps + get_qnn_dependency(),
            external_deps = [
                "libtorch",
            ] if aten else [],
        )