#pragma once

#include <ATen/Context.h>
#include <c10/macros/Macros.h>
#include <ATen/native/transformers/sdp_utils_cpp.h>
#include <c10/macros/Export.h>

namespace sdp {

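// Returns true when nested-tensor inputs contain no length-1 sequences (a
// case the fused kernels do not support); with `debug` set, a warning is
// expected to explain the rejection.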
bool check_for_seq_len_1_nested_tensor(sdp_params const& params, bool debug);
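// Selects which scaled dot product attention backend (cuDNN, FlashAttention,
// memory-efficient, or the unfused math fallback) to dispatch to for the
// given kernel parameters, consulting the globally enabled backends and the
// per-kernel checks declared below.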
SDPBackend select_sdp_backend(sdp_params const& kernel_params);
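// Reports whether this build was compiled with FlashAttention support at
// all, independent of any particular set of inputs.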
C10_EXPORT bool is_flash_attention_available();
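// True if the FlashAttention kernel can service `params` on the current
// device; when `debug` is true, warnings explain which constraint failed.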
C10_EXPORT bool can_use_flash_attention(sdp_params const& params, bool debug);
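// Same contract, for the memory-efficient attention kernel.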
C10_EXPORT bool can_use_mem_efficient_attention(sdp_params const& params, bool debug);
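// Same contract, for the cuDNN fused attention kernel.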
C10_EXPORT bool can_use_cudnn_attention(sdp_params const& params, bool debug);

} // namespace sdp
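
// Usage sketch (illustrative comment only, not part of this header's API).
// It assumes a populated sdp::sdp_params; the brace-initialization below is
// hypothetical and the struct's actual fields are defined in
// sdp_utils_cpp.h, as are the SDPBackend enumerators.
//
//   sdp::sdp_params params{query, key, value, attn_mask, dropout_p, is_causal};
//   switch (sdp::select_sdp_backend(params)) {
//     case sdp::SDPBackend::flash_attention:
//       /* dispatch to the FlashAttention kernel */ break;
//     case sdp::SDPBackend::efficient_attention:
//       /* dispatch to the memory-efficient kernel */ break;
//     case sdp::SDPBackend::cudnn_attention:
//       /* dispatch to the cuDNN fused kernel */ break;
//     default:
//       /* fall back to the unfused math implementation */ break;
//   }
//
// Re-running a failed check with debug=true surfaces the rejection reason:
//
//   if (!sdp::can_use_flash_attention(params, /*debug=*/true)) {
//     // warnings emitted by the check explain why the fused path was rejected
//   }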