
Searched defs:AttentionKernel (Results 1 – 1 of 1) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h

     90  struct AttentionKernel {
     91    enum CustomMaskType {
     98    using scalar_t = scalar_t_;
     99    using accum_t = float;
    100    using lse_scalar_t = float;
    101    using output_t = scalar_t;
    105    using output_accum_t = accum_t;
    106    static constexpr bool kSupportsDropout = kSupportsDropout_;
    107    static constexpr bool kSupportsBias = kSupportsBias_;
    108    static constexpr int kKeysPerBlock = kKeysPerBlock_;
[all …]
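
For context, the snippet shows a common CUDA/C++ pattern: a kernel struct whose template parameters (the trailing-underscore names such as scalar_t_, kKeysPerBlock_, kSupportsDropout_, kSupportsBias_) are re-exported as member type aliases and compile-time constants. The sketch below is a minimal, self-contained illustration of that pattern only, not the actual PyTorch/xFormers definition; the template parameter list and the struct name AttentionKernelSketch are assumptions inferred from the members visible above.

    #include <cstdio>

    // Sketch (hypothetical, for illustration): template parameters are
    // captured as aliases/constants so the rest of the kernel can refer
    // to them uniformly, mirroring lines 98-108 of the snippet.
    template <
        typename scalar_t_,      // element type of Q/K/V (e.g. half, float)
        int kKeysPerBlock_,      // keys processed per thread block
        bool kSupportsDropout_,  // compile-time toggle for dropout support
        bool kSupportsBias_>     // compile-time toggle for attention-bias support
    struct AttentionKernelSketch {
      using scalar_t = scalar_t_;
      using accum_t = float;         // accumulate in fp32
      using lse_scalar_t = float;    // log-sum-exp values kept in fp32
      using output_t = scalar_t;     // output written in the input precision
      using output_accum_t = accum_t;

      static constexpr bool kSupportsDropout = kSupportsDropout_;
      static constexpr bool kSupportsBias = kSupportsBias_;
      static constexpr int kKeysPerBlock = kKeysPerBlock_;
    };

    int main() {
      // Instantiation fixes the tile size and feature flags at compile time.
      using Kernel = AttentionKernelSketch</*scalar_t_=*/float,
                                           /*kKeysPerBlock_=*/64,
                                           /*kSupportsDropout_=*/true,
                                           /*kSupportsBias_=*/false>;
      std::printf("keys per block: %d, supports dropout: %d\n",
                  Kernel::kKeysPerBlock, (int)Kernel::kSupportsDropout);
      return 0;
    }

The general motivation for resolving such flags at compile time, presumably also the intent in the real kernel, is that the compiler can eliminate unused feature paths (e.g. dropout handling when kSupportsDropout is false) rather than branching on them at runtime.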