Searched defs: grad_input_mask (results 1 – 11 of 11, sorted by relevance). A short sketch of how this parameter is typically used follows the listing.

/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
Normalization.cpp
     37:  std::array<bool,3> grad_input_mask) { in mkldnn_batch_norm_backward()
     75:  bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) { in _new_batch_norm_backward_mkldnn()
    243:  bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) { in _new_batch_norm_backward_mkldnn()
    252:  std::array<bool,3> grad_input_mask) { in mkldnn_batch_norm_backward()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/
NestedTensorBackward.cpp
     25:  std::array<bool, 2> grad_input_mask) { in matmul_backward_nested()
    204:  std::array<bool, 3> grad_input_mask) { in layer_norm_backward_nested()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
Normalization.cpp
    306:  bool train, double eps, std::array<bool,3> grad_input_mask) { in batch_norm_backward_cpu_template()
    915:  bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) { in _new_batch_norm_backward_cpu()
    920:  … bool train, double eps, std::array<bool,3> grad_input_mask) { in batch_norm_backward_cpu()
group_norm.cpp
    115:  std::array<bool, 3> grad_input_mask) { in native_group_norm_backward()
layer_norm.cpp
    120:  std::array<bool, 3> grad_input_mask) { in layer_norm_backward_cpu()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
Normalization.cu
    552:  bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) { in _new_batch_norm_backward_cuda()
    570:  …ptional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) { in batch_norm_backward_cuda()
layer_norm_kernel.cu
   1395:  std::array<bool, 3> grad_input_mask) { in layer_norm_backward_cuda()
Normalization.cuh
    605:  … bool train, double epsilon, std::array<bool,3> grad_input_mask) { in batch_norm_backward_cuda_template()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/transformers/cuda/
attention_backward.cu
    795:  std::array<bool, 4> grad_input_mask, in _scaled_dot_product_efficient_attention_backward_cuda()
/aosp_15_r20/external/pytorch/test/cpp_extensions/
open_registration_extension.cpp
    478:  std::array<bool,4> grad_input_mask, in custom_scaled_dot_product_fused_attention_overrideable_backward()
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
FunctionsManual.cpp
   1235:  const std::array<bool, 2>& grad_input_mask) { in clamp_backward_min_max()
   1538:  const std::array<bool, 3>& grad_input_mask) { in sparse_sampled_addmm_backward()
   3867:  const std::array<bool, 2>& grad_input_mask) { in linalg_lstsq_backward()
   4946:  std::array<bool, 3> grad_input_mask) { in infinitely_differentiable_native_group_norm_backward()
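
Every hit above follows the same convention: grad_input_mask is a fixed-size std::array<bool, N> whose entries tell a backward function which of its N gradient outputs the caller actually needs (for the normalization kernels, typically grad_input, grad_weight, and grad_bias), so the kernel can skip allocating and computing the rest. Below is a minimal self-contained sketch of that pattern, assuming a toy elementwise y = x * w + b; Vec and toy_affine_backward are illustrative stand-ins, not PyTorch APIs.

#include <array>
#include <cstddef>
#include <iostream>
#include <optional>
#include <utility>
#include <vector>

// Stand-in for a tensor; the real kernels operate on at::Tensor.
using Vec = std::vector<double>;

// Toy backward for elementwise y = x * w + b. The mask mirrors the
// PyTorch convention: [0] -> grad_input, [1] -> grad_weight, [2] -> grad_bias.
std::array<std::optional<Vec>, 3> toy_affine_backward(
    const Vec& grad_out,
    const Vec& input,
    const Vec& weight,
    std::array<bool, 3> grad_input_mask) {
  std::array<std::optional<Vec>, 3> grads;  // empty slot == "not requested"
  const std::size_t n = grad_out.size();

  if (grad_input_mask[0]) {  // dL/dx = dL/dy * w
    Vec g(n);
    for (std::size_t i = 0; i < n; ++i) g[i] = grad_out[i] * weight[i];
    grads[0] = std::move(g);
  }
  if (grad_input_mask[1]) {  // dL/dw = dL/dy * x
    Vec g(n);
    for (std::size_t i = 0; i < n; ++i) g[i] = grad_out[i] * input[i];
    grads[1] = std::move(g);
  }
  if (grad_input_mask[2]) {  // dL/db = dL/dy
    grads[2] = grad_out;
  }
  return grads;
}

int main() {
  Vec x{1.0, 2.0}, w{3.0, 4.0}, gy{0.5, 0.5};
  // Only weight and bias gradients are needed, e.g. because the input
  // does not require grad; the input-gradient computation is skipped.
  auto grads = toy_affine_backward(gy, x, w, {false, true, true});
  std::cout << "grad_input computed: " << grads[0].has_value() << "\n";  // 0
  std::cout << "grad_weight[0]: " << (*grads[1])[0] << "\n";             // 0.5
  return 0;
}

In the real kernels listed above, the mask is typically derived from the requires_grad flags of the forward inputs, and masked-off slots come back as undefined Tensors rather than empty optionals.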