Searched defs:grad_input_mask (results 1–11 of 11), sorted by relevance
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
  Normalization.cpp
    37:   std::array<bool,3> grad_input_mask) {  in mkldnn_batch_norm_backward()
    75:   bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) {  in _new_batch_norm_backward_mkldnn()
    243:  bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) {  in _new_batch_norm_backward_mkldnn()
    252:  std::array<bool,3> grad_input_mask) {  in mkldnn_batch_norm_backward()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/
  NestedTensorBackward.cpp
    25:   std::array<bool, 2> grad_input_mask) {  in matmul_backward_nested()
    204:  std::array<bool, 3> grad_input_mask) {  in layer_norm_backward_nested()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  Normalization.cpp
    306:  bool train, double eps, std::array<bool,3> grad_input_mask) {  in batch_norm_backward_cpu_template()
    915:  bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) {  in _new_batch_norm_backward_cpu()
    920:  … bool train, double eps, std::array<bool,3> grad_input_mask) {  in batch_norm_backward_cpu()
  group_norm.cpp
    115:  std::array<bool, 3> grad_input_mask) {  in native_group_norm_backward()
  layer_norm.cpp
    120:  std::array<bool, 3> grad_input_mask) {  in layer_norm_backward_cpu()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
  Normalization.cu
    552:  bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) {  in _new_batch_norm_backward_cuda()
    570:  …ptional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) {  in batch_norm_backward_cuda()
  layer_norm_kernel.cu
    1395:  std::array<bool, 3> grad_input_mask) {  in layer_norm_backward_cuda()
  Normalization.cuh
    605:  … bool train, double epsilon, std::array<bool,3> grad_input_mask) {  in batch_norm_backward_cuda_template()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/transformers/cuda/
  attention_backward.cu
    795:  std::array<bool, 4> grad_input_mask,  in _scaled_dot_product_efficient_attention_backward_cuda()

/aosp_15_r20/external/pytorch/test/cpp_extensions/
  open_registration_extension.cpp
    478:  std::array<bool,4> grad_input_mask,  in custom_scaled_dot_product_fused_attention_overrideable_backward()

/aosp_15_r20/external/pytorch/torch/csrc/autograd/
  FunctionsManual.cpp
    1235:  const std::array<bool, 2>& grad_input_mask) {  in clamp_backward_min_max()
    1538:  const std::array<bool, 3>& grad_input_mask) {  in sparse_sampled_addmm_backward()
    3867:  const std::array<bool, 2>& grad_input_mask) {  in linalg_lstsq_backward()
    4946:  std::array<bool, 3> grad_input_mask) {  in infinitely_differentiable_native_group_norm_backward()
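
All of these hits share one convention: grad_input_mask is a std::array<bool, N> with one flag per differentiable input of the backward (three for the normalization backwards: input, weight, bias; four for the attention backwards, covering query, key, value and the attention bias), telling the kernel which gradients the autograd engine actually needs. Below is a minimal sketch of that pattern, assuming an ATen build; the names my_norm_backward, dY, X and gamma are illustrative only and do not correspond to any definition listed above, and the gradient formulas are placeholders.

#include <ATen/ATen.h>
#include <array>
#include <tuple>

// Hypothetical example of the grad_input_mask convention; not code from the
// files listed above. Each flag says whether that gradient must be produced.
std::tuple<at::Tensor, at::Tensor, at::Tensor> my_norm_backward(
    const at::Tensor& dY,                   // upstream gradient
    const at::Tensor& X,                    // saved forward input
    const at::Tensor& gamma,                // affine weight
    std::array<bool, 3> grad_input_mask) {  // {need dX, need dgamma, need dbeta}
  at::Tensor dX, dgamma, dbeta;             // default-constructed = undefined
  if (grad_input_mask[0]) {
    dX = dY * gamma;                        // placeholder math, not the real formula
  }
  if (grad_input_mask[1]) {
    dgamma = (dY * X).sum(0);               // placeholder math
  }
  if (grad_input_mask[2]) {
    dbeta = dY.sum(0);
  }
  // Masked-out slots stay undefined; autograd treats an undefined tensor as
  // "no gradient required" for that input.
  return std::make_tuple(dX, dgamma, dbeta);
}

In the real call sites the mask is typically filled in by the generated autograd functions from which inputs require gradients, so, for example, the weight and bias gradients can be skipped entirely for a normalization layer without affine parameters.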