Searched full:rstd (Results 1 – 25 of 40) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
group_norm_kernel.cu
37 T* rstd) { in RowwiseMomentsCUDAKernel() argument
70 rstd[i] = c10::cuda::compat::rsqrt(m2 + static_cast<T_ACC>(eps)); in RowwiseMomentsCUDAKernel()
80 const T* rstd, in ComputeFusedParamsCUDAKernel() argument
91 ? static_cast<T_ACC>(rstd[ng]) in ComputeFusedParamsCUDAKernel()
92 : static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(gamma[c]); in ComputeFusedParamsCUDAKernel()
106 const T* rstd, in Compute1dBackwardFusedParamsCUDAKernel() argument
138 static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) * in Compute1dBackwardFusedParamsCUDAKernel()
139 static_cast<T_ACC>(rstd[ng]) * s; in Compute1dBackwardFusedParamsCUDAKernel()
142 sum2 * static_cast<T_ACC>(rstd[ng]) * s; in Compute1dBackwardFusedParamsCUDAKernel()
154 const T* rstd, in GammaBeta1dBackwardCUDAKernel1() argument
[all …]
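
Both RowwiseMomentsCUDAKernel hits above reduce one row per block and then store the reciprocal standard deviation (rsqrt of the variance plus eps) rather than the variance itself, so the later normalization step is a multiply instead of a divide. A minimal host-side sketch of the same statistic, in plain C++ rather than the CUDA kernel, assuming m2 is a Welford-style sum of squared deviations that still needs dividing by the element count:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double x[4] = {1.0, 2.0, 3.0, 4.0};        // one "row" of data
      const double eps = 1e-5;
      double mean = 0.0, m2 = 0.0;
      for (int i = 0; i < 4; ++i) {                    // single-pass Welford update
        const double delta = x[i] - mean;
        mean += delta / (i + 1);
        m2 += delta * (x[i] - mean);                   // running sum of squared deviations
      }
      const double var = m2 / 4.0;                     // population variance of the row
      const double rstd = 1.0 / std::sqrt(var + eps);  // what the kernel stores
      std::printf("mean=%g rstd=%g\n", mean, rstd);
      return 0;
    }
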
layer_norm_kernel.cu
59 T_ACC* rstd) { in RowwiseMomentsCUDAKernel() argument
88 rstd[i] = c10::cuda::compat::rsqrt(m2 + eps); in RowwiseMomentsCUDAKernel()
97 const T_ACC* rstd, in LayerNormForwardCUDAKernel() argument
109 static_cast<T_ACC>(rstd[i]) * gamma_v + in LayerNormForwardCUDAKernel()
228 T_ACC* rstd, in vectorized_layer_norm_kernel_impl() argument
280 rstd[i1] = rstd_val; in vectorized_layer_norm_kernel_impl()
293 T_ACC* /*rstd*/, in vectorized_layer_norm_kernel_impl()
307 T_ACC* rstd, in vectorized_layer_norm_kernel() argument
309 vectorized_layer_norm_kernel_impl(N, eps, X, gamma, beta, mean, rstd, Y); in vectorized_layer_norm_kernel()
318 const T_ACC* __restrict__ rstd, in compute_gI() argument
[all …]
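
LayerNormForwardCUDAKernel consumes the saved per-row mean/rstd pair; the expression visible at line 109 is the usual affine form (x - mean) * rstd * gamma + beta. A scalar sketch with made-up values (not the vectorized kernel):

    #include <cstdio>

    // One element of a layer-norm forward pass, given precomputed row statistics.
    static double layer_norm_elem(double x, double mean, double rstd,
                                  double gamma, double beta) {
      return (x - mean) * rstd * gamma + beta;
    }

    int main() {
      // Hypothetical row statistics; in the kernel they come from RowwiseMomentsCUDAKernel.
      const double mean = 2.5, rstd = 0.894427, gamma = 1.0, beta = 0.0;
      std::printf("%g\n", layer_norm_elem(1.0, mean, rstd, gamma, beta));
      return 0;
    }
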
/aosp_15_r20/external/trusty/arm-trusted-firmware/plat/st/stm32mp1/
stm32mp1_scmi.c
133 struct stm32_scmi_rstd *rstd; member
141 .rstd = stm32_scmi0_reset_domain,
358 return &resource->rstd[n]; in find_rstd()
368 const struct stm32_scmi_rstd *rstd = find_rstd(agent_id, scmi_id); in plat_scmi_rstd_get_name() local
370 if (rstd == NULL) { in plat_scmi_rstd_get_name()
374 return rstd->name; in plat_scmi_rstd_get_name()
391 const struct stm32_scmi_rstd *rstd = find_rstd(agent_id, scmi_id); in plat_scmi_rstd_autonomous() local
393 if (rstd == NULL) { in plat_scmi_rstd_autonomous()
397 if (!stm32mp_nsec_can_access_reset(rstd->reset_id)) { in plat_scmi_rstd_autonomous()
406 VERBOSE("SCMI reset %lu cycle\n", rstd->reset_id); in plat_scmi_rstd_autonomous()
[all …]
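
In the trusted-firmware hits, rstd is not a statistic at all: it is a per-agent table of SCMI reset-domain descriptors, and find_rstd() does a bounds-checked index into that table so callers such as plat_scmi_rstd_get_name() can bail out on NULL. A simplified sketch of the lookup pattern (the descriptor fields and the two-entry table are invented for illustration):

    #include <cstddef>
    #include <cstdio>

    struct scmi_rstd {                   // pared-down reset-domain descriptor
      const char *name;
      unsigned long reset_id;
    };

    static const scmi_rstd agent0_rstd[] = {
      {"mcu_rst", 0}, {"usb_rst", 1},    // hypothetical domains for one agent
    };

    // Bounds-checked lookup; NULL tells the caller the SCMI id is unknown.
    static const scmi_rstd *find_rstd(size_t scmi_id) {
      const size_t count = sizeof(agent0_rstd) / sizeof(agent0_rstd[0]);
      return scmi_id < count ? &agent0_rstd[scmi_id] : nullptr;
    }

    int main() {
      const scmi_rstd *d = find_rstd(1);
      std::printf("%s\n", d != nullptr ? d->name : "unknown");
      return 0;
    }
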
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
BatchRulesNorm.cpp
22 // There's a weird case where mean, rstd can both have shape (0,). in compute_stat_bdim()
69 Tensor rstd; in batch_norm_batch_rule() local
76 rstd = std::get<2>(result); in batch_norm_batch_rule()
103 rstd = std::get<2>(result); in batch_norm_batch_rule()
104 rstd = reshape_dim_outof(0, bdim_size.value(), rstd); // [B0, C] in batch_norm_batch_rule()
123 return std::make_tuple(result0, 0, mean, stats_bdim, rstd, stats_bdim); in batch_norm_batch_rule()
133 const at::Tensor & rstd, std::optional<int64_t> rstd_bdim, in batch_norm_backward_no_weight_bias_batch_rule() argument
146 …grad_out, input, dummy_weight, running_mean_opt, running_var_opt, mean, rstd, training, eps, {true… in batch_norm_backward_no_weight_bias_batch_rule()
153 auto rstd_ = moveBatchDimToFront(rstd, rstd_bdim); in batch_norm_backward_no_weight_bias_batch_rule()
309 Tensor rstd; in native_group_norm_plumbing() local
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
group_norm_kernel.cpp
40 Tensor& rstd) { in GroupNormKernelImplInternal() argument
51 PT* rstd_data = rstd.data_ptr<PT>(); in GroupNormKernelImplInternal()
295 Tensor& rstd) { in GroupNormKernelImplChannelsLastInternal() argument
306 PT* rstd_data = rstd.data_ptr<PT>(); in GroupNormKernelImplChannelsLastInternal()
317 // Mean and rstd are collected per each n and g, which involves reduction in GroupNormKernelImplChannelsLastInternal()
418 // step-2: compute mean and rstd in GroupNormKernelImplChannelsLastInternal()
439 // mean/rstd have shape of {N, G}, gamma/beta have shape of {G, D}. in GroupNormKernelImplChannelsLastInternal()
495 Tensor& rstd) { in GroupNormKernelImpl() argument
503 X, gamma, beta, N, C, HxW, group, eps, Y, mean, rstd); in GroupNormKernelImpl()
506 X, gamma, beta, N, C, HxW, group, eps, Y, mean, rstd); in GroupNormKernelImpl()
[all …]
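
This channels-last CPU path and the ComputeFusedParamsCUDAKernel hit earlier both lean on the same folding: once mean/rstd are known per (sample, group), the per-channel transform collapses to y = a * x + b with a = rstd * gamma and b = beta - a * mean. A sketch for a single sample (shapes and loop structure are illustrative, not the vectorized implementation):

    #include <cstdio>
    #include <vector>

    int main() {
      const int G = 2, D = 3, C = G * D;             // groups, channels per group, channels
      const std::vector<double> mean = {0.1, 0.2};   // one (mean, rstd) per group
      const std::vector<double> rstd = {2.0, 4.0};
      const std::vector<double> gamma(C, 1.0), beta(C, 0.0);

      // Fold normalization and affine weights into per-channel scale/shift.
      std::vector<double> a(C), b(C);
      for (int c = 0; c < C; ++c) {
        const int g = c / D;
        a[c] = rstd[g] * gamma[c];
        b[c] = beta[c] - a[c] * mean[g];
      }
      // y = a[c] * x + b[c]  ==  (x - mean[g]) * rstd[g] * gamma[c] + beta[c]
      std::printf("a[0]=%g b[0]=%g\n", a[0], b[0]);
      return 0;
    }
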
layer_norm_kernel.cpp
37 Tensor* rstd) { in LayerNormKernelImplInternal() argument
44 T* rstd_data = rstd ? rstd->data_ptr<T>() : nullptr; in LayerNormKernelImplInternal()
96 Tensor* rstd) { in layer_norm_kernel_mixed_type() argument
104 param_t* rstd_data = rstd ? rstd->data_ptr<param_t>() : nullptr; in layer_norm_kernel_mixed_type()
155 Tensor* rstd) { in LayerNormKernelImplInternal() argument
158 layer_norm_kernel_mixed_type<T, float>(X, gamma, beta, M, N, eps, Y, mean, rstd); in LayerNormKernelImplInternal()
160 layer_norm_kernel_mixed_type<T, T>(X, gamma, beta, M, N, eps, Y, mean, rstd); in LayerNormKernelImplInternal()
173 Tensor* rstd) { in LayerNormKernelImpl() argument
180 X, gamma, beta, M, N, eps, Y, mean, rstd); in LayerNormKernelImpl()
495 const Tensor& rstd, in LayerNormBackwardKernelImplInternal() argument
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
layer_norm.cpp
37 at::Tensor& rstd, in layer_norm_with_mean_rstd_out() argument
45 LayerNormKernel(kCPU, input, gamma, beta, M, N, eps, &out, &mean, &rstd); in layer_norm_with_mean_rstd_out()
58 rstd = rstd.view(stat_shape); in layer_norm_with_mean_rstd_out()
72 LayerNormKernel(kCPU, input, gamma, beta, M, N, eps, &out, /*mean=*/nullptr, /*rstd=*/nullptr); in layer_norm_cpu_out()
106 Tensor rstd = at::empty({M}, X->options().dtype(dtype)); in layer_norm_cpu() local
108 layer_norm_with_mean_rstd_out(Y, mean, rstd, *X, normalized_shape, *gamma, *beta, eps, M, N); in layer_norm_cpu()
109 return std::make_tuple(std::move(Y), std::move(mean), std::move(rstd)); in layer_norm_cpu()
117 const Tensor& rstd, in layer_norm_backward_cpu() argument
182 kCPU, dY, *X, mean, rstd, *gamma, M, N, &dX, &dgamma, &dbeta); in layer_norm_backward_cpu()
253 at::Tensor rstd = std::get<2>(outputs); in math_native_layer_norm() local
[all …]
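
layer_norm_cpu flattens the input into M rows of N elements (N is the product of normalized_shape, M the product of the leading dims), allocates mean and rstd with M entries each, and then views them back to the leading shape padded with ones so they broadcast later. A small shape computation with hypothetical sizes:

    #include <cstdio>
    #include <vector>

    int main() {
      // Hypothetical input [8, 16, 512], normalized over the last dimension.
      const std::vector<long> input_shape = {8, 16, 512};
      const std::vector<long> normalized_shape = {512};

      long M = 1, N = 1;
      const size_t axis = input_shape.size() - normalized_shape.size();
      for (size_t i = 0; i < input_shape.size(); ++i)
        (i < axis ? M : N) *= input_shape[i];

      // mean/rstd are allocated as {M} and viewed as the leading dims plus a
      // trailing 1 per normalized dim, e.g. {8, 16, 1} here.
      std::printf("M=%ld N=%ld stat_shape={%ld, %ld, 1}\n",
                  M, N, input_shape[0], input_shape[1]);
      return 0;
    }
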
group_norm.cpp
99 Tensor rstd = at::empty({N, group}, X.options().dtype(dtype)); in native_group_norm() local
101 X.device().type(), X, gamma, beta, N, C, HxW, group, eps, Y, mean, rstd); in native_group_norm()
102 return std::make_tuple(Y, mean, rstd); in native_group_norm()
109 const Tensor& rstd, in native_group_norm_backward() argument
123 bool mixed_type = is_mixed_type(X, mean, rstd); in native_group_norm_backward()
125 check_mixed_data_type(X, mean, rstd); in native_group_norm_backward()
165 rstd, in native_group_norm_backward()
254 …at::Tensor rstd = std::get<2>(outputs).to(c10::TensorOptions().dtype(input.scalar_type())).view({N… in math_group_norm() local
255 return std::make_tuple(out, mean, rstd); in math_group_norm()
group_norm.h
22 Tensor& /* rstd */);
28 const Tensor& /* rstd */,
layer_norm.h
89 Tensor* /* rstd */);
95 const Tensor& /* rstd */,
/aosp_15_r20/external/pytorch/torch/nn/utils/_expanded_weights/
layer_norm_expanded_weights.py
31 output, mean, rstd = forward_helper(
41 ctx.mean, ctx.rstd = mean, rstd
53 mean, rstd = ctx.mean, ctx.rstd
67 rstd,
group_norm_expanded_weights.py
34 output, mean, rstd = forward_helper(
41 ctx.mean, ctx.rstd = mean, rstd
52 mean, rstd = ctx.mean, ctx.rstd
77 rstd,
instance_norm_expanded_weights.py
67 rstd = 1 / torch.sqrt(var + eps)
78 rstd,
/aosp_15_r20/external/libxml2/os400/
xmlcatalog.cmd
21 EXPR(*YES) RSTD(*YES) DFT(*XML) +
36 RSTD(*YES) SPCVAL((*YES '--convert') (*NO '')) +
44 EXPR(*YES) DFT(*YES) RSTD(*YES) PMTCTL(TYPESGML) +
50 RSTD(*YES) SPCVAL((*YES '-v') (*NO '')) +
69 EXPR(*YES) RSTD(*YES) SPCVAL( +
xmllint.cmd
26 SPCVAL(*DTDURL *DTDFPI) EXPR(*YES) RSTD(*YES) +
37 RSTD(*YES) DFT(*XSD) +
72 RSTD(*YES) DFT(*NONE) +
94 MAX(50) RSTD(*YES) PROMPT('Options') +
/aosp_15_r20/external/pytorch/torch/_decomp/
decompositions_for_jvp.py
118 input: Tensor, rstd: Tensor, inner_dim_indices: List[int], keepdim: bool
125 eps = torch.pow(1 / rstd, 2) - var # this makes me so sad inside
127 rstd = 1 / torch.sqrt(var + eps)
128 return mean, rstd
137 rstd: Tensor,
164 mean_, rstd_ = recompute_mean_var(input, rstd, inner_dim_indices, keepdim=True)
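
recompute_mean_var works backwards from the saved statistic: eps was not stashed by the forward pass, so it is recovered from rstd and the recomputed variance, and rstd is then rebuilt so the decomposition stays numerically consistent with what was saved. A scalar restatement of lines 125-127 (values are hypothetical):

    #include <cmath>
    #include <cstdio>

    int main() {
      // Pretend these came from the forward pass: rstd = 1 / sqrt(var + eps).
      const double var = 4.0, eps_true = 1e-5;
      const double rstd_saved = 1.0 / std::sqrt(var + eps_true);

      // Recover eps from the saved rstd, then rebuild rstd from the variance.
      const double eps = 1.0 / (rstd_saved * rstd_saved) - var;
      const double rstd_new = 1.0 / std::sqrt(var + eps);
      std::printf("eps=%g rstd_saved=%.12f rstd_new=%.12f\n",
                  eps, rstd_saved, rstd_new);
      return 0;
    }
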
decompositions.py
1567 rstd: Tensor,
1576 grad_output, input, mean, rstd, allow_cpu_scalar_tensors=False
1579 utils.check_same_shape(mean, rstd, allow_cpu_scalar_tensors=False)
1612 rstd.unsqueeze(-1),
1619 rstd.unsqueeze(-1),
1620 torch.ones((1, group, cpg), device=rstd.device),
1622 c2 = (db_val * mean - ds_val) * rstd * rstd * rstd * s
1623 c3 = -c2 * mean - db_val * rstd * s
1638 * rstd.unsqueeze(-1)
1655 rstd: Tensor,
[all …]
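
The c2/c3 lines above are the input-gradient coefficients of the group-norm backward decomposition. Read off the snippet, with r the per-group rstd, \mu the per-group mean, ds and db the per-group reductions of grad_out * input and grad_out, and s a per-group normalizer (presumably one over the group's element count, inferred from context):

    c_2 = (db \cdot \mu - ds)\, r^{3} s, \qquad c_3 = -c_2\,\mu - db\, r\, s

In outline, the input gradient then combines a grad_out term scaled by gamma and r with c_2 * input + c_3; the exact broadcasting is what the unsqueeze calls above handle.
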
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/glsl/
native_layer_norm.glsl
69 VEC4_T rstd = pow(var + epsilon, VEC4_T(-0.5));
70 VEC4_T offset = -rstd * mean;
78 VEC4_T outtex = (v * rstd + offset) * weight + bias;
83 write_texel(t_rstd, lpos, rstd);
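
The shader folds the mean subtraction into a precomputed offset so the per-texel work becomes a fused multiply-add: offset = -rstd * mean is computed once, and each texel then only needs (v * rstd + offset) * weight + bias. The identity behind it, with r the reciprocal standard deviation:

    (v - \mu)\, r\, w + b \;=\; (v\, r - r\mu)\, w + b, \qquad \text{offset} = -r\mu
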
/aosp_15_r20/external/pytorch/torch/distributed/tensor/_ops/
_math_ops.py
788 # for the triple return values (out, mean, rstd).
807 # we use OpStrategy because the output (out, mean, rstd)
880 # args must be: grad_out, input, normalized_shape, mean, rstd,
920 # grad_out, rstd, and normalized input, among which rstd
949 # arg: mean, rstd
967 # d_weight = sum(grad_out * (input - mean) / rstd, outer_dim, keepdim=False)
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
Normalization.cpp
107 auto rstd = empty_mkldnn( in mkldnn_layer_norm_last_index_weight_bias_f32() local
115 auto rstd_it = at::native::itensor_from_mkldnn(rstd); in mkldnn_layer_norm_last_index_weight_bias_f32()
129 return std::make_tuple(dst, mean, rstd); in mkldnn_layer_norm_last_index_weight_bias_f32()
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
NativeLayerNorm.cpp
37 vTensorPtr rstd = graph->get_tensor(args[0].refs[2]); in resize_native_layer_norm_node() local
48 rstd->virtual_resize(mean_size); in resize_native_layer_norm_node()
/aosp_15_r20/external/executorch/kernels/optimized/cpu/
op_native_layer_norm.cpp
35 Tensor& rstd) { in layer_norm() argument
50 CTYPE* rstd_data = rstd.mutable_data_ptr<CTYPE>(); in layer_norm()
/aosp_15_r20/external/executorch/kernels/portable/cpu/
op_native_layer_norm.cpp
32 Tensor& rstd) { in layer_norm() argument
45 CTYPE* rstd_data = rstd.mutable_data_ptr<CTYPE>(); in layer_norm()
op_native_group_norm.cpp
35 Tensor& rstd) { in group_norm() argument
51 CTYPE* rstd_data = rstd.mutable_data_ptr<CTYPE>(); in group_norm()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/
Normalization.mm
918 const Tensor& rstd,
1055 MPSGraphTensor* rstdTensor = mpsGraphRankedPlaceHolder(mpsGraph, rstd);
1078 // Reshape mean and rstd to [1, M, -1]
1128 // reverseVariance is square of rstd
1180 auto saveVarPlaceholder = Placeholder(cachedGraph->rstdTensor_, rstd);
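
The "reverseVariance is square of rstd" comment at line 1128 follows from the same convention used throughout these results; squaring the saved statistic recovers the reciprocal of the eps-shifted variance without touching the variance tensor again:

    r = \frac{1}{\sqrt{\sigma^{2} + \varepsilon}} \;\Longrightarrow\; r^{2} = \frac{1}{\sigma^{2} + \varepsilon}
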
