
Searched full:save_var (Results 1 – 14 of 14) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
Normalization.cu
306 void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) { in batch_norm_mean_var() argument
314 save_mean, save_var, self, dummy_epsilon); in batch_norm_mean_var()
320 (!save_var.defined() || save_var.is_contiguous())) { in batch_norm_mean_var()
324 save_mean, save_var, self, dummy_epsilon); in batch_norm_mean_var()
339 at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims, in batch_norm_mean_var()
347 const Tensor& save_mean, const Tensor& save_var, in batch_norm_update_stats() argument
355 .add_input(save_var) in batch_norm_update_stats()
381 const Tensor& save_mean, const Tensor& save_var, in batch_norm_update_stats_and_invert() argument
388 .add_output(save_var) in batch_norm_update_stats_and_invert()
390 .add_input(save_var) in batch_norm_update_stats_and_invert()
[all …]
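
Note: the hits above show the CUDA path computing per-channel statistics with a fused var_mean reduction (line 339) and, in the _and_invert variant, also inverting the variance for the backward pass. A minimal sketch of what the statistics computation amounts to, expressed with public PyTorch ops rather than the kernel itself (the reduce dims and the biased-variance choice mirror the var_mean_out call; names here are illustrative):

    import torch

    def batch_norm_mean_var_sketch(x: torch.Tensor):
        # Reduce over every dim except the channel dim (dim 1), as the kernel does.
        reduce_dims = [0] + list(range(2, x.dim()))
        # Biased (population) variance: batch norm normalizes with 1/N, not 1/(N-1).
        save_var, save_mean = torch.var_mean(x, dim=reduce_dims, unbiased=False)
        return save_mean, save_var

    x = torch.randn(8, 3, 16, 16)
    save_mean, save_var = batch_norm_mean_var_sketch(x)  # each of shape [3]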
Normalization.cuh
719 …// The input_transform kernel is pointwise, but we need to balance reading parameters (save_var/me… in batch_norm_elemt_cuda_template()
867 // The kernel is pointwise, but we need to balance reading parameters (save_var/mean, in batch_norm_backward_elemt_cuda_template()
918 // The kernel is pointwise, but we need to balance reading parameters (save_var/mean, in batch_norm_backward_elemt_cuda_template()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/miopen/
BatchNorm_miopen.cpp
113 Tensor save_mean, save_var; in miopen_batch_norm() local
118 save_var = at::empty({ num_features }, weight_t.options()); in miopen_batch_norm()
134 save_var.mutable_data_ptr())); in miopen_batch_norm()
137 save_var = at::empty({0}, weight_t.options()); in miopen_batch_norm()
153 // save_mean and save_var can be undefined in miopen_batch_norm()
156 return std::tuple<Tensor, Tensor, Tensor>{output_t, save_mean, save_var}; in miopen_batch_norm()
184 save_var{ save_var_t, "save_var", 5 }; in miopen_batch_norm_backward() local
187 checkAllDefined(c, {input, grad_output, weight, save_mean, save_var}); in miopen_batch_norm_backward()
188 checkAllSameGPU(c, {input, grad_output, weight, save_mean, save_var}); in miopen_batch_norm_backward()
195 checkAllSameType(c, {weight, save_mean, save_var}); in miopen_batch_norm_backward()
[all …]
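
Note: the miopen and cudnn entry points share the allocation pattern visible in these hits: per-feature save_mean/save_var buffers in training, size-0 placeholders in inference (hence the "can be undefined" comment at line 153). A hedged sketch of that pattern in Python, names illustrative:

    import torch

    def alloc_saved_stats(weight: torch.Tensor, num_features: int, training: bool):
        if training:
            # One statistic per feature/channel; dtype and device follow the weight.
            save_mean = torch.empty(num_features, dtype=weight.dtype, device=weight.device)
            save_var = torch.empty(num_features, dtype=weight.dtype, device=weight.device)
        else:
            # Matches the at::empty({0}, weight_t.options()) placeholders above.
            save_mean = torch.empty(0, dtype=weight.dtype, device=weight.device)
            save_var = torch.empty(0, dtype=weight.dtype, device=weight.device)
        return save_mean, save_var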
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
Normalization.cpp
593 auto [output, save_mean, save_var, reserve] = in _batch_norm_impl_index()
598 output, save_mean, save_var, reserve, 1); in _batch_norm_impl_index()
762 … bool train, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var) { in batch_norm_cpu_out() argument
780 …sform_input_template<scalar_t, opmath_t>(self, weight, bias, save_mean, save_var, running_mean, ru… in batch_norm_cpu_out()
782 // Resize save_mean and save_var in batch_norm_cpu_out()
784 at::native::resize_output(save_var, {self.size(1)}); in batch_norm_cpu_out()
785 …e<scalar_t, opmath_t, InvStd>(self, running_mean, running_var, momentum, eps, save_mean, save_var); in batch_norm_cpu_out()
790 …sform_input_template<scalar_t, scalar_t>(self, weight, bias, save_mean, save_var, running_mean, ru… in batch_norm_cpu_out()
792 // Resize save_mean and save_var in batch_norm_cpu_out()
794 at::native::resize_output(save_var, {self.size(1)}); in batch_norm_cpu_out()
[all …]
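
Note: batch_norm_cpu_out resizes save_mean/save_var to the channel count (self.size(1)) only on the training path, which is why the saved stats come back empty in inference. The same triple is visible from Python through the ATen binding; a hedged usage sketch (the exact meaning of the third output is backend-dependent: the CUDA _and_invert hits above store an inverted std rather than a variance):

    import torch

    x = torch.randn(4, 3, 8, 8)
    weight, bias = torch.ones(3), torch.zeros(3)
    running_mean, running_var = torch.zeros(3), torch.ones(3)

    out, save_mean, save_var = torch.native_batch_norm(
        x, weight, bias, running_mean, running_var,
        training=True, momentum=0.1, eps=1e-5)
    print(save_mean.shape, save_var.shape)  # both torch.Size([3]) in training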
native_functions.yaml
1864 …, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, T…
4053 …, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -…
6601 …, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, bool update, flo…
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cudnn/
BatchNorm.cpp
185 Tensor save_mean, save_var; in cudnn_batch_norm() local
192 save_var = at::empty({num_features}, weight_t.options()); in cudnn_batch_norm()
233 save_var.mutable_data_ptr(), in cudnn_batch_norm()
243 save_var = at::empty({0}, weight_t.options()); in cudnn_batch_norm()
261 // save_mean and save_var can be undefined in cudnn_batch_norm()
265 output_t, save_mean, save_var, reserve}; in cudnn_batch_norm()
297 save_var{save_var_t, "save_var", 5}, in cudnn_batch_norm_backward() local
301 checkAllDefined(c, {input, grad_output, weight, save_mean, save_var}); in cudnn_batch_norm_backward()
302 checkAllSameGPU(c, {input, grad_output, weight, save_mean, save_var}); in cudnn_batch_norm_backward()
309 checkAllSameType(c, {weight, save_mean, save_var}); in cudnn_batch_norm_backward()
[all …]
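
Note: the backward entry points here and in the miopen file guard their inputs with checkAllDefined / checkAllSameGPU / checkAllSameType before dispatching. Expressed as plain Python asserts, the contract looks roughly like this (illustrative only):

    def check_backward_args(input, grad_output, weight, save_mean, save_var):
        tensors = [input, grad_output, weight, save_mean, save_var]
        # checkAllDefined: every tensor must exist.
        assert all(t is not None for t in tensors)
        # checkAllSameGPU: no silent cross-device dispatch.
        assert len({t.device for t in tensors}) == 1
        # checkAllSameType: the weight and the saved stats share a dtype.
        assert weight.dtype == save_mean.dtype == save_var.dtype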
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/
Normalization.mm
79 Tensor& save_var) {
107 return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_var);
188 Update the running stats to be stored into save_mean and save_var,
195 Calculate the save_var directly from the running variance
324 auto saveVarPlaceholder = Placeholder(cachedGraph->saveVarTensor_, save_var);
355 save_var.resize_({0});
357 return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_var);
382 auto save_var = at::empty({n_input},
401 save_var);
402 return std::make_tuple(output, save_mean, save_var);
[all …]
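
Note: the comments at lines 188-195 describe the standard running-stat update that these helpers fold into save_mean/save_var. The formula, with PyTorch's documented unbiased correction on the variance term, is roughly as follows (a sketch, not the Metal graph code):

    def update_running_stats(running_mean, running_var, batch_mean,
                             batch_var_biased, momentum, n):
        # PyTorch's running variance tracks the unbiased estimate: scale by n/(n-1).
        unbiased_var = batch_var_biased * (n / (n - 1))
        new_mean = (1 - momentum) * running_mean + momentum * batch_mean
        new_var = (1 - momentum) * running_var + momentum * unbiased_var
        return new_mean, new_var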
/aosp_15_r20/external/pytorch/tools/autograd/
derivatives.yaml
1261 …, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, bool update, flo…
1262 …, grads[2], grad_out, running_mean, running_var, update, eps, save_mean, save_var, grad_input_mask)
1264 save_var: not_implemented("batch_norm_backward save_var")
2681 # HACK: save_mean and save_var are going to be passed in as
2684 …, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, T…
2686 save_var: not_implemented("cudnn_batch_norm_backward save_var")
2688 …ds[2], grad_output, running_mean, running_var, true, epsilon, save_mean, save_var, grad_input_mask)
2738 …, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -…
2740 save_var: not_implemented("miopen_batch_norm_backward save_var")
2741 …ds[2], grad_output, running_mean, running_var, true, epsilon, save_mean, save_var, grad_input_mask)
gen_autograd_functions.py
563 def save_var(var: SavedAttribute, is_output: bool) -> None: function
807 save_var(var, is_output=False)
809 save_var(var, is_output=True)
/aosp_15_r20/external/pytorch/torch/_decomp/
decompositions_for_jvp.py
305 save_var: Optional[Tensor],
318 save_var,
decompositions.py
2419 save_var: Optional[Tensor],
2429 save_var,
2445 save_var: Optional[Tensor],
2456 save_var,
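
Note: in both decomposition files save_mean and save_var arrive as Optional[Tensor]: a backward decomposition must cope with stats that were never saved (or, in the jvp case, carry no tangents) by recomputing them from the input. A hedged illustration of that recomputation (the helper name is mine, not the file's):

    from typing import Optional
    import torch
    from torch import Tensor

    def stats_or_recompute(x: Tensor, save_mean: Optional[Tensor],
                           save_var: Optional[Tensor], reduce_dims: list):
        if save_mean is None or save_var is None:
            # Recompute biased batch statistics from the input.
            save_var, save_mean = torch.var_mean(x, dim=reduce_dims, unbiased=False)
        return save_mean, save_var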
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
Normalization.cpp
214 auto [output, save_mean, save_var] = in _batch_norm_with_update_mkldnn()
217 return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, save_mean, save_var, reserve); in _batch_norm_with_update_mkldnn()
/aosp_15_r20/external/executorch/exir/tests/
dynamic_shape_models.py
48 # for inference, the save_mean and save_var should be empty
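
Note: that expectation can be checked through the ATen binding, assuming the inference path leaves the placeholders at size zero as the cudnn/miopen hits above allocate them (a sketch, not the executorch test itself):

    import torch

    x = torch.randn(2, 3, 4, 4)
    out, save_mean, save_var = torch.native_batch_norm(
        x, None, None, torch.zeros(3), torch.ones(3),
        training=False, momentum=0.1, eps=1e-5)
    print(save_mean.numel(), save_var.numel())  # expected: 0 0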
/aosp_15_r20/external/python/cpython3/
configure.ac
27 dnl - _SAVE_VAR([VAR]) Helper for SAVE_ENV; stores VAR as save_VAR
28 dnl - _RESTORE_VAR([VAR]) Helper for RESTORE_ENV; restores VAR from save_VAR