
Searched full:save_mean (Results 1 – 21 of 21) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
Normalization.cu
306 void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) { in batch_norm_mean_var() argument
314 save_mean, save_var, self, dummy_epsilon); in batch_norm_mean_var()
319 if ((!save_mean.defined() || save_mean.is_contiguous()) && in batch_norm_mean_var()
324 save_mean, save_var, self, dummy_epsilon); in batch_norm_mean_var()
339 at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims, in batch_norm_mean_var()
347 const Tensor& save_mean, const Tensor& save_var, in batch_norm_update_stats() argument
354 .add_input(save_mean) in batch_norm_update_stats()
381 const Tensor& save_mean, const Tensor& save_var, in batch_norm_update_stats_and_invert() argument
389 .add_const_input(save_mean) in batch_norm_update_stats_and_invert()
435 …t, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_in… in batch_norm_cuda_out() argument
[all …]
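
The batch_norm_mean_var lines above reduce over every dimension except the channel dimension via var_mean_out. A minimal sketch of the same per-channel reduction in Python, assuming an NCHW input; the names here are illustrative, not the kernel's own:

    import torch

    x = torch.randn(8, 3, 16, 16)              # NCHW input
    reduce_dims = [0, 2, 3]                    # every dim except channels
    # batch norm normalizes with the biased (population) variance estimator
    save_var, save_mean = torch.var_mean(x, dim=reduce_dims, unbiased=False)
    print(save_mean.shape, save_var.shape)     # torch.Size([3]) torch.Size([3])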
Normalization.cuh
273 GenericPackedTensorAccessor<stat_accscalar_t, 1, RestrictPtrTraits, index_t> save_mean, in batch_norm_collect_statistics_kernel() argument
345 if (save_mean.data() != NULL) { in batch_norm_collect_statistics_kernel()
346 save_mean[plane] = avg; in batch_norm_collect_statistics_kernel()
365 … const GenericPackedTensorAccessor<const stat_accscalar_t, 1, DefaultPtrTraits, index_t> save_mean, in batch_norm_backward_kernel() argument
375 mean = save_mean[plane]; in batch_norm_backward_kernel()
642 auto save_mean = packed_accessor_or_dummy< in batch_norm_backward_cuda_template() local
643 const accscalar_t, 1, DefaultPtrTraits, index_t>(save_mean_, "save_mean"); in batch_norm_backward_cuda_template()
654 save_mean, save_invstd, train, epsilon); in batch_norm_backward_cuda_template()
762 auto save_mean = get_packed_accessor< in batch_norm_gather_stats_cuda_template() local
763 accscalar_t, 1, RestrictPtrTraits, index_t>(save_mean_, "save_mean"); in batch_norm_gather_stats_cuda_template()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
Normalization.cpp
137 const Tensor& save_mean /* optional */, const Tensor& save_invstd /* optional */, in batch_norm_cpu_transform_input_template() argument
152 save_mean, save_invstd, running_mean, running_var, train, eps); in batch_norm_cpu_transform_input_template()
154 return std::make_tuple(output, save_mean, save_invstd); in batch_norm_cpu_transform_input_template()
168 auto mean = as_nd(train ? save_mean : running_mean); in batch_norm_cpu_transform_input_template()
196 return std::make_tuple(output, save_mean, save_invstd); in batch_norm_cpu_transform_input_template()
202 double momentum, double eps, Tensor& save_mean, Tensor& save_var_transform) { in batch_norm_cpu_update_stats_template() argument
214 auto save_mean_a = save_mean.accessor<param_t, 1>(); in batch_norm_cpu_update_stats_template()
244 return std::make_tuple(save_mean, save_var_transform); in batch_norm_cpu_update_stats_template()
280 return std::make_tuple(save_mean, save_var_transform); in batch_norm_cpu_update_stats_template()
297 …Tensor save_mean = is_contiguous(input) ? at::empty({n_input}, input.options().dtype(dtype)) : at:… in batch_norm_cpu_update_stats_template() local
[all …]
native_functions.yaml
1080 …put, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_va…
1864 …tput, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_va…
4053 …tput, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_va…
4296 …bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save…
4321 …bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save…
4335 …bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save…
4365 …put, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_in…
6590 …) running_var, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save…
6601 …nput, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_va…
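
The schemas above expose save_mean/save_invstd both as return values of the functional variant and as the (b!)/(e!) out arguments of the out variants. A hedged sketch of the functional call, assuming torch.native_batch_norm is exposed in this build:

    import torch

    x = torch.randn(4, 3, 8, 8)
    weight, bias = torch.ones(3), torch.zeros(3)
    running_mean, running_var = torch.zeros(3), torch.ones(3)
    out, save_mean, save_invstd = torch.native_batch_norm(
        x, weight, bias, running_mean, running_var,
        True,    # training: statistics are computed from the batch
        0.1,     # momentum for the running_mean / running_var update
        1e-5)    # eps
    print(save_mean.shape, save_invstd.shape)  # torch.Size([3]) torch.Size([3])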
/aosp_15_r20/external/pytorch/aten/src/ATen/native/miopen/
BatchNorm_miopen.cpp
113 Tensor save_mean, save_var; in miopen_batch_norm() local
117 save_mean = at::empty({ num_features }, weight_t.options()); in miopen_batch_norm()
133 save_mean.mutable_data_ptr(), in miopen_batch_norm()
136 save_mean = at::empty({0}, weight_t.options()); in miopen_batch_norm()
153 // save_mean and save_var can be undefined in miopen_batch_norm()
156 return std::tuple<Tensor, Tensor, Tensor>{output_t, save_mean, save_var}; in miopen_batch_norm()
183 save_mean{ save_mean_t, "save_mean", 4 }, in miopen_batch_norm_backward() local
187 checkAllDefined(c, {input, grad_output, weight, save_mean, save_var}); in miopen_batch_norm_backward()
188 checkAllSameGPU(c, {input, grad_output, weight, save_mean, save_var}); in miopen_batch_norm_backward()
195 checkAllSameType(c, {weight, save_mean, save_var}); in miopen_batch_norm_backward()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cudnn/
BatchNorm.cpp
185 Tensor save_mean, save_var; in cudnn_batch_norm() local
191 save_mean = at::empty({num_features}, weight_t.options()); in cudnn_batch_norm()
232 save_mean.mutable_data_ptr(), in cudnn_batch_norm()
242 save_mean = at::empty({0}, weight_t.options()); in cudnn_batch_norm()
261 // save_mean and save_var can be undefined in cudnn_batch_norm()
265 output_t, save_mean, save_var, reserve}; in cudnn_batch_norm()
296 weight{weight_t, "weight", 3}, save_mean{save_mean_t, "save_mean", 4}, in cudnn_batch_norm_backward() local
301 checkAllDefined(c, {input, grad_output, weight, save_mean, save_var}); in cudnn_batch_norm_backward()
302 checkAllSameGPU(c, {input, grad_output, weight, save_mean, save_var}); in cudnn_batch_norm_backward()
309 checkAllSameType(c, {weight, save_mean, save_var}); in cudnn_batch_norm_backward()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
batch_norm_kernel.cpp
34 const Tensor& save_mean, const Tensor& save_invstd, in batch_norm_cpu_collect_linear_and_constant_terms() argument
40 auto save_mean_a = conditional_accessor_1d<const param_t>(save_mean); in batch_norm_cpu_collect_linear_and_constant_terms()
76 const Tensor& weight, const Tensor& bias, const Tensor& save_mean, const Tensor& save_invstd, in batch_norm_cpu_contiguous_impl() argument
91 save_mean, save_invstd, running_mean, running_var, train, eps); in batch_norm_cpu_contiguous_impl()
128 const Tensor& weight, const Tensor& bias, const Tensor& save_mean, const Tensor& save_invstd, in batch_norm_cpu_channels_last_impl() argument
143 save_mean, save_invstd, running_mean, running_var, train, eps); in batch_norm_cpu_channels_last_impl()
407 …const Tensor& running_mean, const Tensor& running_var, const Tensor& save_mean, const Tensor& save… in batch_norm_cpu_backward_contiguous_impl() argument
430 auto save_mean_a = conditional_accessor_1d<const scalar_t>(save_mean); in batch_norm_cpu_backward_contiguous_impl()
530 …const Tensor& running_mean, const Tensor& running_var, const Tensor& save_mean, const Tensor& save… in batch_norm_cpu_backward_channels_last_impl() argument
547 const scalar_t* save_mean_data = conditional_data_ptr<const scalar_t>(save_mean); in batch_norm_cpu_backward_channels_last_impl()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/
Normalization.mm
78 Tensor& save_mean,
107 return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_var);
125 const int64_t N = self.numel() / save_mean.numel();
188 Update the running stats to be stored into save_mean and save_var,
323 auto saveMeanPlaceholder = Placeholder(cachedGraph->saveMeanTensor_, save_mean);
354 save_mean.resize_({0});
357 return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_var);
374 auto save_mean = at::empty({n_input},
400 save_mean,
402 return std::make_tuple(output, save_mean, save_var);
[all …]
/aosp_15_r20/external/pytorch/torch/_decomp/
decompositions_for_jvp.py
222 save_mean: Optional[Tensor],
234 mean = save_mean
238 save_mean is not None and save_invstd is not None
239 ), "when train=True, save_mean and save_invstd are required"
304 save_mean: Optional[Tensor],
317 save_mean,
decompositions.py
1817 save_mean = torch.squeeze(mean, reduction_dims)
1820 new_running_mean = momentum * save_mean + (1 - momentum) * running_mean
1843 save_mean = running_mean
1846 save_mean = input.new_zeros((0,))
1863 save_mean = save_mean.to(dtype=input.dtype)
1867 save_mean,
1875 @out_wrapper("out", "save_mean", "save_invstd")
1886 output, save_mean, save_rstd, _, _ = native_batch_norm_helper(
1889 return output, save_mean, save_rstd
1984 output, save_mean, save_rstd, _, _ = native_batch_norm_helper(
[all …]
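
The decomposition lines above compute save_mean from the batch and blend it into the running statistic with momentum. A small sketch of that update, using illustrative shapes rather than the decomposition's own variables:

    import torch

    momentum = 0.1
    running_mean = torch.zeros(3)
    x = torch.randn(4, 3, 8, 8)
    save_mean = x.mean(dim=(0, 2, 3))          # squeeze(mean, reduction_dims)
    new_running_mean = momentum * save_mean + (1 - momentum) * running_mean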
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
Normalization.cpp
214 auto [output, save_mean, save_var] = in _batch_norm_with_update_mkldnn()
217 return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, save_mean, save_var, reserve); in _batch_norm_with_update_mkldnn()
256 const Tensor& save_mean = c10::value_or_else(save_mean_opt, [] {return Tensor();}); in mkldnn_batch_norm_backward() local
263 ideep::tensor& m = itensor_from_mkldnn(save_mean); in mkldnn_batch_norm_backward()
/aosp_15_r20/external/pytorch/tools/autograd/
derivatives.yaml
1219 …put, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_in…
1220 …], grads[1], grads[2], grad_out, running_mean, running_var, train, eps, save_mean, save_invstd, gr…
1221 save_mean: not_implemented("native_batch_norm_backward save_mean")
1261 …nput, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_va…
1262 …, grads[1], grads[2], grad_out, running_mean, running_var, update, eps, save_mean, save_var, grad_…
1263 save_mean: not_implemented("batch_norm_backward save_mean")
2681 # HACK: save_mean and save_var are going to be passed in as
2684 …tput, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_va…
2685 save_mean: not_implemented("cudnn_batch_norm_backward save_mean")
2688 …ds[1], grads[2], grad_output, running_mean, running_var, true, epsilon, save_mean, save_var, grad_…
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
BatchRulesNorm.cpp
218 const Tensor& save_mean = *save_mean_opt; in batch_norm_backward_plumbing() local
220 TORCH_INTERNAL_ASSERT(save_mean.defined()); in batch_norm_backward_plumbing()
246 auto [save_mean_value, save_mean_bdim] = unwrapTensorAtLevel(save_mean, cur_level); in batch_norm_backward_plumbing()
260 auto mean = training ? save_mean : running_mean; in batch_norm_backward_plumbing()
/aosp_15_r20/external/executorch/exir/tests/
dynamic_shape_models.py
48 # for inference, the save_mean and save_var should be empty
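
The comment above notes that in inference mode the saved statistics come back empty. A quick check, assuming torch.native_batch_norm is callable in this build and that the eval path returns size-0 tensors as the comment describes:

    import torch

    x = torch.randn(2, 4, 8, 8)
    out, save_mean, save_var = torch.native_batch_norm(
        x, None, None, torch.zeros(4), torch.ones(4),
        False,  # inference: use running stats, nothing saved for backward
        0.1, 1e-5)
    print(save_mean.numel(), save_var.numel())  # expected: 0 0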
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
FunctionsManual.h
718 const std::optional<Tensor>& save_mean,
812 const Tensor& save_mean,
FunctionsManual.cpp
4661 const std::optional<Tensor>& save_mean, in batchnorm_double_backward() argument
4686 // for half inputs, save_mean, save_invstd are float (ideally, we would cast in batchnorm_double_backward()
4689 training ? toNonOptTensor(save_mean).to(input.scalar_type()) in batchnorm_double_backward()
4812 auto save_mean = save_mean_t.reshape({M, 1}); in layer_norm_double_backward() local
4836 // for half inputs, save_mean, save_invstd are float in layer_norm_double_backward()
4838 auto mu = save_mean.to(input.scalar_type()); in layer_norm_double_backward()
/aosp_15_r20/external/pytorch/torch/csrc/utils/
schema_info.cpp
288 …bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save… in getTrainingOps()
/aosp_15_r20/external/pytorch/torch/csrc/lazy/core/
shape_inference.h
64 …optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::op…
shape_inference.cpp
576 const ::std::optional<at::Tensor>& save_mean, in compute_shape_native_batch_norm_backward()
/aosp_15_r20/external/pytorch/test/
test_meta.py
1496 save_mean = torch.zeros((sample.input.shape[1], ), device=device, dtype=dtype) if train else None
1500 save_mean, save_invstd, train, sample.kwargs.get("eps", 1e-5)]
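
The test above passes save_mean/save_invstd (or None in inference) into the backward op's argument list. A hedged end-to-end sketch via torch.ops.aten, assuming the native_batch_norm_backward schema listed in native_functions.yaml:

    import torch

    x = torch.randn(2, 4, 8, 8)
    weight, bias = torch.ones(4), torch.zeros(4)
    running_mean, running_var = torch.zeros(4), torch.ones(4)
    out, save_mean, save_invstd = torch.native_batch_norm(
        x, weight, bias, running_mean, running_var, True, 0.1, 1e-5)
    # backward consumes the statistics saved by the forward pass
    grad_in, grad_w, grad_b = torch.ops.aten.native_batch_norm_backward(
        torch.ones_like(out), x, weight, running_mean, running_var,
        save_mean, save_invstd, True, 1e-5, [True, True, True])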
/aosp_15_r20/external/pytorch/test/cpp/jit/
test_misc.cpp
276 // weight, Tensor running_mean, Tensor running_var, Tensor save_mean, Tensor in TEST()