/aosp_15_r20/external/pytorch/aten/src/ATen/native/
GridSampler.h
    39  // `grad_in`.
    43  bool align_corners, scalar_t *grad_in) {  in grid_sampler_unnormalize_set_grad() argument
    46  *grad_in = static_cast<scalar_t>(size - 1) / 2;  in grid_sampler_unnormalize_set_grad()
    50  *grad_in = static_cast<scalar_t>(size) / 2;  in grid_sampler_unnormalize_set_grad()
    62  // it also returns the `d output / d input` via pointer argument `grad_in`.
    66  scalar_t *grad_in) {  in clip_coordinates_set_grad() argument
    70  *grad_in = static_cast<scalar_t>(0);  in clip_coordinates_set_grad()
    75  *grad_in = static_cast<scalar_t>(0);  in clip_coordinates_set_grad()
    78  *grad_in = static_cast<scalar_t>(1);  in clip_coordinates_set_grad()
   108  // `grad_in`.  [all …]
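These fragments all follow one pattern: a helper computes a transformed coordinate and, through the `grad_in` out-parameter, the derivative of that result with respect to its input, so the backward pass can reuse the chain-rule factor. Below is a minimal Python sketch of the two matched helpers; the expressions for the returned values are truncated in the excerpt, so they are reconstructed here from the visible `grad_in` writes and the usual grid-sampler conventions, and should be treated as an approximation of the real code:

```python
# Sketch only: mirrors grid_sampler_unnormalize_set_grad and
# clip_coordinates_set_grad; each returns (value, d value / d coord).
def unnormalize_set_grad(coord, size, align_corners):
    if align_corners:
        # [-1, 1] maps to the centers of the corner pixels
        grad = (size - 1) / 2.0
        return ((coord + 1) / 2.0) * (size - 1), grad
    else:
        # [-1, 1] maps to the outer edges of the corner pixels
        grad = size / 2.0
        return ((coord + 1) * size - 1) / 2.0, grad

def clip_coordinates_set_grad(coord, clip_limit):
    # Clamp to [0, clip_limit - 1]; gradient is 0 where clamped, 1 inside,
    # matching the three *grad_in writes (0, 0, 1) in the excerpt.
    if coord <= 0:
        return 0.0, 0.0
    if coord >= clip_limit - 1:
        return float(clip_limit - 1), 0.0
    return coord, 1.0
```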
UnfoldBackward.h
    17  Tensor& grad_in,
    29  // grad_in does not mean that it is a gradient wrt to input,
    30  // grad_in/grad_out is just an input/output of unfold_backward kernel.
    34  const Tensor& grad_in,  in _make_unfold_backward_iter_over_grad_out() argument
    43  auto grad_in_dim_size = ensure_nonempty_size(grad_in, dim);  in _make_unfold_backward_iter_over_grad_out()
    60  /* prepare grad_in for TensorIterator { */  in _make_unfold_backward_iter_over_grad_out()
    61  auto grad_in_strides = ensure_nonempty_vec(grad_in.strides().vec());  in _make_unfold_backward_iter_over_grad_out()
    62  auto grad_in_sizes = ensure_nonempty_vec(grad_in.sizes().vec());  in _make_unfold_backward_iter_over_grad_out()
    73  auto grad_in_restrided = grad_in.squeeze(-1).as_strided(  in _make_unfold_backward_iter_over_grad_out()
    83  0, iter_dim_size, grad_in.options().dtype(at::kLong)  in _make_unfold_backward_iter_over_grad_out()
ReduceOps.cpp
  2301  auto grad_in = at::zeros_symint(sizes, grad_out.options());  in value_selecting_reduction_backward_symint() local
  2303  return grad_in.scatter(dim, indices_, grad_out);  in value_selecting_reduction_backward_symint()
  2305  return grad_in.scatter_(dim, indices_, grad_out);  in value_selecting_reduction_backward_symint()
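The matched lines sketch the whole function: allocate a zero tensor shaped like the reduction's input, then scatter `grad_out` to the positions recorded in `indices_` (the two `return`s are presumably out-of-place and in-place code paths). A Python rendering of that idea, checked against autograd; the `keepdim` handling is my assumption, suggested by the unsqueezed-looking name `indices_`:

```python
import torch

def value_selecting_reduction_backward(grad_out, dim, indices, sizes, keepdim):
    if not keepdim:
        grad_out = grad_out.unsqueeze(dim)
        indices = indices.unsqueeze(dim)
    # Zeros shaped like the reduction input, with grad routed to the
    # selected positions -- the pattern visible at lines 2301-2305.
    return torch.zeros(sizes, dtype=grad_out.dtype).scatter_(dim, indices, grad_out)

x = torch.randn(3, 4, requires_grad=True)
values, indices = x.max(dim=1)
values.sum().backward()
manual = value_selecting_reduction_backward(torch.ones(3), 1, indices, x.shape, keepdim=False)
assert torch.equal(x.grad, manual)
```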
native_functions.yaml
 10114  - func: unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
GridSampler.cuh
    34  // `grad_in`.
    39  bool align_corners, scalar_t *grad_in) {  in grid_sampler_unnormalize_set_grad() argument
    42  *grad_in = static_cast<scalar_t>(size - 1) / 2;  in grid_sampler_unnormalize_set_grad()
    46  *grad_in = static_cast<scalar_t>(size) / 2;  in grid_sampler_unnormalize_set_grad()
    59  // it also returns the `d output / d input` via pointer argument `grad_in`.
    63  scalar_t clip_coordinates_set_grad(scalar_t in, int clip_limit, scalar_t *grad_in) {  in clip_coordinates_set_grad() argument
    67  *grad_in = static_cast<scalar_t>(0);  in clip_coordinates_set_grad()
    72  *grad_in = static_cast<scalar_t>(0);  in clip_coordinates_set_grad()
    75  *grad_in = static_cast<scalar_t>(1);  in clip_coordinates_set_grad()
   105  // `grad_in`.  [all …]
UnfoldBackwardKernel.cu
    12  // grad_in does not mean that it is a gradient wrt to input,
    13  // grad_in/grad_out is just an input/output of unfold_backward kernel.
   123  const Tensor& grad_in,  in unfold_backward_cuda_kernel() argument
   130  auto last_dim = maybe_wrap_dim(-1, grad_in.dim());  in unfold_backward_cuda_kernel()
   132  auto grad_in_dim_stride = ensure_nonempty_stride(grad_in, dim);  in unfold_backward_cuda_kernel()
   133  auto grad_in_last_dim_stride = ensure_nonempty_stride(grad_in, last_dim);  in unfold_backward_cuda_kernel()
   134  auto grad_in_dim_size = ensure_nonempty_size(grad_in, dim);  in unfold_backward_cuda_kernel()
   139  grad_out, grad_in, dim, size, step);  in unfold_backward_cuda_kernel()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
UnfoldBackwardKernel.cpp
    17  // grad_in does not mean that it is a gradient wrt to input,
    18  // grad_in/grad_out is just an input/output of unfold_backward kernel.
    27  // unfold_backward receives grad_in and returns grad_out such that
    28  // grad_in.shape == out.shape,
    36  // In this case the iteration takes over grad_in and performs the following copy:
    37  // grad_out[..., i_out_dim,...] = grad_in[..., i_in_dim,..., i_in_last_dim],
    43  // grad_in[...,i_in_dim,...,i_in_last_dim], where
    51  // Simply put, given i_out_dim, we find which folds of grad_in
    53  // and then the corresponding value of grad_in[...,i_in_dim,...,i_in_last_dim]
   113  const Tensor& grad_in,  in unfold_backward_cpu_kernel() argument  [all …]
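The comments above pin down the contract: `grad_in` is shaped like `unfold`'s output (`grad_in.shape == out.shape`), and every element of the original input accumulates the `grad_in` entries of all (fold, position-in-fold) pairs that read it. Because `Tensor.unfold` is linear, that contract can be checked against autograd; a sketch, with the `(2, 8)` shapes chosen purely as an example:

```python
import torch

def unfold_backward_ref(grad_in, input_sizes, dim, size, step):
    # Adjoint of Tensor.unfold: let autograd accumulate grad_in over
    # every fold that reads each input element.
    x = torch.zeros(input_sizes, requires_grad=True)
    x.unfold(dim, size, step).backward(grad_in)
    return x.grad

g = torch.randn(2, 3, 4)  # grad for unfold(1, size=4, step=2) of a (2, 8) input
expected = unfold_backward_ref(g, (2, 8), dim=1, size=4, step=2)
actual = torch.ops.aten.unfold_backward(g, (2, 8), 1, 4, 2)
assert torch.allclose(actual, expected)
```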
PaddingKernel.cpp
   115  static inline void add_stub(scalar_t* grad_in, const scalar_t* grad_out, int64_t size) {  in add_stub() argument
   119  Vec grad_vec = Vec::loadu(grad_in + d) + Vec::loadu(grad_out + d);  in add_stub()
   120  grad_vec.store(grad_in + d);  in add_stub()
   126  grad_in[d] += grad_out[d];  in add_stub()
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
maxpooling_op.cc
  1030  const Tensor& grad_in, const Tensor& argmax,  in launch()
  1035  auto shard = [&grad_in, &argmax, &grad_out, include_batch_in_index](  in launch()
  1041  const int64_t input_size_per_batch = grad_in.NumElements() / batch_size;  in launch()
  1046  auto grad_in_flat = grad_in.flat<T>();  in launch()
  1117  const Tensor& grad_in = context->input(1);  in Compute() local
  1130  OP_REQUIRES(context, grad_in.shape() == params.forward_output_shape(),  in Compute()
  1133  ", but got ", grad_in.shape()));  in Compute()
  1148  context, params, grad_in, argmax, grad_out, include_batch_in_index_);  in Compute()
  1185  const Tensor& grad_in = context->input(1);  in Compute() local
  1199  context, grad_in.shape() == tensor_in.shape(),  in Compute()  [all …]
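Note that in this TensorFlow kernel the naming is inverted relative to the PyTorch files above: `grad_in` is the incoming gradient (input 1 of the op, shaped like the pooled output) and `grad_out` is the result. From the visible pieces (`input_size_per_batch`, `grad_in_flat`, the shape check against `params.forward_output_shape()`), the shard scatters each pooled-output gradient back to the input position recorded in `argmax`, optionally treating indices as batch-global. A loose Python rendering of that logic; the exact bounds checks and parallel sharding are omitted, and the accumulation (`+=`) is my reading of how overlapping windows must combine:

```python
import numpy as np

def max_pool_grad_with_argmax(grad_in, argmax, input_shape, include_batch_in_index):
    # grad_in / argmax have the pooled output's shape; returns grad w.r.t. input.
    grad_out = np.zeros(int(np.prod(input_shape)), dtype=grad_in.dtype)
    batch_size = input_shape[0]
    output_size_per_batch = grad_in.size // batch_size
    input_size_per_batch = grad_out.size // batch_size
    g, a = grad_in.ravel(), argmax.ravel()
    for b in range(batch_size):
        for i in range(output_size_per_batch):
            idx = a[b * output_size_per_batch + i]
            if not include_batch_in_index:
                idx += b * input_size_per_batch  # argmax was local to this batch element
            grad_out[idx] += g[b * output_size_per_batch + i]
    return grad_out.reshape(input_shape)
```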
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/
pooling_ops_test.py
   936  grad_in = constant_op.constant(tensor_output, shape=output_shape)
   937  out_op = gen_nn_ops.max_pool_grad_with_argmax(t, grad_in, argmax, ksize,
   945  grad_in = constant_op.constant(tensor_output, shape=output_shape)
   946  out_op = gen_nn_ops.max_pool_grad(t, orig_out, grad_in, ksize, strides,
   968  grad_in = constant_op.constant(tensor_input, shape=input_shape)
   970  t, grad_in, argmax, ksize, strides, padding)
   977  grad_in = constant_op.constant(tensor_input, shape=input_shape)
   978  out_op = gen_nn_ops.max_pool_grad_grad(t, orig_out, grad_in, ksize,
/aosp_15_r20/external/pytorch/test/
test_fake_tensor.py
   877  grad_in = torch.ops.aten._adaptive_avg_pool2d_backward(grad_out, inp)
   879  torch._prims_common.suggest_memory_format(grad_in)
test_nn.py
   395  def hook(self, module, grad_out, grad_in):  argument
  9854  grad_in = in_t.grad
  9855  self.assertTrue(grad_in.is_contiguous(memory_format=memory_format))
  9861  self.assertEqual(in_t.grad, grad_in)
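A caution when reading these hook signatures: this test names the parameters `grad_out, grad_in` (and the hook tests below use `grad_in, grad_out`), but the parameter names are local to each test; PyTorch's public API passes the gradients in a fixed order regardless of what the callback calls them. A minimal sketch of the documented convention:

```python
import torch
import torch.nn as nn

# register_full_backward_hook always calls hook(module, grad_input, grad_output):
# grad_input  = gradients w.r.t. the module's inputs,
# grad_output = gradients w.r.t. its outputs.
def hook(module, grad_input, grad_output):
    print("grad_input shapes: ", [g.shape if g is not None else None for g in grad_input])
    print("grad_output shapes:", [g.shape for g in grad_output])

linear = nn.Linear(4, 2)
linear.register_full_backward_hook(hook)
linear(torch.randn(3, 4)).sum().backward()
# grad_input shapes:  [torch.Size([3, 4])]
# grad_output shapes: [torch.Size([3, 2])]
```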
test_autograd.py
  1395  def acc_grad_node_post_hook(grad_in, grad_out):  argument
/aosp_15_r20/external/pytorch/test/nn/
test_module_hooks.py
  1336  def bw_hook(m, grad_in, grad_output):  argument
test_convolution.py
   505  grad_in = torch.ones(res.shape, dtype=torch.float32)
   506  res.backward(grad_in)
/aosp_15_r20/external/pytorch/tools/autograd/
derivatives.yaml
  1811  - name: unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
  1812  grad_in: grad.unfold(dim, size, step)
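This `derivatives.yaml` entry closes the loop: the derivative of `unfold_backward` with respect to its `grad_in` argument is `grad.unfold(dim, size, step)`, i.e. `unfold` and `unfold_backward` are adjoint linear maps, each serving as the other's backward. A quick autograd check of that claim, with shapes chosen arbitrarily:

```python
import torch

g = torch.randn(2, 3, 4, dtype=torch.double, requires_grad=True)
out = torch.ops.aten.unfold_backward(g, (2, 8), 1, 4, 2)  # size=4, step=2 -> 3 folds

# The VJP of a linear map is its adjoint applied to the cotangent; per
# derivatives.yaml that adjoint is grad.unfold(dim, size, step).
v = torch.randn_like(out)
(vjp,) = torch.autograd.grad(out, g, v)
assert torch.allclose(vjp, v.unfold(1, 4, 2))
```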