Full-text search for grad_output (results 1–25 of 273, sorted by relevance)


/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/
symbolic_script.cpp
55 def backward(grad_output):
56 return grad_output.expand(self_size).to(self_scalar_type) / self_numel, None
67 def backward(grad_output):
68 …grad_self = AD_sum_backward(grad_output, self_size, dim, keepdim).to(self_scalar_type) / AD_safe_s…
78 def backward(grad_output):
79 grad_self = AD_logsumexp_backward(grad_output, self, result, dim, keepdim)
143 def backward(grad_output):
145 grad_self = AD_var_backward_0(grad_output / (std_out * 2), self, correction)
155 def backward(grad_output):
157 … grad_self = AD_var_backward_2(grad_output / (std_out * 2), self, dim, correction, keepdim)
[all …]
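Note: the mean() backward above expands grad_output back to the input shape and divides by the element count. A minimal Python sketch of that formula (a standalone illustration, not the TorchScript source):

    import torch

    def mean_backward(grad_output, self_size, self_numel, self_dtype):
        # Spread grad_output evenly over every input element.
        return grad_output.expand(self_size).to(self_dtype) / self_numel

    x = torch.randn(3, 4, requires_grad=True)
    x.mean().backward()
    manual = mean_backward(torch.ones(()), x.shape, x.numel(), x.dtype)
    assert torch.allclose(x.grad, manual)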
/aosp_15_r20/external/pytorch/torch/nn/
grad.py
11 grad_output, argument
25 grad_output : output gradient tensor (minibatch x out_channels x oW)
36 >>> grad_output = torch.randn(output.shape)
37 >>> grad_input = torch.autograd.grad(output, input, grad_output)
38 >>> F.grad.conv1d_input(input.shape, weight, grad_output)
41 input = grad_output.new_empty(1).expand(input_size)
44 grad_output,
61 grad_output, argument
72 grad_output : output gradient tensor (minibatch x out_channels x oW)
83 >>> grad_output = torch.randn(output.shape)
[all …]
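Note: the docstring excerpt above already demonstrates the intended usage: torch.nn.grad.conv1d_input recomputes the same input gradient that autograd produces. A self-contained version of that example (shapes chosen arbitrarily):

    import torch
    import torch.nn.functional as F

    input = torch.randn(1, 1, 3, requires_grad=True)   # minibatch x in_channels x iW
    weight = torch.randn(1, 1, 1, requires_grad=True)  # out_channels x in_channels x kW
    output = F.conv1d(input, weight)
    grad_output = torch.randn(output.shape)
    (grad_input,) = torch.autograd.grad(output, input, grad_output)
    manual = torch.nn.grad.conv1d_input(input.shape, weight, grad_output)
    assert torch.allclose(grad_input, manual)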
/aosp_15_r20/external/pytorch/aten/src/ATen/native/miopen/
Conv_miopen.cpp
41 IntArrayRef input_size, const at::Tensor& grad_output, const at::Tensor& weight, in miopen_convolution_backward_input() argument
48 IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input, in miopen_convolution_backward_weight() argument
55 const at::Tensor& grad_output) { in miopen_convolution_backward_bias() argument
60 const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight, in miopen_convolution_backward() argument
74 const at::Tensor& grad_output, const at::Tensor& weight, in miopen_convolution_transpose_backward_input() argument
81 IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input, in miopen_convolution_transpose_backward_weight() argument
88 const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight, in miopen_convolution_transpose_backward() argument
102 IntArrayRef input_size, const at::Tensor& grad_output, const at::Tensor& weight, in miopen_depthwise_convolution_backward_input() argument
109 IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input, in miopen_depthwise_convolution_backward_weight() argument
116 const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight, in miopen_depthwise_convolution_backward() argument
[all …]
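Note: the MIOpen bindings split convolution backward into separate grad-input, grad-weight, and grad-bias paths. The same decomposition can be written in plain PyTorch (a sketch of the split, not the MIOpen kernels themselves):

    import torch
    import torch.nn.functional as F
    from torch.nn import grad as nn_grad

    x = torch.randn(2, 3, 8, 8)
    w = torch.randn(4, 3, 3, 3)
    grad_output = torch.randn(F.conv2d(x, w).shape)

    grad_input = nn_grad.conv2d_input(x.shape, w, grad_output)
    grad_weight = nn_grad.conv2d_weight(x, w.shape, grad_output)
    grad_bias = grad_output.sum(dim=(0, 2, 3))  # reduce over all dims but channels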
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/
Activation.mm
177 (const Tensor& grad_output,
195 string key = "leaky_relu_backward" + getTensorsStringKey({self, grad_output}) + ":" +
199 MPSGraphTensor* gradOutputTensor = mpsGraphRankedPlaceHolder(mpsGraph, grad_output);
222 Placeholder gradOutputPlaceholder = Placeholder(cachedGraph->gradOutputTensor_, grad_output);
276 (const Tensor& grad_output, const Tensor& output, int64_t dim, ScalarType input_dtype, const Tensor…
287 …string key = "log_softmax_backward_mps_out:" + getMPSTypeString(grad_output) + ":" + std::to_strin…
289 …GraphTensor* gradOutputTensor = mpsGraphUnrankedPlaceHolder(mpsGraph, getMPSDataType(grad_output));
306 Placeholder gradPlaceholder = Placeholder(cachedGraph->gradOutputTensor_, grad_output);
372 Tensor& log_sigmoid_backward_mps_out(const Tensor& grad_output,
394 string key = "log_sigmoid_backward_out:" + getTensorsStringKey({self, grad_output});
[all …]
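Note: the MPS graph built above encodes leaky_relu_backward, which is just a masked scaling of grad_output. A reference sketch of the math the graph implements:

    import torch

    def leaky_relu_backward(grad_output, self, negative_slope):
        # Pass grad_output through where the input was positive,
        # scale it by the slope elsewhere.
        return torch.where(self > 0, grad_output, grad_output * negative_slope)

    x = torch.randn(5, requires_grad=True)
    y = torch.nn.functional.leaky_relu(x, 0.01)
    y.backward(torch.ones_like(y))
    assert torch.allclose(x.grad, leaky_relu_backward(torch.ones_like(y), x, 0.01))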
Linear.mm
129 static Tensor _mps_linear_backward_input(IntArrayRef input_size, const Tensor& grad_output, const T…
130 TORCH_CHECK(grad_output.is_mps(), "mps_linear_backward: grad_output needs to be mps layout");
135 TORCH_CHECK(supportedFloatingOrComplexType(grad_output),
148 …input_size, grad_output.scalar_type(), std::nullopt, kMPS, std::nullopt, grad_output.suggest_memor…
150 if (grad_output.numel() == 0) {
157 string key = "mps_linear_backward_input" + getTensorsStringKey({grad_output, weight_reshaped});
160 newCachedGraph->gradOutputTensor_ = mpsGraphRankedPlaceHolder(mpsGraph, grad_output);
164 bool needReshape = grad_output.dim() > 4;
180 Placeholder gradOutputPlaceholder = Placeholder(cachedGraph->gradOutputTensor_, grad_output);
190 static std::tuple<Tensor, Tensor> _mps_linear_backward_weights(const Tensor& grad_output,
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
UpSampleMoreKernel.cpp
99 auto grad_output = grad_output_.contiguous(); in cpu_upsample_nearest_backward() local
102 auto grad_output_data = grad_output.const_data_ptr<scalar_t>(); in cpu_upsample_nearest_backward()
105 auto output_sizes = grad_output.sizes().vec(); in cpu_upsample_nearest_backward()
236 auto grad_output = grad_output_.contiguous(channels_last_memory_format); in cpu_upsample_nearest_backward_channels_last() local
239 auto grad_output_data = grad_output.const_data_ptr<scalar_t>(); in cpu_upsample_nearest_backward_channels_last()
243 auto output_sizes = grad_output.sizes().vec(); in cpu_upsample_nearest_backward_channels_last()
339 const Tensor& grad_output, in upsample_nearest1d_backward_kernel_impl() argument
341 …AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, grad_output.scalar_type(), "upsample_nearest1d_b… in upsample_nearest1d_backward_kernel_impl()
342 …cpu_upsample_nearest_backward<scalar_t, scale_t, nearest_idx>(grad_input, grad_output, {scales_w}); in upsample_nearest1d_backward_kernel_impl()
348 const Tensor& grad_output, in _upsample_nearest_exact1d_backward_kernel_impl() argument
[all …]
PaddingKernel.cpp
317 auto grad_output = grad_output_.contiguous(); in cpu_padding_backward() local
320 auto grad_output_data = grad_output.const_data_ptr<scalar_t>(); in cpu_padding_backward()
406 auto grad_output = grad_output_.contiguous(memory_format); in cpu_padding_backward_channels_last() local
409 auto grad_output_data = grad_output.const_data_ptr<scalar_t>(); in cpu_padding_backward_channels_last()
497 const Tensor& grad_input, const Tensor& grad_output, IntArrayRef padding) { in reflection_pad1d_backward_kernel_impl() argument
498 PaddingParams param{grad_input, grad_output, padding}; in reflection_pad1d_backward_kernel_impl()
499 AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kBFloat16, grad_output.scalar_type(), in reflection_pad1d_backward_kernel_impl()
501 cpu_padding_backward<scalar_t, ReflectionPad>(grad_input, grad_output, param); in reflection_pad1d_backward_kernel_impl()
536 const Tensor& grad_input, const Tensor& grad_output, IntArrayRef padding) { in reflection_pad2d_backward_kernel_impl() argument
537 PaddingParams param{grad_input, grad_output, padding}; in reflection_pad2d_backward_kernel_impl()
[all …]
AdaptiveAvgPoolKernel.cpp
260 auto grad_output = grad_output_.contiguous(); in cpu_adaptive_avg_pool2d_backward() local
263 auto grad_output_data = grad_output.const_data_ptr<scalar_t>(); in cpu_adaptive_avg_pool2d_backward()
266 int64_t ndim = grad_output.ndimension(); in cpu_adaptive_avg_pool2d_backward()
268 int64_t channels = ndim == 3 ? grad_output.size(0) : grad_output.size(0) * grad_output.size(1); in cpu_adaptive_avg_pool2d_backward()
271 int64_t output_height = grad_output.size(-2); in cpu_adaptive_avg_pool2d_backward()
272 int64_t output_width = grad_output.size(-1); in cpu_adaptive_avg_pool2d_backward()
312 auto grad_output = grad_output_.contiguous(memory_format); in cpu_adaptive_avg_pool2d_backward_channels_last() local
315 auto grad_output_data = grad_output.const_data_ptr<scalar_t>(); in cpu_adaptive_avg_pool2d_backward_channels_last()
321 int64_t output_height = grad_output.size(2); in cpu_adaptive_avg_pool2d_backward_channels_last()
322 int64_t output_width = grad_output.size(3); in cpu_adaptive_avg_pool2d_backward_channels_last()
[all …]
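Note: adaptive average-pool backward fans each grad_output element back over its input window, scaled by the window size. A toy sketch for the evenly divisible 1-D case (a hypothetical helper, not the kernel above):

    import torch

    def adaptive_avg_pool1d_backward_uniform(grad_output, input_length):
        # Assumes input_length divides evenly, so every window has size k
        # and each input position receives grad / k.
        k = input_length // grad_output.size(-1)
        return grad_output.repeat_interleave(k, dim=-1) / k

    x = torch.randn(1, 1, 8, requires_grad=True)
    torch.nn.functional.adaptive_avg_pool1d(x, 4).sum().backward()
    manual = adaptive_avg_pool1d_backward_uniform(torch.ones(1, 1, 4), 8)
    assert torch.allclose(x.grad, manual)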
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
ConvolutionMM3d.cpp
112 const Tensor& grad_output, in slow_conv3d_shape_check() argument
235 if (grad_output.defined()) { in slow_conv3d_shape_check()
238 check_dim_size(grad_output, ndim, dim_planes, n_output_plane); in slow_conv3d_shape_check()
242 check_dim_size(grad_output, ndim, dim_planes, n_output_plane); in slow_conv3d_shape_check()
244 check_dim_size(grad_output, ndim, dim_depth, output_depth); in slow_conv3d_shape_check()
245 check_dim_size(grad_output, ndim, dim_height, output_height); in slow_conv3d_shape_check()
246 check_dim_size(grad_output, ndim, dim_width, output_width); in slow_conv3d_shape_check()
314 TensorAccessor<const scalar_t, 4> grad_output, in slow_conv3d_backward_update_grad_input_frame() argument
327 // Compute fgrad_input = weight.T * grad_output.reshape({grad_output.shape(0), -1}) in slow_conv3d_backward_update_grad_input_frame()
330 const int64_t m = grad_output.size(1) * grad_output.size(2) * grad_output.size(3); in slow_conv3d_backward_update_grad_input_frame()
[all …]
ConvolutionMM2d.cpp
98 const Tensor& grad_output, in slow_conv2d_shape_check() argument
193 if (grad_output.defined()) { in slow_conv2d_shape_check()
196 check_dim_size(grad_output, ndim, dim_planes, n_output_plane); in slow_conv2d_shape_check()
200 check_dim_size(grad_output, ndim, dim_planes, n_output_plane); in slow_conv2d_shape_check()
202 check_dim_size(grad_output, ndim, dim_height, output_height); in slow_conv2d_shape_check()
203 check_dim_size(grad_output, ndim, dim_width, output_width); in slow_conv2d_shape_check()
288 TensorAccessor<const scalar_t, 3> grad_output, in slow_conv2d_backward_update_grad_input_frame() argument
298 // Compute fgrad_input = weight.T * grad_output.reshape({grad_output.shape(0), -1}) in slow_conv2d_backward_update_grad_input_frame()
303 const int64_t n = grad_output.size(1) * grad_output.size(2); in slow_conv2d_backward_update_grad_input_frame()
316 grad_output.data(), ldb, in slow_conv2d_backward_update_grad_input_frame()
[all …]
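Note: the comment at line 298 names the core GEMM of the slow conv2d backward: fgrad_input is the transposed weight matrix times the flattened grad_output, after which col2im folds the columns back into an image. The matrix identity alone, as a sketch:

    import torch

    # One frame: weight2d is (out_planes, in_planes*kH*kW) and grad_output
    # is (out_planes, oH, oW); the product is the input gradient in
    # im2col ("column") layout.
    out_planes, cols, oH, oW = 4, 27, 5, 5
    weight2d = torch.randn(out_planes, cols)
    grad_output = torch.randn(out_planes, oH, oW)
    fgrad_input = weight2d.t() @ grad_output.reshape(out_planes, -1)
    assert fgrad_input.shape == (cols, oH * oW)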
ReflectionPad.cpp
74 TORCH_META_FUNC(reflection_pad1d_backward)(const Tensor& grad_output, in TORCH_META_FUNC()
100 TORCH_CHECK(output_w == grad_output.size(dim_w), "grad_output width unexpected." in TORCH_META_FUNC()
101 " Expected: ", output_w, ", Got: ", grad_output.size(dim_w)); in TORCH_META_FUNC()
165 const Tensor& grad_output, in TORCH_META_FUNC()
171 TORCH_CHECK(grad_output.dim() == input.dim()); in TORCH_META_FUNC()
197 TORCH_CHECK(output_w == grad_output.size(dim_w), "grad_output width unexpected." in TORCH_META_FUNC()
198 " Expected: ", output_w, ", Got: ", grad_output.size(dim_w)); in TORCH_META_FUNC()
199 TORCH_CHECK(output_h == grad_output.size(dim_h), "grad_output height unexpected." in TORCH_META_FUNC()
200 " Expected: ", output_h, ", Got: ", grad_output.size(dim_h)); in TORCH_META_FUNC()
201 TORCH_CHECK(output_d == grad_output.size(dim_d), "grad_output depth unexpected." in TORCH_META_FUNC()
[all …]
UpSampleNearest2d.cpp
53 const Tensor& grad_output, in TORCH_META_FUNC()
62 grad_output.dim() == 4, in TORCH_META_FUNC()
63 "Expected grad_output to be a tensor of dimension 4 but got: dimension ", grad_output.dim()); in TORCH_META_FUNC()
67 grad_output.size(i) == full_output_size[i], in TORCH_META_FUNC()
68 "Expected grad_output to have the same shape as output;", in TORCH_META_FUNC()
70 " but got grad_output.size(", i, ") = ", grad_output.size(i)); in TORCH_META_FUNC()
73 …set_output_raw_strided(0, input_size, {}, grad_output.options().memory_format(grad_output.suggest_… in TORCH_META_FUNC()
77 const Tensor& grad_output, in TORCH_META_FUNC()
86 grad_output.dim() == 4, in TORCH_META_FUNC()
87 "Expected grad_output to be a tensor of dimension 4 but got: dimension ", grad_output.dim()); in TORCH_META_FUNC()
[all …]
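Note: each of these upsample backward meta functions runs the same validation: grad_output must have the expected rank (4-D here, 5-D for the 3-D variants) and match the computed output size in every dimension. The checks amount to (a sketch):

    import torch

    def check_upsample2d_grad_output(grad_output, full_output_size):
        # Mirrors the TORCH_CHECKs above: rank first, then per-dim sizes.
        if grad_output.dim() != 4:
            raise ValueError(f"Expected grad_output of dimension 4, got {grad_output.dim()}")
        for i in range(4):
            if grad_output.size(i) != full_output_size[i]:
                raise ValueError(f"grad_output.size({i}) = {grad_output.size(i)}, "
                                 f"expected {full_output_size[i]}")

    check_upsample2d_grad_output(torch.randn(2, 3, 8, 8), [2, 3, 8, 8])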
UpSampleBilinear2d.cpp
41 const Tensor& grad_output, in TORCH_META_FUNC()
51 grad_output.dim() == 4, in TORCH_META_FUNC()
52 "Expected grad_output to be a tensor of dimension 4 but got: dimension ", grad_output.dim()); in TORCH_META_FUNC()
56 grad_output.size(i) == full_output_size[i], in TORCH_META_FUNC()
57 "Expected grad_output to have the same shape as output;", in TORCH_META_FUNC()
59 " but got grad_output.size(", i, ") = ", grad_output.size(i)); in TORCH_META_FUNC()
62 …set_output_raw_strided(0, input_size, {}, grad_output.options().memory_format(grad_output.suggest_… in TORCH_META_FUNC()
80 const Tensor& grad_output, in TORCH_META_FUNC()
90 grad_output.dim() == 4, in TORCH_META_FUNC()
91 "Expected grad_output to be a tensor of dimension 4 but got: dimension ", grad_output.dim()); in TORCH_META_FUNC()
[all …]
UpSampleBicubic2d.cpp
40 const Tensor& grad_output, in TORCH_META_FUNC()
50 grad_output.dim() == 4, in TORCH_META_FUNC()
51 "Expected grad_output to be a tensor of dimension 4 but got: dimension ", grad_output.dim()); in TORCH_META_FUNC()
55 grad_output.size(i) == full_output_size[i], in TORCH_META_FUNC()
56 "Expected grad_output to have the same shape as output;", in TORCH_META_FUNC()
58 " but got grad_output.size(", i, ") = ", grad_output.size(i)); in TORCH_META_FUNC()
61 set_output_raw_strided(0, input_size, {}, grad_output.options()); in TORCH_META_FUNC()
79 const Tensor& grad_output, in TORCH_META_FUNC()
89 grad_output.dim() == 4, in TORCH_META_FUNC()
90 "Expected grad_output to be a tensor of dimension 4 but got: dimension ", grad_output.dim()); in TORCH_META_FUNC()
[all …]
UpSampleNearest3d.cpp
60 const Tensor& grad_output, in TORCH_META_FUNC()
70 grad_output.dim() == 5, in TORCH_META_FUNC()
71 "Expected grad_output to be a tensor of dimension 5 but got: dimension ", grad_output.dim()); in TORCH_META_FUNC()
75 grad_output.size(i) == full_output_size[i], in TORCH_META_FUNC()
76 "Expected grad_output to have the same shape as output;", in TORCH_META_FUNC()
78 " but got grad_output.size(", i, ") = ", grad_output.size(i)); in TORCH_META_FUNC()
81 set_output_raw_strided(0, input_size, {}, grad_output.options()); in TORCH_META_FUNC()
85 const Tensor& grad_output, in TORCH_META_FUNC()
95 grad_output.dim() == 5, in TORCH_META_FUNC()
96 "Expected grad_output to be a tensor of dimension 5 but got: dimension ", grad_output.dim()); in TORCH_META_FUNC()
[all …]
Activation.cpp
120 const Tensor& grad_output, in TORCH_META_FUNC()
133 build_borrowing_binary_op(maybe_get_output(), grad_output, self_or_result); in TORCH_META_FUNC()
141 const Tensor& grad_output, const Tensor& input in TORCH_META_FUNC()
143 build_borrowing_binary_op(maybe_get_output(), grad_output, input); in TORCH_META_FUNC()
157 const Tensor& grad_output, in TORCH_META_FUNC()
162 build_borrowing_binary_op(maybe_get_output(), grad_output, self); in TORCH_META_FUNC()
178 const Tensor& grad_output, in TORCH_META_FUNC()
190 build_borrowing_binary_op(maybe_get_output(), self_or_result, grad_output); in TORCH_META_FUNC()
197 TORCH_META_FUNC(hardsigmoid_backward) (const Tensor& grad_output, const Tensor& self) { in TORCH_META_FUNC()
198 build_borrowing_binary_op(maybe_get_output(), grad_output, self); in TORCH_META_FUNC()
[all …]
/aosp_15_r20/external/pytorch/tools/autograd/
derivatives.yaml
63 # - 'grad', the gradient of the output (often spelled grad_output
139 # like 'grad_output', and (2) the gradient to multiply with is always
567 - name: diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2)…
568 grad_output: grad.diagonal(offset, dim1, dim2)
614 - name: native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
615 grad_output: "native_dropout_double_backward(grad, grad_output, mask, scale)"
790 - name: hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
791 grad_output: hardswish_backward(grad, self)
792 …self: at::where(at::logical_and(-3.0 < self, self < 3.0), grad * grad_output / 3.0, at::zeros({}, …
1076 - name: masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
[all …]
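Note: entries such as native_dropout_backward define derivatives of the backward functions themselves; the formula listed under grad_output fires only when the backward graph is differentiated again. From user code that path is reached with create_graph (a sketch):

    import torch

    x = torch.randn(3, requires_grad=True)
    y = (x * x).sum()
    # First-order gradient, keeping the graph so it can be differentiated again.
    (gx,) = torch.autograd.grad(y, x, create_graph=True)
    # Second-order gradient; this is where the grad_output formulas apply.
    (ggx,) = torch.autograd.grad(gx.sum(), x)
    assert torch.allclose(ggx, torch.full_like(x, 2.0))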
/aosp_15_r20/external/pytorch/torch/testing/_internal/
autograd_function_db.py
42 def backward(ctx, grad_output, grad_saved): argument
44 return NumpyMul.apply(grad_output, dinput) + 6 * NumpyMul.apply(grad_saved, input)
70 def backward(ctx, grad_output, grad_saved): argument
72 result = grad_output * dinput + 6 * dinput
100 def backward(ctx, grad_output, grad_saved): argument
116 def backward(ctx, grad_output): argument
120 gx = NumpyMul.apply(grad_output, y)
123 gy = NumpyMul.apply(grad_output, x)
162 def backward(ctx, grad_output): argument
166 gx = MulGenVmap.apply(grad_output, y)
[all …]
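Note: every entry in this database follows the torch.autograd.Function contract: backward receives one grad_output per forward output and returns one gradient per forward input. A minimal example of that contract (not taken from the database):

    import torch

    class Square(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x * x

        @staticmethod
        def backward(ctx, grad_output):
            # One grad_output for the single output, one return value for x.
            (x,) = ctx.saved_tensors
            return grad_output * 2 * x

    x = torch.randn(4, dtype=torch.double, requires_grad=True)
    assert torch.autograd.gradcheck(Square.apply, (x,))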
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
NaiveConvolutionTranspose2d.cu
31 const Tensor& grad_output, in slow_conv_transpose2d_shape_check() argument
135 if (grad_output.defined()) { in slow_conv_transpose2d_shape_check()
138 check_dim_size(grad_output, ndim, dimf, n_output_plane); in slow_conv_transpose2d_shape_check()
141 check_dim_size(grad_output, ndim, dimf, n_output_plane); in slow_conv_transpose2d_shape_check()
143 check_dim_size(grad_output, ndim, dimh, output_height); in slow_conv_transpose2d_shape_check()
144 check_dim_size(grad_output, ndim, dimw, output_width); in slow_conv_transpose2d_shape_check()
339 grad_output_arg{grad_output_, "grad_output", 2}, in slow_conv_transpose2d_backward_out_cuda_template()
382 Tensor grad_output = grad_output_.contiguous(); in slow_conv_transpose2d_backward_out_cuda_template() local
390 grad_output.resize_( in slow_conv_transpose2d_backward_out_cuda_template()
391 {1, grad_output.size(0), grad_output.size(1), grad_output.size(2)}); in slow_conv_transpose2d_backward_out_cuda_template()
[all …]
NaiveConvolutionTranspose3d.cu
30 const Tensor& grad_output, in slow_conv_transpose3d_shape_check() argument
159 if (grad_output.defined()) { in slow_conv_transpose3d_shape_check()
162 check_dim_size(grad_output, ndim, dimf, n_output_plane); in slow_conv_transpose3d_shape_check()
165 check_dim_size(grad_output, ndim, dimf, n_output_plane); in slow_conv_transpose3d_shape_check()
167 check_dim_size(grad_output, ndim, dimd, output_depth); in slow_conv_transpose3d_shape_check()
168 check_dim_size(grad_output, ndim, dimh, output_height); in slow_conv_transpose3d_shape_check()
169 check_dim_size(grad_output, ndim, dimw, output_width); in slow_conv_transpose3d_shape_check()
449 grad_output_arg{grad_output_, "grad_output", 2}, in slow_conv_transpose3d_backward_out_cuda_template()
483 Tensor grad_output = grad_output_.contiguous(); in slow_conv_transpose3d_backward_out_cuda_template() local
492 grad_output.resize_({1, in slow_conv_transpose3d_backward_out_cuda_template()
[all …]
DepthwiseConv3d.cu
102 const PackedTensorAccessor32<const scalar_t, 5> grad_output, in conv_depthwise3d_cuda_backward_input_kernel() argument
111 const int oC = grad_output.size(1); in conv_depthwise3d_cuda_backward_input_kernel()
112 const int oT = grad_output.size(2); in conv_depthwise3d_cuda_backward_input_kernel()
113 const int oH = grad_output.size(3); in conv_depthwise3d_cuda_backward_input_kernel()
114 const int oW = grad_output.size(4); in conv_depthwise3d_cuda_backward_input_kernel()
145 const scalar_t* gout_ptr = grad_output[batch][k_chn].data(); in conv_depthwise3d_cuda_backward_input_kernel()
183 const PackedTensorAccessor32<const scalar_t, 5> grad_output, in conv_depthwise3d_cuda_backward_weight_kernel() argument
203 const int oT = grad_output.size(2); in conv_depthwise3d_cuda_backward_weight_kernel()
204 const int oH = grad_output.size(3); in conv_depthwise3d_cuda_backward_weight_kernel()
205 const int oW = grad_output.size(4); in conv_depthwise3d_cuda_backward_weight_kernel()
[all …]
ConvolutionMM2d.cu
25 const Tensor& input, const Tensor& grad_output, in slow_conv2d_shape_check() argument
89 if (grad_output.defined()) { in slow_conv2d_shape_check()
90 const auto gO_sizes = grad_output.sizes(); in slow_conv2d_shape_check()
92 "Expected grad_output to have ", ndim, in slow_conv2d_shape_check()
104 "Expected grad_output dim ", dimf, " to have size ", in slow_conv2d_shape_check()
108 "Expected grad_output dim ", dimh, " to have size ", in slow_conv2d_shape_check()
111 "Expected grad_output dim ", dimw, " to have size ", in slow_conv2d_shape_check()
218 const Tensor &grad_output, in slow_conv2d_backward() argument
226 slow_conv2d_shape_check(input, grad_output, weight, {}, in slow_conv2d_backward()
234 TORCH_INTERNAL_ASSERT(grad_output.is_contiguous()); in slow_conv2d_backward()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
Pooling.cpp
113 const Tensor& grad_output, in mkldnn_max_pool2d_backward() argument
125 const Tensor& grad_output, in mkldnn_max_pool3d_backward() argument
136 Tensor& mkldnn_avg_pool2d_backward_out(const Tensor & grad_output, in mkldnn_avg_pool2d_backward_out() argument
149 const Tensor& grad_output, in mkldnn_avg_pool2d_backward() argument
160 Tensor& mkldnn_avg_pool3d_backward_out(const Tensor & grad_output, in mkldnn_avg_pool3d_backward_out() argument
173 const Tensor& grad_output, in mkldnn_avg_pool3d_backward() argument
185 const Tensor& grad_output, in mkldnn_adaptive_avg_pool2d_backward() argument
291 const Tensor& grad_output, in _mkldnn_pooling_backward() argument
346 const ideep::tensor& grady = itensor_from_mkldnn(grad_output); in _mkldnn_pooling_backward()
362 optTypeMetaToScalarType(grad_output.options().dtype_opt()), in _mkldnn_pooling_backward()
[all …]
/aosp_15_r20/external/pytorch/test/cpp/api/
autograd.cpp
106 Variable grad_output = torch::ones({2, 2}); in TEST() local
110 auto input_grads = grad({res}, {x}, {grad_output}, {}, true); in TEST()
123 x.backward(grad_output, false, true); in TEST()
162 variable_list grad_output) { in TEST()
164 return grad_output; in TEST()
285 variable_list grad_output) { in TEST()
309 variable_list grad_output) { in TEST()
315 grad_output[0] + grad_output[0] * var2, in TEST()
317 grad_output[0] * mul + grad_output[0] * var1}; in TEST()
345 variable_list grad_output) { in TEST()
[all …]
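Note: the C++ test feeds an explicit grad_output of ones into torch::autograd::grad. The equivalent Python call pattern (with create_graph standing in for the trailing true flag in the C++ call):

    import torch

    x = torch.ones(2, 2, requires_grad=True)
    res = x * x
    grad_output = torch.ones(2, 2)
    (input_grad,) = torch.autograd.grad([res], [x], [grad_output], create_graph=True)
    assert torch.equal(input_grad, 2 * torch.ones(2, 2))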
/aosp_15_r20/external/pytorch/torch/_decomp/
decompositions.py
138 grad_output: Tensor,
151 grad_output * negiptcoef * (self_or_result + negcoef),
152 grad_output * poscoef,
157 grad_output * negiptcoef * negcoef * torch.exp(self_or_result * negiptcoef),
158 grad_output * poscoef,
186 def hardsigmoid_backward(grad_output: Tensor, self: Tensor):
189 grad_output * (1.0 / 6.0),
197 grad_output: Tensor, self: Tensor, min_val: float, max_val: float
199 return torch.where((self <= min_val) | (self >= max_val), 0.0, grad_output)
212 def hardswish_backward(grad_output: Tensor, self: Tensor) -> Tensor:
[all …]
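Note: hardsigmoid_backward in the decomposition above reduces to a constant slope of 1/6 inside (-3, 3) and zero outside. A self-contained sketch of that decomposition, checked against autograd:

    import torch

    def hardsigmoid_backward(grad_output, self):
        # hardsigmoid has slope 1/6 on (-3, 3) and is flat elsewhere.
        return torch.where((self > -3.0) & (self < 3.0),
                           grad_output * (1.0 / 6.0),
                           torch.zeros_like(grad_output))

    x = (torch.randn(10) * 4).requires_grad_()
    y = torch.nn.functional.hardsigmoid(x)
    y.backward(torch.ones_like(y))
    assert torch.allclose(x.grad, hardsigmoid_backward(torch.ones_like(y), x.detach()))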
