/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/

RNN.cu
  407  void lstm_backward_impl(const Tensor& grad_hy, const Tensor& grad_cy,  [in lstm_backward_impl(), argument]
  418  auto grad_hyI = tryGetTensorInfo<scalar_t, index_type>(grad_hy);  [in lstm_backward_impl()]
  428  if (allContiguous({grad_hy, grad_cy, cx, cy, workspace, grad_gates, grad_cx})) {  [in lstm_backward_impl()]
  479  void gru_backward_impl(const Tensor& grad_hy, const Tensor& workspace,  [in gru_backward_impl(), argument]
  484  int64_t numel = grad_hy.numel();  [in gru_backward_impl()]
  488  auto grad_hyI = getTensorInfo<scalar_t, index_type>(grad_hy);  [in gru_backward_impl()]
  496  if (allContiguous({grad_hy, workspace, grad_input_gates, grad_hidden_gates, grad_hx})) {  [in gru_backward_impl()]
  548  void checkLSTMBackwardSizes(const TensorArg& grad_hy, const TensorArg& grad_cy,  [in checkLSTMBackwardSizes(), argument]
  552  const TensorArg& defined_grad = grad_hy->defined() ? grad_hy : grad_cy;  [in checkLSTMBackwardSizes()]
  555  if (grad_hy->defined()) {  [in checkLSTMBackwardSizes()]
  [all …]

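The RNN.cu hits show the fused LSTM/GRU backward kernels testing allContiguous(...) to choose between a flat-indexing fast path and a TensorInfo-based strided path. A minimal Python sketch of that dispatch decision; only the contiguity test itself mirrors the ATen code, the kernel names are illustrative:

    import torch

    def all_contiguous(tensors):
        # Counterpart of ATen's allContiguous(...): every given tensor is contiguous.
        return all(t.is_contiguous() for t in tensors if t is not None)

    grad_hy = torch.randn(4, 12)        # (batch, 4 * hidden) gate layout, example sizes
    workspace = torch.randn(4, 12).t()  # transposed view -> non-contiguous
    # Fast path only when every operand is contiguous; otherwise strided TensorInfo indexing.
    path = "flat-index" if all_contiguous([grad_hy, workspace]) else "strided (TensorInfo)"
    print(path)                          # prints "strided (TensorInfo)"
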
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/

RNN.cpp
  325  auto grad_hy = grad_hy_r.defined() ? grad_hy_r.contiguous() : at::zeros_like(hx_, LEGACY_CONTIGUOU…  [in mkldnn_rnn_layer_backward(), local]
  399  // Convert grad_y, grad_hy, grad_cy to fp32 in non-fp32 backward  [in mkldnn_rnn_layer_backward()]
  408  grad_hy.sizes(), grad_hy.options().dtype(at::ScalarType::Float));  [in mkldnn_rnn_layer_backward()]
  409  grad_hy_.copy_(grad_hy);  [in mkldnn_rnn_layer_backward()]
  424  grad_hy, rnn.dst_iter_desc(ideep::tensor::data_type::f32));  [in mkldnn_rnn_layer_backward()]

/aosp_15_r20/external/pytorch/aten/src/ATen/native/

RNN.cpp
  1551  const Tensor& grad_hy = *grad_hy_maybe_owned;  [in _thnn_differentiable_lstm_cell_backward(), local]
  1556  if (!grad_hy.defined() && !grad_cy.defined()) {  [in _thnn_differentiable_lstm_cell_backward()]
  1574  TORCH_INTERNAL_ASSERT((grad_hy.defined() || grad_cy.defined()), "either gradient with respect to hy…  [in _thnn_differentiable_lstm_cell_backward()]
  1575  if (grad_hy.defined()) {  [in _thnn_differentiable_lstm_cell_backward()]
  1576  gog = grad_hy * gcx;  [in _thnn_differentiable_lstm_cell_backward()]
  1578  gcx = at::tanh_backward(grad_hy * o, gcx);  [in _thnn_differentiable_lstm_cell_backward()]
  1599  const Tensor& grad_hy,  [in _thnn_differentiable_gru_cell_backward(), argument]
  1626  Tensor grad_hx = grad_hy * ig;  [in _thnn_differentiable_gru_cell_backward()]
  1628  Tensor gig = at::sigmoid_backward(grad_hy * (hx - ng), ig);  [in _thnn_differentiable_gru_cell_backward()]
  1629  Tensor gin = at::tanh_backward(grad_hy * (1 - ig), ng);  [in _thnn_differentiable_gru_cell_backward()]

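The _thnn_differentiable_gru_cell_backward fragments (source lines 1626 to 1629) spell out the chain rule for a cell of the form hy = ig * hx + (1 - ig) * ng, with ig a sigmoid output and ng a tanh output. A short Python rendering of those three lines, with at::sigmoid_backward and at::tanh_backward written out elementwise; this is a sketch of the math only, the real function also handles the reset gate and bias gradients:

    import torch

    def gru_cell_backward_sketch(grad_hy, hx, ig, ng):
        # hy = ig * hx + (1 - ig) * ng
        grad_hx = grad_hy * ig                      # d hy / d hx = ig            (line 1626)
        # sigmoid_backward(g, out) = g * out * (1 - out)
        gig = grad_hy * (hx - ng) * ig * (1 - ig)   # grad of ig's pre-activation (line 1628)
        # tanh_backward(g, out) = g * (1 - out * out)
        gin = grad_hy * (1 - ig) * (1 - ng * ng)    # grad of ng's pre-activation (line 1629)
        return grad_hx, gig, gin
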
native_functions.yaml
  256   …_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy,…
  4043  …tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy,…
  4088  …_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy,…
  7740  - func: lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor …
  7755  - func: _thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy,…
  7760  - func: _thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tens…
  7762  - func: _thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gate…
  7769  - func: _thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, …
  7774  - func: _thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_ga…

/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/

RnnOps.mm
  364  const auto grad_hy = grad_hy_r.defined() ? grad_hy_r : at::zeros_like(hx[0], input.options());
  411  …lstm_backward_" + getTensorsStringKey({input, z_state, cell_state_fwd, grad_y, grad_cy, grad_hy}) +
  443  mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(grad_hy), getMPSShape(grad_hy));
  682  Placeholder gradientHyPlaceholder = Placeholder(cachedGraph->inputTensors_[6], grad_hy);

/aosp_15_r20/external/pytorch/aten/src/ATen/native/miopen/

RNN_miopen.cpp
  559  const Tensor& output_r, const Tensor& grad_output_r, const Tensor& grad_hy,  [in miopen_rnn_backward_input(), argument]
  601  auto dhy = grad_hy.contiguous().view(hidden_size);  [in miopen_rnn_backward_input()]
  778  auto grad_hy = grad_hy_r.defined() ? grad_hy_r : at::zeros_like(hx, LEGACY_CONTIGUOUS_MEMORY_FORMA…  [in miopen_rnn_backward(), local]
  781  …iopen_rnn_backward_input(input, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, h…  [in miopen_rnn_backward()]

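The miopen_rnn_backward hit at source line 778 shows the same defaulting step used by the cuDNN and MPS backends above: an undefined grad_hy is replaced with zeros shaped like the hidden state so the backward kernel always receives a defined tensor, and line 601 then flattens it into a contiguous view of hidden_size. A Python equivalent of those two steps; the function name is illustrative only:

    import torch

    def prepare_hidden_grad(grad_hy_r, hx, hidden_size):
        # No gradient flowed into hy -> treat it as zero rather than skipping the term.
        grad_hy = grad_hy_r if grad_hy_r is not None else torch.zeros_like(hx)
        # The kernel consumes a contiguous view with the expected hidden_size shape.
        return grad_hy.contiguous().view(hidden_size)
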
/aosp_15_r20/external/pytorch/test/

test_nn.py
  3965  def forward_backward(cuda, rnn, input_val, grad_output, weights_val, hx_val, grad_hy,  [argument]
  3998  grad_hy = grad_hy.cuda()
  4010  … torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_hy + 1])
  4012  … torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_cy + 1])
  4014  torch.autograd.backward([output, hy], [grad_output, grad_hy])
  4059  grad_hy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
  4063  grad_hy = make_noncontig(grad_hy)
  4083  False, rnn, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)
  4094  True, rnn_gpu, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)
  4103  grad_hy = torch.randn(
  [all …]

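The test fragments above build grad_hy with shape (num_layers * num_directions, batch, hidden_size) and pass it to torch.autograd.backward together with the output gradient. A self-contained version of that pattern for a unidirectional LSTM; the sizes are arbitrary example values:

    import torch
    import torch.nn as nn

    num_layers, batch, seq_len, input_size, hidden_size = 2, 3, 5, 4, 6
    rnn = nn.LSTM(input_size, hidden_size, num_layers)

    x = torch.randn(seq_len, batch, input_size, requires_grad=True)
    output, (hy, cy) = rnn(x)

    grad_output = torch.randn_like(output)
    grad_hy = torch.randn(num_layers, batch, hidden_size)   # num_directions == 1 here
    grad_cy = torch.randn(num_layers, batch, hidden_size)

    # Backpropagate through the sequence output and both final states at once,
    # as the test does with [output, hy[0], hy[1]].
    torch.autograd.backward([output, hy, cy], [grad_output, grad_hy, grad_cy])
    print(x.grad.shape)   # torch.Size([5, 3, 4])
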
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cudnn/

RNN.cpp
  1676  const Tensor& grad_hy,  [in _cudnn_rnn_backward_input(), argument]
  1750  auto dhy = grad_hy.contiguous().view(hidden_size);  [in _cudnn_rnn_backward_input()]
  2133  auto grad_hy = grad_hy_r.defined()  [in _cudnn_rnn_backward(), local]
  2150  grad_hy,  [in _cudnn_rnn_backward()]

/aosp_15_r20/external/pytorch/torch/

_meta_registrations.py
  6085  def checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace):  [argument]
  6086  defined_grad = grad_hy if grad_hy is not None else grad_cy
  6089  if grad_hy is not None:
  6090  torch._check(grad_hy.size() == exp_size, lambda: "")
  6101  def _thnn_fused_lstm_cell_backward_impl(grad_hy, grad_cy, cx, cy, workspace, has_bias):  [argument]
  6102  if grad_hy is None and grad_cy is None:
  6104  checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace)

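The meta registration mirrors the C++ checkLSTMBackwardSizes above: it picks whichever incoming gradient is defined and validates the other against its size. A hedged reconstruction of the check around source lines 6086 to 6090; the error messages are paraphrased and the real function also validates cx, cy, and the workspace:

    import torch

    def check_lstm_backward_sizes(grad_hy, grad_cy, cx, cy, workspace):
        # At least one of grad_hy / grad_cy is defined (the caller returns early otherwise).
        defined_grad = grad_hy if grad_hy is not None else grad_cy
        exp_size = defined_grad.size()
        if grad_hy is not None:
            torch._check(grad_hy.size() == exp_size,
                         lambda: f"expected grad_hy of size {exp_size}, got {grad_hy.size()}")
        if grad_cy is not None:
            torch._check(grad_cy.size() == exp_size,
                         lambda: f"expected grad_cy of size {exp_size}, got {grad_cy.size()}")
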
/aosp_15_r20/external/pytorch/tools/autograd/

derivatives.yaml
  2701  - name: lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor …
  2712  …_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy,…
  2720  grad_hy: not_implemented("_cudnn_rnn_backward", kCudnnDoubleBackwardMsg)
  2748  …_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy,…
  2755  …tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy,…

/aosp_15_r20/external/pytorch/torch/csrc/autograd/

FunctionsManual.cpp
  7062  auto grad_hy = grad_hy_r.defined()  [in mkldnn_rnn_layer_differentiable_backward(), local]
  7123  new_grad_hy = grad_output[x_index].add(grad_hy);  [in mkldnn_rnn_layer_differentiable_backward()]
  7151  grad_hy = dprev_h;  [in mkldnn_rnn_layer_differentiable_backward()]

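The FunctionsManual.cpp hits capture the accumulation used when the layer backward is unrolled step by step: the hidden-state gradient entering step t is the gradient from that step's output plus the gradient carried back from step t+1, and after the per-cell backward it is replaced by the gradient with respect to the previous hidden state. A schematic Python loop of that accumulation; cell_backward is a stand-in callable, not a real API:

    import torch

    def unrolled_layer_backward(grad_output, grad_hy_r, states, cell_backward):
        # grad_output: (seq_len, batch, hidden); states[t]: hidden state entering step t.
        seq_len = grad_output.size(0)
        grad_hy = grad_hy_r if grad_hy_r is not None else torch.zeros_like(grad_output[0])
        grad_inputs = []
        for t in reversed(range(seq_len)):
            new_grad_hy = grad_output[t] + grad_hy    # line 7123: add output grad for step t
            dprev_h, dx = cell_backward(new_grad_hy, states[t])
            grad_inputs.append(dx)
            grad_hy = dprev_h                         # line 7151: carry back to step t - 1
        grad_inputs.reverse()
        return torch.stack(grad_inputs), grad_hy      # per-step input grads, grad wrt h0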