
Searched full:grad_cy (Results 1 – 11 of 11) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
RNN.cu
407 void lstm_backward_impl(const Tensor& grad_hy, const Tensor& grad_cy, in lstm_backward_impl() argument
419 auto grad_cyI = tryGetTensorInfo<scalar_t, index_type>(grad_cy); in lstm_backward_impl()
428 if (allContiguous({grad_hy, grad_cy, cx, cy, workspace, grad_gates, grad_cx})) { in lstm_backward_impl()
548 void checkLSTMBackwardSizes(const TensorArg& grad_hy, const TensorArg& grad_cy, in checkLSTMBackwardSizes() argument
552 const TensorArg& defined_grad = grad_hy->defined() ? grad_hy : grad_cy; in checkLSTMBackwardSizes()
558 if (grad_cy->defined()) { in checkLSTMBackwardSizes()
559 checkSize(c, grad_cy, exp_size); in checkLSTMBackwardSizes()
573 const Tensor& grad_cy = c10::value_or_else(grad_cy_opt, [] {return Tensor();}); in _thnn_fused_lstm_cell_backward_impl_cuda() local
575 if (!grad_hy.defined() && !grad_cy.defined()) { in _thnn_fused_lstm_cell_backward_impl_cuda()
578 checkLSTMBackwardSizes({grad_hy, "grad_hy", 1}, {grad_cy, "grad_cy", 2}, in _thnn_fused_lstm_cell_backward_impl_cuda()
[all …]
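
The CUDA kernel treats both incoming gradients as optional: it materializes grad_cy from an optional argument, returns early when neither grad_hy nor grad_cy is defined, and checks the defined gradient's size against the cell state. A minimal Python sketch of that guard (helper name hypothetical, mirroring the snippets above):

import torch

def lstm_backward_guard(grad_hy, grad_cy, cx):
    # Early exit, mirroring `!grad_hy.defined() && !grad_cy.defined()`.
    if grad_hy is None and grad_cy is None:
        return False
    # Whichever gradient is defined fixes the expected size; the other
    # gradient (if present) and the cell state must match it.
    exp_size = (grad_hy if grad_hy is not None else grad_cy).size()
    if grad_cy is not None and grad_cy.size() != exp_size:
        raise RuntimeError("grad_cy has unexpected size")
    if cx.size() != exp_size:
        raise RuntimeError("cx has unexpected size")
    return True

cx = torch.randn(2, 5)
assert lstm_backward_guard(torch.randn(2, 5), None, cx)
assert not lstm_backward_guard(None, None, cx)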
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
RNN.cpp
326 …auto grad_cy = cx_tmp.defined() ? (grad_cy_r.defined() ? grad_cy_r.contiguous() : at::zeros_like(c… in mkldnn_rnn_layer_backward() local
399 // Convert grad_y, grad_hy, grad_cy to fp32 in non-fp32 backward in mkldnn_rnn_layer_backward()
411 grad_cy.sizes(), grad_cy.options().dtype(at::ScalarType::Float)); in mkldnn_rnn_layer_backward()
412 grad_cy_.copy_(grad_cy); in mkldnn_rnn_layer_backward()
426 grad_cy, rnn.dst_iter_desc(ideep::tensor::data_type::f32)); in mkldnn_rnn_layer_backward()
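Lines 399–412 upcast grad_cy to fp32 before handing it to the oneDNN backward primitive when the forward ran in reduced precision. Roughly, in Python (a sketch of the conversion only, not the oneDNN call):

import torch

grad_cy = torch.randn(1, 2, 5, dtype=torch.bfloat16)

# Mirror of: at::empty(grad_cy.sizes(), grad_cy.options().dtype(Float))
grad_cy_f32 = torch.empty(grad_cy.size(), dtype=torch.float32,
                          layout=grad_cy.layout, device=grad_cy.device)
grad_cy_f32.copy_(grad_cy)  # copy_ performs the dtype conversion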
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/
RnnOps.mm
365 const auto grad_cy = grad_cy_r.defined() ? grad_cy_r : at::zeros_like(hx[1], input.options());
411 …lstm_backward_" + getTensorsStringKey({input, z_state, cell_state_fwd, grad_y, grad_cy, grad_hy}) +
441 mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(grad_cy), getMPSShape(grad_cy));
683 Placeholder gradientCyPlaceholder = Placeholder(cachedGraph->inputTensors_[7], grad_cy);
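The MPS backend caches compiled graphs under a string key built from the participating tensors (getTensorsStringKey at line 411), so a graph is rebuilt only when shapes or dtypes change. A hypothetical sketch of that keying scheme (names invented for illustration):

import torch

def tensors_string_key(tensors):
    # Stand-in for getTensorsStringKey: encode dtype and shape so
    # calls with identical metadata share one cached graph.
    return "_".join(f"{t.dtype}:{tuple(t.shape)}" for t in tensors)

_graph_cache = {}

def get_or_build_graph(prefix, tensors, build):
    key = prefix + tensors_string_key(tensors)
    if key not in _graph_cache:
        _graph_cache[key] = build()  # compile once per shape/dtype combo
    return _graph_cache[key]

t = torch.randn(2, 3)
graph = get_or_build_graph("lstm_backward_", [t], build=object)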
/aosp_15_r20/external/pytorch/aten/src/ATen/native/miopen/
RNN_miopen.cpp
560 const Tensor& grad_cy, in miopen_rnn_backward_input() argument
602 auto dcy = grad_cy.defined() ? grad_cy.contiguous().view(hidden_size) : Tensor(); in miopen_rnn_backward_input()
779 …auto grad_cy = cx.defined() ? (grad_cy_r.defined() ? grad_cy_r : at::zeros_like(cx, LEGACY_CONTIGU… in miopen_rnn_backward() local
781 …_backward_input(input, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_siz… in miopen_rnn_backward()
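As in the cuDNN path below, MIOpen flattens the optional cell-state gradient with contiguous().view(hidden_size) so the RNN descriptor sees an exact (num_layers * num_directions, batch, hidden) extent, and passes an undefined tensor when no gradient exists. A sketch with hypothetical sizes:

import torch

num_layers, num_directions, batch, hidden = 2, 1, 3, 5
grad_cy = torch.randn(num_layers * num_directions, batch, hidden)
hidden_size = (num_layers * num_directions, batch, hidden)

# dcy = grad_cy.defined() ? grad_cy.contiguous().view(hidden_size) : Tensor()
dcy = grad_cy.contiguous().view(hidden_size) if grad_cy is not None else None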
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
RNN.cpp
1552 const Tensor& grad_cy = c10::value_or_else(grad_cy_opt, [] {return Tensor();}); in _thnn_differentiable_lstm_cell_backward() local
1556 if (!grad_hy.defined() && !grad_cy.defined()) { in _thnn_differentiable_lstm_cell_backward()
1574 …TORCH_INTERNAL_ASSERT((grad_hy.defined() || grad_cy.defined()),"either gradient with respect to hy… in _thnn_differentiable_lstm_cell_backward()
1579 if (grad_cy.defined()) { in _thnn_differentiable_lstm_cell_backward()
1580 gcx = gcx + grad_cy; in _thnn_differentiable_lstm_cell_backward()
1582 } else if (grad_cy.defined()) { in _thnn_differentiable_lstm_cell_backward()
1584 gcx = grad_cy; in _thnn_differentiable_lstm_cell_backward()
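The branches at lines 1579–1584 combine the two gradient paths into the cell state: grad_hy reaches cy through hy = o * tanh(cy), and grad_cy adds on directly; when only grad_cy is defined it passes through unchanged. A sketch of that accumulation (o is the output-gate activation, assumed given):

import torch

def cell_state_grad(grad_hy, grad_cy, cy, o):
    # d(hy)/d(cy) = o * (1 - tanh(cy)^2), matching the snippets above.
    if grad_hy is not None:
        gcx = grad_hy * o * (1 - cy.tanh() * cy.tanh())
        if grad_cy is not None:
            gcx = gcx + grad_cy   # both paths contribute
    elif grad_cy is not None:
        gcx = grad_cy             # only the direct cy gradient
    else:
        gcx = None
    return gcx

cy, o = torch.randn(2, 5), torch.sigmoid(torch.randn(2, 5))
gcx = cell_state_grad(torch.randn(2, 5), torch.randn(2, 5), cy, o)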
native_functions.yaml
256 …Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt…
4043 …t, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, in…
4088 …Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hi…
7740 - func: lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor …
7755 - func: _thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy,…
7760 - func: _thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tens…
7762 - func: _thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gate…
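In every schema above, grad_hy and grad_cy are declared Tensor?, i.e. optional: autograd passes an undefined tensor when no gradient reached that output. This is observable from the public API by backpropagating through only the hidden state of an LSTMCell, so the cell state's gradient stays undefined:

import torch

cell = torch.nn.LSTMCell(4, 5)
x = torch.randn(2, 4)
hy, cy = cell(x, (torch.zeros(2, 5), torch.zeros(2, 5)))

# Only hy feeds the loss, so the fused backward receives a defined
# grad_hy and an undefined (None) grad_cy.
hy.sum().backward()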
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cudnn/
RNN.cpp
1677 const Tensor& grad_cy, in _cudnn_rnn_backward_input() argument
1752 grad_cy.defined() ? grad_cy.contiguous().view(cell_size) : Tensor(); in _cudnn_rnn_backward_input()
2136 auto grad_cy = cx.defined() in _cudnn_rnn_backward() local
2151 grad_cy, in _cudnn_rnn_backward()
/aosp_15_r20/external/pytorch/torch/
_meta_registrations.py
6085 def checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace): argument
6086 defined_grad = grad_hy if grad_hy is not None else grad_cy
6091 if grad_cy is not None:
6092 torch._check(grad_cy.size() == exp_size, lambda: "")
6101 def _thnn_fused_lstm_cell_backward_impl(grad_hy, grad_cy, cx, cy, workspace, has_bias): argument
6102 if grad_hy is None and grad_cy is None:
6104 checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace)
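The meta registration duplicates only the shape logic so the op can run on meta tensors with no data; torch._check takes the predicate plus a lazy message callable, evaluated only on failure. For example:

import torch

grad_cy = torch.randn(2, 3, device="meta")
exp_size = torch.Size([2, 3])

# Same idiom as line 6092: the lambda defers message construction.
torch._check(grad_cy.size() == exp_size,
             lambda: f"expected {exp_size}, got {tuple(grad_cy.size())}")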
/aosp_15_r20/external/pytorch/tools/autograd/
derivatives.yaml
2701 - name: lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor …
2712 …Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt…
2721 grad_cy: not_implemented("_cudnn_rnn_backward", kCudnnDoubleBackwardMsg)
2748 …Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hi…
2755 …t, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, in…
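The grad_cy: not_implemented(...) entry at line 2721 makes second-order differentiation through the cuDNN RNN backward an explicit error. In practice, taking a gradient-of-gradient through a cuDNN LSTM raises at the second backward (sketch assumes a CUDA build with cuDNN enabled):

import torch

if torch.cuda.is_available():
    lstm = torch.nn.LSTM(4, 5).cuda()
    x = torch.randn(3, 2, 4, device="cuda", requires_grad=True)
    out, _ = lstm(x)
    (gx,) = torch.autograd.grad(out.sum(), x, create_graph=True)
    try:
        gx.sum().backward()  # second order: hits the not_implemented entry
    except RuntimeError as e:
        print(e)             # cuDNN double backward is not supported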
/aosp_15_r20/external/pytorch/test/
test_nn.py
3966 cx_val=None, grad_cy=None): argument
3999 if grad_cy is not None:
4000 grad_cy = grad_cy.cuda()
4009 if grad_cy is None:
4012 … torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_cy + 1])
4128 grad_cy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
4133 grad_cy = make_noncontig(grad_cy)
4156 hx_val, grad_hy, cx_val, grad_cy)
4169 hx_val, grad_hy, cx_val, grad_cy)
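The call at line 4012 seeds several roots at once; torch.autograd.backward takes a list of tensors and a matching list of gradients, which is how a test supplies grad_output, grad_hy, and grad_cy in a single pass:

import torch

lstm = torch.nn.LSTM(4, 5)
x = torch.randn(3, 2, 4, requires_grad=True)
output, (h, c) = lstm(x)

# One backward pass seeding gradients for all three outputs, as in
# torch.autograd.backward([output, hy[0], hy[1]], [...]) above.
torch.autograd.backward([output, h, c],
                        [torch.randn_like(output),
                         torch.randn_like(h),
                         torch.randn_like(c)])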
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
FunctionsManual.cpp
7065 auto grad_cy = cx_tmp.defined() in mkldnn_rnn_layer_differentiable_backward() local
7124 d1 = grad_cy.add(new_grad_hy * o * (1 - cy.tanh() * cy.tanh())); in mkldnn_rnn_layer_differentiable_backward()
7152 grad_cy = dprev_c; in mkldnn_rnn_layer_differentiable_backward()
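
In this decomposed backward, d1 folds the direct grad_cy into the hidden-state path (line 7124), and the resulting dprev_c is reassigned to grad_cy (line 7152) so the gradient flows to the previous time step. A sketch of that recursion, assuming the forget-gate activations fs are given (grad_cy = d1 * f is the standard LSTM step, not shown in the excerpt):

import torch

def bptt_cell_grads(grad_hy_steps, grad_cy, cys, os_, fs):
    # Walk backwards in time; step t's dprev_c becomes step t-1's
    # grad_cy, mirroring `grad_cy = dprev_c` above.
    for grad_hy, cy, o, f in zip(reversed(grad_hy_steps), reversed(cys),
                                 reversed(os_), reversed(fs)):
        d1 = grad_cy + grad_hy * o * (1 - cy.tanh() * cy.tanh())
        grad_cy = d1 * f  # gradient into the previous cell state
    return grad_cy

steps = 3
gc = bptt_cell_grads([torch.randn(2, 5) for _ in range(steps)],
                     torch.zeros(2, 5),
                     [torch.randn(2, 5) for _ in range(steps)],
                     [torch.sigmoid(torch.randn(2, 5)) for _ in range(steps)],
                     [torch.sigmoid(torch.randn(2, 5)) for _ in range(steps)])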