Searched full:input_gates (Results 1 – 6 of 6) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/RNN.cu
     33  const TensorArg& input_gates, const TensorArg& hidden_gates,              in checkSizes() argument
     36  checkDim(c, input_gates, 2);                                              in checkSizes()
     37  checkSameSize(c, input_gates, hidden_gates);                              in checkSizes()
     38  int64_t gates_size = input_gates->size(1);                                in checkSizes()
     47  checkNumel(c, prev_hidden, input_gates->size(0) * gates_size / factor);   in checkSizes()
     49  checkAllSameGPU(c, {input_gates, hidden_gates, input_bias, hidden_bias, prev_hidden});  in checkSizes()
    370  void lstm_forward_impl(const Tensor& input_gates, const Tensor& hidden_gates,  in lstm_forward_impl() argument
    381  auto input_gatesI = getTensorInfo<scalar_t, index_type>(input_gates);     in lstm_forward_impl()
    392  if (allContiguous({input_gates, hidden_gates, input_bias, hidden_bias, cx, hy, cy, workspace})) {  in lstm_forward_impl()
    443  void gru_forward_impl(const Tensor& input_gates, const Tensor& hidden_gates,  in gru_forward_impl() argument
    [all …]
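
These checks spell out the fused-kernel contract: input_gates and hidden_gates are 2-D with identical shape [batch, gates_size], and prev_hidden must hold batch * gates_size / factor elements, where factor is 4 for the LSTM cell (gates i, f, g, o) and 3 for the GRU cell (gates r, z, n). Below is a minimal sketch, using only public PyTorch ops, of the math lstm_forward_impl fuses; it is not the CUDA kernel itself, which additionally records a workspace tensor for the backward pass.

```python
import torch

def lstm_cell_reference(input_gates, hidden_gates, cx):
    """Reference math behind the fused LSTM cell (sketch, not the kernel).

    input_gates:  [batch, 4 * hidden_size]  precomputed x @ W_ih.T + b_ih
    hidden_gates: [batch, 4 * hidden_size]  precomputed h @ W_hh.T + b_hh
    cx:           [batch, hidden_size]      previous cell state
    """
    gates = input_gates + hidden_gates            # the kernel fuses this sum with the activations
    i, f, g, o = gates.chunk(4, dim=1)            # PyTorch gate order: input, forget, cell, output
    i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)
    g = torch.tanh(g)
    cy = f * cx + i * g                           # new cell state
    hy = o * torch.tanh(cy)                       # new hidden state
    return hy, cy
```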
/aosp_15_r20/external/pytorch/torch/_meta_registrations.py
   5863  input_gates,                                                              argument
   5870  torch._check(input_gates.ndim == 2, lambda: f"{input_gates.ndim} != 2")
   5872  input_gates.shape == hidden_gates.shape,
   5873  lambda: f"{input_gates.shape} != {hidden_gates.shape}",
   5875  gates_size = input_gates.size(1)
   5887  expected_prev_hidden_numel = input_gates.size(0) * gates_size // factor
   5890  …lambda: f"{prev_hidden.numel()} != {input_gates.size(0)} * {gates_size} // {factor} (aka {expected…
   5894  x.device == input_gates.device
   5903  input_gates,                                                              argument
   5909  rnn_cell_checkSizes(input_gates, hidden_gates, input_bias, hidden_bias, 4, cx)
   [all …]
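
The meta registration mirrors checkSizes() so that shape inference under fake tensors / torch.compile raises the same errors as the CUDA kernel. The following is a hedged, standalone restatement of the checks visible in the snippet above, using plain asserts instead of torch._check; the name check_rnn_cell_sizes is hypothetical and the bias checks of the real helper are elided.

```python
import torch

def check_rnn_cell_sizes(input_gates, hidden_gates, factor, prev_hidden):
    # Hypothetical restatement of rnn_cell_checkSizes (bias checks elided);
    # factor is 4 for the LSTM cell and 3 for the GRU cell.
    assert input_gates.ndim == 2, f"{input_gates.ndim} != 2"
    assert input_gates.shape == hidden_gates.shape, \
        f"{input_gates.shape} != {hidden_gates.shape}"
    gates_size = input_gates.size(1)
    expected = input_gates.size(0) * gates_size // factor
    assert prev_hidden.numel() == expected, f"{prev_hidden.numel()} != {expected}"
    # All inputs must live on the same device as input_gates (checkAllSameGPU on CUDA).
    assert hidden_gates.device == input_gates.device
    assert prev_hidden.device == input_gates.device

# Example: batch=2, hidden_size=3, LSTM (factor=4), so gates_size = 12.
check_rnn_cell_sizes(
    input_gates=torch.zeros(2, 12),
    hidden_gates=torch.zeros(2, 12),
    factor=4,
    prev_hidden=torch.zeros(2, 3),
)
```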
/aosp_15_r20/external/pytorch/aten/src/ATen/native/RNN.cpp
   1545  const Tensor& input_gates,                      in _thnn_differentiable_lstm_cell_backward() argument
   1559  Tensor gates = input_gates + hidden_gates;      in _thnn_differentiable_lstm_cell_backward()
   1600  const Tensor& input_gates,                      in _thnn_differentiable_gru_cell_backward() argument
   1608  Tensor in_g = input_gates;                      in _thnn_differentiable_gru_cell_backward()
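
The differentiable backward recomputes the forward math from input_gates and hidden_gates. The LSTM path can start from gates = input_gates + hidden_gates, while the GRU path keeps in_g = input_gates separate, because in a GRU the reset gate multiplies only the hidden-side candidate, so the two projections cannot be pre-summed. A reference sketch of that GRU cell math, in public ops only (not the actual kernel or backward code):

```python
import torch

def gru_cell_reference(input_gates, hidden_gates, hx):
    """Reference math behind the fused GRU cell (sketch).

    input_gates:  [batch, 3 * hidden_size]  precomputed x @ W_ih.T + b_ih
    hidden_gates: [batch, 3 * hidden_size]  precomputed h @ W_hh.T + b_hh
    hx:           [batch, hidden_size]      previous hidden state
    """
    i_r, i_z, i_n = input_gates.chunk(3, dim=1)   # PyTorch gate order: reset, update, new
    h_r, h_z, h_n = hidden_gates.chunk(3, dim=1)
    r = torch.sigmoid(i_r + h_r)                  # reset gate
    z = torch.sigmoid(i_z + h_z)                  # update gate
    n = torch.tanh(i_n + r * h_n)                 # candidate; r gates only the hidden half
    return (1 - z) * n + z * hx                   # new hidden state
```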
/aosp_15_r20/external/pytorch/aten/src/ATen/native/native_functions.yaml
   7747  - func: _thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bia…
   7762  …erentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_…
   7764  - func: _thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias…
   7774  - func: _thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_ga…
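
Per these schemas, the fused cells take the two precomputed gate projections rather than the raw weights; whether the biases are folded into the projections or passed through the optional input_bias / hidden_bias arguments, the math is the same. A small usage sketch that builds those projections from a standard nn.LSTMCell (biases folded in for simplicity) and checks the unfused math against it on CPU; the fused ops themselves are CUDA-only internals and are not called here.

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
batch, input_size, hidden_size = 2, 5, 3
cell = torch.nn.LSTMCell(input_size, hidden_size)
x = torch.randn(batch, input_size)
hx, cx = torch.randn(batch, hidden_size), torch.randn(batch, hidden_size)

# The two [batch, 4 * hidden_size] projections the fused op expects as input_gates / hidden_gates.
input_gates = F.linear(x, cell.weight_ih, cell.bias_ih)
hidden_gates = F.linear(hx, cell.weight_hh, cell.bias_hh)

# Unfused equivalent of the cell update (gate order i, f, g, o).
i, f, g, o = (input_gates + hidden_gates).chunk(4, dim=1)
cy_ref = torch.sigmoid(f) * cx + torch.sigmoid(i) * torch.tanh(g)
hy_ref = torch.sigmoid(o) * torch.tanh(cy_ref)

hy, cy = cell(x, (hx, cx))
assert torch.allclose(hy, hy_ref, atol=1e-6) and torch.allclose(cy, cy_ref, atol=1e-6)
```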
/aosp_15_r20/external/pytorch/tools/autograd/derivatives.yaml
   2861  - name: _thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bia…
   2863  …input_gates, hidden_gates, cx, input_bias, hidden_bias: "GradMode::is_enabled() ? _thnn_differenti…
   2865  - name: _thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias…
   2866  …input_gates, hidden_gates, hx, input_bias, hidden_bias: "grad.defined() ? (GradMode::is_enabled() …
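
Per the (truncated) formulas above, the gradient for input_gates and friends is chosen at runtime: when grad mode is still enabled during backward, for example with create_graph=True for a double backward, the _thnn_differentiable_* re-derivation is used so the backward itself can be differentiated; otherwise the fused backward path is taken. A hedged sketch of exercising second-order gradients through nn.LSTMCell; only a CUDA device actually routes through _thnn_fused_lstm_cell, on CPU the cell falls back to unfused math.

```python
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"   # fused op exists only on CUDA
cell = torch.nn.LSTMCell(5, 3).to(device)
x = torch.randn(2, 5, device=device, requires_grad=True)
hx = torch.randn(2, 3, device=device)
cx = torch.randn(2, 3, device=device)

hy, cy = cell(x, (hx, cx))
loss = hy.sum() + cy.sum()

# create_graph=True keeps grad mode enabled inside backward, so the autograd formula
# can pick the differentiable backward and second-order gradients become possible.
(gx,) = torch.autograd.grad(loss, x, create_graph=True)
(ggx,) = torch.autograd.grad(gx.sum(), x)
print(ggx.shape)
```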
/aosp_15_r20/external/pytorch/torch/csrc/inductor/aoti_torch/generated/c_shim_cuda.h
     45  AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cuda__thnn_fused_lstm_cell(AtenTensorHandle input_gates…