/aosp_15_r20/external/pytorch/aten/src/ATen/native/

LossCTC.cpp
    7     // Graves et al. call the probabilities y, we use log_probs (also calling them inputs)
    56    …, size_t, std::vector<int64_t>> ctc_loss_allocate_outputs(const Tensor& log_probs, const Tensor& t…  in ctc_loss_allocate_outputs() argument
    57    // log_probs: input_len x batch_size x num_labels  in ctc_loss_allocate_outputs()
    61    auto log_probs_arg = TensorArg(log_probs, "log_probs", 1);  in ctc_loss_allocate_outputs()
    67    int64_t batch_size = log_probs.size(1);  in ctc_loss_allocate_outputs()
    68    int64_t num_labels = log_probs.size(2);  in ctc_loss_allocate_outputs()
    108   int64_t max_input_length = log_probs.size(0);  in ctc_loss_allocate_outputs()
    118   …Tensor log_alpha = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.opt…  in ctc_loss_allocate_outputs()
    119   Tensor neg_log_likelihood = at::empty({batch_size}, log_probs.options());  in ctc_loss_allocate_outputs()
    125   // A (minor) twist is that we are using log-calculations to enhance numerical stability (log_probs …
    [all …]
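
The hits above outline the native CTC forward pass: a log_alpha table of shape {batch_size, input_len, 2*max_target_length+1} is filled in log space and reduced to a per-sample neg_log_likelihood. A minimal single-example sketch of that recursion, assuming blank = 0 and a non-empty target (illustrative only, not the ATen code):

    import torch

    def ctc_forward_single(log_probs, target, blank=0):
        # log_probs: (T, C) log-probabilities for one sequence; target: (S,) label indices.
        T, C = log_probs.shape
        ext = [blank]
        for lab in target.tolist():
            ext += [lab, blank]              # extended targets: blank, t1, blank, ..., tS, blank
        S2 = len(ext)                        # 2 * S + 1
        neg_inf = torch.finfo(log_probs.dtype).min
        log_alpha = log_probs.new_full((T, S2), neg_inf)
        log_alpha[0, 0] = log_probs[0, blank]
        log_alpha[0, 1] = log_probs[0, ext[1]]
        for t in range(1, T):
            for s in range(S2):
                terms = [log_alpha[t - 1, s]]
                if s >= 1:
                    terms.append(log_alpha[t - 1, s - 1])
                if s >= 2 and ext[s] != blank and ext[s] != ext[s - 2]:
                    terms.append(log_alpha[t - 1, s - 2])
                log_alpha[t, s] = torch.logsumexp(torch.stack(terms), 0) + log_probs[t, ext[s]]
        # Alignments must end in the last label or the trailing blank.
        return -torch.logsumexp(log_alpha[T - 1, S2 - 2:], 0)
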
native_functions.yaml
    219   - func: _use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_len…
    224   - func: _use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor t…
    229   - func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths…
    230   …device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU
    235   - func: _cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor targe…
    236   …device_check: NoCheck # log_probs is expected to be on CUDA while targets is expected to be on CPU
    2028  - func: ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_length…
    2031  - func: ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengt…
    2033  - func: _ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int …
    2041  - func: _ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_leng…
    [all …]
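
The schema entries above declare two overload families: ctc_loss.IntList with int[] lengths and ctc_loss.Tensor with Tensor lengths. A hedged usage sketch of the Python entry point that routes to them, with shapes following the (T, N, C) convention used throughout:

    import torch
    import torch.nn.functional as F

    T, N, C, S = 50, 16, 20, 30
    log_probs = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
    targets = torch.randint(1, C, (N, S), dtype=torch.long)        # label 0 is the blank
    input_lengths = torch.full((N,), T, dtype=torch.long)          # Tensor-lengths overload
    target_lengths = torch.randint(10, S + 1, (N,), dtype=torch.long)

    loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)
    loss.backward()

    # Plain Python tuples of ints select the int[] overload instead:
    loss_int = F.ctc_loss(log_probs, targets, (T,) * N, tuple(target_lengths.tolist()))
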
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cudnn/

LossCTC.cpp
    30    const Tensor& log_probs,  in _use_cudnn_ctc_loss() argument
    39    const Tensor& log_probs,  in _use_cudnn_ctc_loss_tensor() argument
    48    const Tensor& log_probs,  in _cudnn_ctc_loss() argument
    59    const Tensor& log_probs,  in _cudnn_ctc_loss_tensor() argument
    90    const Tensor& log_probs,  in _use_cudnn_ctc_loss() argument
    98    (targets.dim() == 1) && (log_probs.scalar_type() == at::kFloat) &&  in _use_cudnn_ctc_loss()
    101   (log_probs.device().type() == at::kCUDA) && (log_probs.dim() == 3);  in _use_cudnn_ctc_loss()
    106   int64_t max_input_length = log_probs.size(0);  in _use_cudnn_ctc_loss()
    121   const Tensor& log_probs,  in _use_cudnn_ctc_loss_tensor() argument
    129   (targets.dim() == 1) && (log_probs.scalar_type() == at::kFloat) &&  in _use_cudnn_ctc_loss_tensor()
    [all …]
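
The _use_cudnn_ctc_loss hits gate the cuDNN path on a 3-D float32 log_probs tensor on CUDA and 1-D (concatenated) targets. A hedged sketch of inputs shaped to satisfy those checks; the int32 dtype, CPU-resident targets, blank=0 and target-length limit are additional cuDNN conditions taken from the public PyTorch docs, not from the lines shown here:

    import torch
    import torch.nn.functional as F

    if torch.cuda.is_available():
        T, N, C = 50, 16, 20
        log_probs = torch.randn(T, N, C, device="cuda", dtype=torch.float32).log_softmax(2)
        target_lengths = torch.randint(5, 15, (N,), dtype=torch.int32)                   # each <= 256
        targets = torch.randint(1, C, (int(target_lengths.sum()),), dtype=torch.int32)   # 1-D, concatenated, on CPU
        input_lengths = torch.full((N,), T, dtype=torch.int32)
        loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths, blank=0)
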
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/

LossCTC.cu
    7     // Graves et al. call the probabilities y, we use log_probs (also calling them inputs)
    65    // A (minor) twist is that we are using log-calculations to enhance numerical stability (log_probs …
    221   std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets, In…  in ctc_loss_gpu_template() argument
    222   // log_probs: input_len x batch_size x num_labels  in ctc_loss_gpu_template()
    226   auto log_probs_arg = TensorArg(log_probs, "log_probs", 1);  in ctc_loss_gpu_template()
    234   int64_t batch_size = log_probs.size(1);  in ctc_loss_gpu_template()
    235   int64_t num_labels = log_probs.size(2);  in ctc_loss_gpu_template()
    276   int64_t max_input_length = log_probs.size(0);  in ctc_loss_gpu_template()
    290   …Tensor log_alpha = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.opt…  in ctc_loss_gpu_template()
    291   Tensor neg_log_likelihood = at::empty({batch_size}, log_probs.options());  in ctc_loss_gpu_template()
    [all …]
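
The "log-calculations to enhance numerical stability" comment refers to the usual log-sum-exp trick: probabilities of long alignment paths underflow, while sums computed with the running maximum subtracted stay finite. A minimal sketch of the combine step, assuming two log-space tensor inputs:

    import torch

    def log_add(a, b):
        # log(exp(a) + exp(b)) computed stably; -inf marks an impossible path.
        m = torch.maximum(a, b)
        return torch.where(torch.isinf(m), m, m + torch.log(torch.exp(a - m) + torch.exp(b - m)))

    log_add(torch.tensor(-1000.0), torch.tensor(-1001.0))   # ≈ -999.69; exp/sum/log done directly underflows to -inf
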
/aosp_15_r20/external/libopus/dnn/torch/lpcnet/models/

lpcnet.py
    199   log_probs = torch.log(get_pdf_from_tree(y) + 1e-6)
    201   log_probs = torch.log_softmax(y, dim=-1)
    203   return log_probs, (gru_a_state, gru_b_state)
    226   log_probs, _ = self.sample_rate_network(signals, c, gru_states)
    228   return log_probs

multi_rate_lpcnet.py
    314   log_probs = torch.log_softmax(y, dim=-1)
    316   return log_probs
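
Both model files return torch.log_softmax outputs. A hedged sketch of drawing one sample per frame from such log-probabilities (the shape and names below are illustrative, not LPCNet's):

    import torch

    log_probs = torch.log_softmax(torch.randn(8, 256), dim=-1)              # (frames, levels)
    samples = torch.distributions.Categorical(logits=log_probs).sample()    # (frames,)
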
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/ctc/

ctc_beam_search.h
    130   std::vector<float>* log_probs, bool merge_repeated) const;
    398   int n, std::vector<std::vector<int>>* paths, std::vector<float>* log_probs,  in TopPaths() argument
    401   TFLITE_DCHECK(log_probs);  in TopPaths()
    403   log_probs->clear();  in TopPaths()
    423   log_probs->push_back(e->newp.total);  in TopPaths()
ctc_beam_search_decoder.cc
    225   std::vector<float> log_probs;  in Eval() local
    243   &log_probs, merge_repeated));  in Eval()
    248   log_probabilities_output[b * top_paths + bp] = log_probs[bp];  in Eval()
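
The TopPaths/Eval hits copy one score per decoded path into log_probs. At the Python level the analogous output is the log_probabilities matrix returned by TensorFlow's beam-search decoder; a hedged sketch against the public TF API rather than the TFLite kernel itself:

    import tensorflow as tf

    max_time, batch, num_classes = 50, 4, 20
    logits = tf.random.normal([max_time, batch, num_classes])   # time-major; TF treats the last class as blank
    seq_len = tf.fill([batch], max_time)

    decoded, log_probabilities = tf.nn.ctc_beam_search_decoder(
        logits, seq_len, beam_width=10, top_paths=3)
    # decoded: list of 3 SparseTensors (one per path); log_probabilities: shape (batch, 3)
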
/aosp_15_r20/external/tensorflow/tensorflow/core/util/ctc/

ctc_beam_search.h
    133   std::vector<T>* log_probs, bool merge_repeated) const;
    403   int n, std::vector<std::vector<int>>* paths, std::vector<T>* log_probs,  in TopPaths() argument
    406   CHECK_NOTNULL(log_probs)->clear();  in TopPaths()
    427   log_probs->push_back(e->newp.total);  in TopPaths()
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/

ctc_decoder_ops.cc
    340   std::vector<T> log_probs;  in Compute() local
    354   &log_probs, merge_repeated_));  in Compute()
    359   log_prob_t(b, bp) = log_probs[bp];  in Compute()
/aosp_15_r20/external/pytorch/test/

test_nn.py
    2639  log_probs = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
    2642  torch.nn.functional.ctc_loss(log_probs, targets, _input_lengths, target_lengths)
    2645  torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
    2652  log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2)
    2654  torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
    2660  log_probs = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
    2662  torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
    2671  …log_probs = torch.randn(input_length, batch_size, vocab_size, dtype=torch.double).log_softmax(2).r…
    2676  res_cpu = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths,
    2679  grad_cpu, = torch.autograd.grad(res_cpu, log_probs, grad_out)
    [all …]
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/

loss.h
    737   const Tensor& log_probs,  in ctc_loss() argument
    745   log_probs,  in ctc_loss()
    766   /// F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
    770   const Tensor& log_probs,
    776   log_probs,
/aosp_15_r20/external/pytorch/test/mobile/model_test/

nn_ops.py
    341   log_probs = torch.randn(50, 16, 20).log_softmax(2).detach()
    351   F.ctc_loss(log_probs, targets, input_lengths, target_lengths),
/aosp_15_r20/external/pytorch/test/cpp/api/

functional.cpp
    2574  const auto log_probs =  in TEST_F() local
    2579  F::ctc_loss(log_probs, targets, _input_lengths, target_lengths),  in TEST_F()
    2584  F::ctc_loss(log_probs, targets, input_lengths, target_lengths_),  in TEST_F()
    2591  const auto log_probs =  in TEST_F() local
    2594  F::ctc_loss(log_probs, targets, input_lengths, target_lengths),  in TEST_F()
    2603  const auto log_probs =  in TEST_F() local
    2606  log_probs,  in TEST_F()
    2613  -log_probs.sum(0).slice(1, 0, 1).view_as(loss), loss));  in TEST_F()
    2619  const auto log_probs =  in TEST_F() local
    2622  log_probs,  in TEST_F()
    [all …]

modules.cpp
    2629  // log_probs actually returns log_proba  in TEST_F()
    2658  // forward returns the same thing as log_probs  in TEST_F()
    3197  const auto log_probs =  in TEST_F() local
    3200  loss->forward(log_probs, targets, input_lengths, target_lengths);  in TEST_F()
    3203  -log_probs.sum(0).slice(1, 0, 1).view_as(output), output));  in TEST_F()
/aosp_15_r20/external/pytorch/torch/csrc/api/src/nn/modules/

loss.cpp
    303   const Tensor& log_probs,  in forward() argument
    308   log_probs,  in forward()
/aosp_15_r20/external/pytorch/tools/autograd/

derivatives.yaml
    535   - name: _ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int …
    536   …log_probs: _ctc_loss_backward(grad, log_probs, targets, input_lengths, target_lengths, result0, re…
    538   - name: _ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_leng…
    539   …log_probs: _ctc_loss_backward(grad, log_probs, targets, input_lengths, target_lengths, result0, re…
    2649  - name: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths…
    2650  log_probs: _cudnn_ctc_loss_backward(grad, result0, result1, zero_infinity)
    2652  - name: _cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor targe…
    2653  log_probs: _cudnn_ctc_loss_backward(grad, result0, result1, zero_infinity)
/aosp_15_r20/external/pytorch/torch/testing/_internal/

common_nn.py
    3020  def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean'):  argument
    3023  dt = log_probs.dtype
    3024  log_probs = log_probs.double()  # we need the accuracy as we are not in logspace
    3028  for i in range(log_probs.size(1)):
    3037  probs = log_probs[:input_length, i].exp()
    3038  alpha = log_probs.new_zeros((target_length * 2 + 1,))
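
ctcloss_reference recomputes the loss in probability space at double precision. A hedged sketch of the kind of comparison the tests build on top of it, using per-sample losses and torch.autograd.grad as in the test_nn.py hits (the exact shapes and tolerance are assumptions):

    import torch
    import torch.nn.functional as F

    T, N, C, S = 30, 4, 10, 5
    log_probs = torch.randn(T, N, C, dtype=torch.double).log_softmax(2).detach().requires_grad_()
    targets = torch.randint(1, C, (N, S), dtype=torch.long)
    input_lengths = torch.full((N,), T, dtype=torch.long)
    target_lengths = torch.full((N,), S, dtype=torch.long)

    loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
    grad_out = torch.randn_like(loss)
    grad, = torch.autograd.grad(loss, log_probs, grad_out)
    # A reference implementation such as ctcloss_reference would be expected to match
    # `loss` (and the resulting gradient) to within a small tolerance, e.g. 1e-6.
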
/aosp_15_r20/external/pytorch/torch/nn/

functional.py
    3006  log_probs: Tensor,
    3025  …log_probs: :math:`(T, N, C)` or :math:`(T, C)` where `C = number of characters in alphabet includi…
    3050  >>> log_probs = torch.randn(50, 16, 20).log_softmax(2).detach().requires_grad_()
    3054  >>> loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)
    3057  if has_torch_function_variadic(log_probs, targets, input_lengths, target_lengths):
    3060  (log_probs, targets, input_lengths, target_lengths),
    3061  log_probs,
    3070  log_probs,

functional.pyi.in
    326   log_probs: Tensor,
/aosp_15_r20/external/pytorch/torch/nn/modules/

loss.py
    1850  - Log_probs: Tensor of size :math:`(T, N, C)` or :math:`(T, C)`,
    1975  log_probs: Tensor,
    1981  log_probs,
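
The loss.py hits document the module-level API with the same (T, N, C) / (T, C) layout. A hedged sketch of nn.CTCLoss as the counterpart of the functional call, with example sizes borrowed from the docstring hits above:

    import torch
    import torch.nn as nn

    ctc = nn.CTCLoss(blank=0, reduction='mean', zero_infinity=True)
    log_probs = torch.randn(50, 16, 20).log_softmax(2).detach().requires_grad_()   # (T, N, C)
    targets = torch.randint(1, 20, (16, 30), dtype=torch.long)
    input_lengths = torch.full((16,), 50, dtype=torch.long)
    target_lengths = torch.randint(10, 31, (16,), dtype=torch.long)

    loss = ctc(log_probs, targets, input_lengths, target_lengths)
    loss.backward()
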
/aosp_15_r20/external/pytorch/test/distributions/

test_distributions.py
    1245  log_probs = dist.log_prob(s)
    1246  log_probs_data_flat = log_probs.view(-1)
    3940  log_probs = []
    3949  log_probs.append(log_prob - log_abs_det_jacobian)
    3957  for x in log_probs
    3960  self.assertEqual(log_probs[0], log_probs[1])
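
The second group of hits accumulates log_prob - log_abs_det_jacobian, i.e. the change-of-variables identity log p_Y(y) = log p_X(x) - log|det dT/dx| for a transformed distribution. A minimal sketch with an ExpTransform (the particular base distribution and transform are illustrative choices, not what the test uses):

    import torch
    from torch.distributions import Normal, TransformedDistribution
    from torch.distributions.transforms import ExpTransform

    base = Normal(torch.zeros(3), torch.ones(3))
    dist = TransformedDistribution(base, [ExpTransform()])          # equivalent to a LogNormal
    y = dist.sample()
    x = dist.transforms[0].inv(y)                                   # x = log(y)
    manual = base.log_prob(x) - dist.transforms[0].log_abs_det_jacobian(x, y)
    assert torch.allclose(manual, dist.log_prob(y))
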
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/options/

loss.h
    518   /// F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/modules/

loss.h
    586   const Tensor& log_probs,
/aosp_15_r20/external/pytorch/torch/

overrides.py
    551   …lambda log_probs, targets, input_lengths, target_lengths, blank=0, reduction="mean", zero_infinity…
    859   …lambda log_probs, targets, input_lengths, target_lengths, blank=0, reduction="mean", zero_infinity…