Searched full:log_softmax (Results 1 – 25 of 825) sorted by relevance


/aosp_15_r20/packages/modules/NeuralNetworks/runtime/test/generated/spec_V1_2/
log_softmax.example.cpp
1 // Generated from log_softmax.mod.py
7 namespace generated_tests::log_softmax { namespace
54 .type = TestOperationType::LOG_SOFTMAX, in get_test_model()
70 const auto dummy_test_model = TestModelManager::get().add("log_softmax", get_test_model());
72 } // namespace generated_tests::log_softmax
74 namespace generated_tests::log_softmax { namespace
121 .type = TestOperationType::LOG_SOFTMAX, in get_test_model_relaxed()
139 } // namespace generated_tests::log_softmax
141 namespace generated_tests::log_softmax { namespace
188 .type = TestOperationType::LOG_SOFTMAX, in get_test_model_float16()
[all …]
/aosp_15_r20/external/ComputeLibrary/src/core/CL/cl_kernels/common/
softmax_layer.cl
78 #if defined(LOG_SOFTMAX)
81 #else // defined(LOG_SOFTMAX)
83 #endif // defined(LOG_SOFTMAX)
189 #ifdef LOG_SOFTMAX
194 #else /* LOG_SOFTMAX */
199 #endif /* LOG_SOFTMAX */
211 #ifdef LOG_SOFTMAX
215 #else /* LOG_SOFTMAX */
219 #endif /* LOG_SOFTMAX */
407 #ifdef LOG_SOFTMAX
[all …]
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
Softmax.cpp
37 bool log_softmax) { in add_softmax_node() argument
63 if (log_softmax) { in add_softmax_node()
110 graph, args[0], args[1], args[3], /* log_softmax = */ false); in softmax()
113 void log_softmax(ComputeGraph& graph, const std::vector<ValueRef>& args) { in log_softmax() function
116 graph, args[0], args[1], args[3], /* log_softmax = */ true); in log_softmax()
121 VK_REGISTER_OP(aten._log_softmax.default, log_softmax);
/aosp_15_r20/packages/modules/NeuralNetworks/common/cpu_operations/
LogSoftmax.cpp
31 namespace log_softmax { namespace
88 } // namespace log_softmax
90 NN_REGISTER_OPERATION_DEFAULT_VALIDATION(LOG_SOFTMAX, log_softmax::prepare, log_softmax::execute);
/aosp_15_r20/external/pytorch/torch/csrc/jit/tensorexpr/operators/
softmax.cpp
11 bool log_softmax) { in computeSoftmax() argument
27 // log_softmax(vi) = log(softmax(vi)) in computeSoftmax()
31 // log_softmax(vi) = vi - max(vi) - log(sum(exp(vi - max(vi)))) in computeSoftmax()
39 // - Final loop computes the log_softmax for every element in v. in computeSoftmax()
129 if (!log_softmax) { in computeSoftmax()
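
The comments in computeSoftmax() above state the identity log_softmax(vi) = log(softmax(vi)) and its numerically stable form log_softmax(vi) = vi - max(vi) - log(sum(exp(vi - max(vi)))). A minimal NumPy sketch of that stable form (the function name and shapes here are illustrative, not from the source):

    import numpy as np

    def log_softmax(v, axis=-1):
        # log_softmax(v) = v - max(v) - log(sum(exp(v - max(v))))
        # Subtracting the max first keeps exp() from overflowing.
        m = np.max(v, axis=axis, keepdims=True)
        shifted = v - m
        return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))
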
/aosp_15_r20/packages/modules/NeuralNetworks/common/types/operations/src/
LogSoftmax.cpp
24 namespace log_softmax { namespace
46 } // namespace log_softmax
48 NN_DEFINE_VALIDATION_FUNCTION(LOG_SOFTMAX, log_softmax::validate);
/aosp_15_r20/external/tensorflow/tensorflow/lite/testing/op_tests/
log_softmax.py
15 """Test configs for log_softmax."""
24 """Make a set of tests to do log_softmax."""
32 """Build the log_softmax op testing graph."""
38 out = tf.nn.log_softmax(input_tensor)
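
The op under test, tf.nn.log_softmax, reduces over the last axis by default; a minimal standalone call (the values are illustrative):

    import tensorflow as tf

    input_tensor = tf.constant([[1.0, 2.0, 3.0]])
    out = tf.nn.log_softmax(input_tensor)  # defaults to the last axis
    # tf.exp(out) sums to 1 along that axis.
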
/aosp_15_r20/external/executorch/backends/arm/_passes/
decompose_softmaxes_pass.py
14 torch_softmax = (torch.ops.aten.softmax.int, torch.ops.aten.log_softmax.int)
22 log_softmax = (torch.ops.aten.log_softmax.int, exir_ops.edge.aten._log_softmax.default) variable
74 if op in log_softmax:
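
This pass rewrites aten.log_softmax into more primitive ops. The exact sequence the Arm backend emits is not visible in the snippet, but two standard equivalent decompositions can be verified directly (a sketch, not the pass itself):

    import torch

    x = torch.randn(2, 5)
    ref = torch.nn.functional.log_softmax(x, dim=1)
    via_log = torch.log(torch.softmax(x, dim=1))           # log(softmax(x))
    via_lse = x - torch.logsumexp(x, dim=1, keepdim=True)  # x - logsumexp(x)
    assert torch.allclose(ref, via_log, atol=1e-6)
    assert torch.allclose(ref, via_lse, atol=1e-6)
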
/aosp_15_r20/packages/modules/NeuralNetworks/common/types/operations/include/
LogSoftmax.h
22 namespace android::nn::log_softmax {
24 constexpr char kOperationName[] = "LOG_SOFTMAX";
34 } // namespace android::nn::log_softmax
/aosp_15_r20/external/pytorch/torch/csrc/api/src/nn/modules/
adaptive.cpp
137 const Tensor cluster_logprob = F::log_softmax(cluster_output, 1); in forward()
159 const Tensor head_logprob = F::log_softmax(head_output, 1); in forward()
174 const Tensor head_logprob = F::log_softmax(head_output, 1); in _get_full_log_prob()
184 const Tensor cluster_logprob = F::log_softmax(cluster_output, 1); in _get_full_log_prob()
/aosp_15_r20/external/tensorflow/tensorflow/dtensor/mlir/expansions/
softmax_spmd_expander.cc
159 auto log_softmax = builder.create<mlir::TF::SubOp>( in ComputeLogSoftmax() local
161 return log_softmax.getResult(); in ComputeLogSoftmax()
169 bool log_softmax) { in ComputeShardedSoftmax() argument
177 if (log_softmax) { in ComputeShardedSoftmax()
574 // softmax is 1 and log_softmax is 0. in ExpandOp()
579 const mlir::Value log_softmax = in ExpandOp() local
599 features_zero, log_softmax) in ExpandOp()
/aosp_15_r20/external/pytorch/torch/nn/modules/
adaptive.py
238 cluster_logprob = F.log_softmax(cluster_output, dim=1)
252 head_logprob = F.log_softmax(head_output, dim=1)
264 head_logprob = F.log_softmax(head_output, dim=1)
270 cluster_logprob = F.log_softmax(cluster_output, dim=1)
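
These hits are inside nn.AdaptiveLogSoftmaxWithLoss, which runs log_softmax over a head output and per-cluster outputs. A minimal usage sketch (feature size, class count, and cutoffs are made up for illustration):

    import torch
    import torch.nn as nn

    asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=1000, cutoffs=[100, 500])
    x = torch.randn(8, 64)
    target = torch.randint(0, 1000, (8,))
    out = asm(x, target)  # named tuple: out.output holds per-sample target log-probs, out.loss their mean NLL
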
loss.py
212 >>> log_softmax = nn.LogSoftmax(dim=1)
218 >>> loss = loss_fn(log_softmax(input), target)
227 >>> log_softmax = nn.LogSoftmax(dim=1)
229 >>> output = log_softmax(conv(data))
519 >>> input = F.log_softmax(torch.randn(3, 5, requires_grad=True), dim=1)
525 >>> log_target = F.log_softmax(torch.rand(3, 5), dim=1)
1855 :func:`torch.nn.functional.log_softmax`).
1895 >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
1913 >>> input = torch.randn(T, N, C).log_softmax(2).detach().requires_grad_()
1930 >>> input = torch.randn(T, C).log_softmax(1).detach().requires_grad_()
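
The docstring examples above pair log_softmax with NLL-style losses; the first pattern, runnable end to end (equivalent to CrossEntropyLoss applied to the raw logits):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    input = F.log_softmax(torch.randn(3, 5, requires_grad=True), dim=1)
    target = torch.tensor([1, 0, 4])
    loss = nn.NLLLoss()(input, target)
    loss.backward()
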
/aosp_15_r20/external/executorch/backends/arm/test/ops/
test_logsoftmax.py
52 .check(["torch.ops.aten.log_softmax.int"])
73 .check_not(["torch.ops.aten.log_softmax.int"])
97 .check_not(["torch.ops.aten.log_softmax.int"])
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/
activation.h
307 inline Tensor log_softmax( in log_softmax() function
314 ret = input.log_softmax(dim); in log_softmax()
316 ret = input.log_softmax(dim, dtype); in log_softmax()
325 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.log_softmax
334 /// F::log_softmax(input, LogSoftmaxFuncOptions(1));
336 inline Tensor log_softmax( in log_softmax() function
339 return detail::log_softmax(input, options.dim(), options.dtype()); in log_softmax()
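
The Python counterpart of the C++ call shown at line 334, F::log_softmax(input, LogSoftmaxFuncOptions(1)), for comparison:

    import torch
    import torch.nn.functional as F

    input = torch.randn(2, 3)
    out = F.log_softmax(input, dim=1)
    out64 = F.log_softmax(input, dim=1, dtype=torch.float64)  # optional dtype, as in the C++ overload
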
/aosp_15_r20/external/pytorch/test/cpp/tensorexpr/
test_kernel.cpp
1083 for (auto log_softmax : {false, true}) { in TEST_F()
1088 log_softmax ? a.log_softmax(softmax_dim) : a.softmax(softmax_dim); in TEST_F()
1091 env.s("op", log_softmax ? "log_softmax" : "softmax"); in TEST_F()
1156 for (auto log_softmax : {false, true}) { in TEST_F()
1166 log_softmax ? a.log_softmax(softmax_dim) : a.softmax(softmax_dim); in TEST_F()
1170 env.s("op", log_softmax ? "log_softmax" : "softmax"); in TEST_F()
1237 for (auto log_softmax : {false, true}) { in TEST_F()
1247 log_softmax ? a.log_softmax(softmax_dim) : a.softmax(softmax_dim); in TEST_F()
1251 env.s("op", log_softmax ? "log_softmax" : "softmax"); in TEST_F()
/aosp_15_r20/external/pytorch/torch/_refs/special/
__init__.py
37 "log_softmax",
206 def log_softmax( function
211 return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload]
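
As the definition above shows, torch.special.log_softmax simply forwards to torch.log_softmax; a quick equivalence check (shapes are illustrative):

    import torch

    x = torch.randn(3, 4)
    assert torch.allclose(torch.special.log_softmax(x, dim=1), torch.log_softmax(x, dim=1))
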
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
SoftMax.cpp
27 #include <ATen/ops/log_softmax.h>
499 Tensor log_softmax(const Tensor& input_, const int64_t dim_, std::optional<ScalarType> dtype) { in log_softmax() function
551 return at::log_softmax(input, dim, dtype); in special_log_softmax()
568 Tensor log_softmax(const Tensor& self, Dimname dim, std::optional<ScalarType> dtype) { in log_softmax() function
569 return at::log_softmax(self, dimname_to_position(self, dim), dtype); in log_softmax()
LossNLL.cpp
20 #include <ATen/ops/log_softmax.h>
506 auto input = at::log_softmax(self, class_dim, self.scalar_type()); in cross_entropy_loss_prob_target()
566 auto input = at::log_softmax(self, class_dim, self.scalar_type()); in cross_entropy_loss_label_smoothing()
652 at::log_softmax(self, class_dim, self.scalar_type()), in cross_entropy_loss_symint()
/aosp_15_r20/out/soong/.intermediates/hardware/interfaces/neuralnetworks/aidl/android.hardware.neuralnetworks-V4-ndk-source/gen/include/aidl/android/hardware/neuralnetworks/
OperationType.h
91 LOG_SOFTMAX = 64, enumerator
273 case OperationType::LOG_SOFTMAX: in toString()
274 return "LOG_SOFTMAX"; in toString()
435 aidl::android::hardware::neuralnetworks::OperationType::LOG_SOFTMAX,
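
LOG_SOFTMAX is NNAPI operation code 64. As I read the NNAPI spec (an assumption worth verifying against the docs), it takes a beta scale and an axis and computes output = logits * beta - log(reduce_sum(exp(logits * beta), axis)); a NumPy model of those semantics:

    import numpy as np

    def nnapi_log_softmax(logits, beta=1.0, axis=-1):
        # Assumed NNAPI semantics: logits * beta - log(reduce_sum(exp(logits * beta), axis))
        scaled = logits * beta
        m = np.max(scaled, axis=axis, keepdims=True)  # stabilizer; mathematically a no-op
        return scaled - m - np.log(np.sum(np.exp(scaled - m), axis=axis, keepdims=True))
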
/aosp_15_r20/external/pytorch/torch/masked/
_ops.py
189 log_softmax=(("dim__as_int",), ("dtype=None", "mask=None")),
237 log_softmax="""\
276 log_softmax="log_softmax",
969 "log_softmax",
1733 def log_softmax( function
1745 return torch.nn.functional.log_softmax(mask_input, dim_, dtype=dtype)
1748 f"masked log_softmax expects strided tensor (got {mask_input.layout} tensor)"
__init__.py
15 log_softmax,
42 "log_softmax",
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/distributions/
categorical.py
317 nn_ops.log_softmax(self.logits) * self.probs, axis=-1)
342 delta_log_probs1 = (nn_ops.log_softmax(a.logits) -
343 nn_ops.log_softmax(b.logits))
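
categorical.py uses log_softmax both for entropy (line 317) and for the KL term between two categoricals (lines 342–343); the KL pattern written out standalone (logit values are illustrative):

    import tensorflow as tf

    a_logits = tf.constant([[0.5, 1.0, -1.0]])
    b_logits = tf.constant([[0.1, 0.2, 0.3]])
    # KL(a || b) = sum_i p_a(i) * (log p_a(i) - log p_b(i))
    delta = tf.nn.log_softmax(a_logits) - tf.nn.log_softmax(b_logits)
    kl = tf.reduce_sum(tf.nn.softmax(a_logits) * delta, axis=-1)
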
/aosp_15_r20/external/executorch/kernels/portable/cpu/
op_log_softmax.cpp
53 // calculate max in log_softmax dim. During log_softmax in log_softmax_out()
/aosp_15_r20/external/executorch/exir/tests/
test_op_convert.py
64 aten.log_softmax.int: aten.log_softmax.int_out,
