/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/
  Softmax.cpp
    91   const bool half_to_float) {  in softmax_internal()
    188  const bool half_to_float) {  in softmax()
    195  const bool half_to_float) {  in log_softmax()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
  SoftMax.cpp
    19   const bool half_to_float) {  in mkldnn_softmax()
    36   const bool half_to_float) {  in mkldnn_softmax()

/aosp_15_r20/external/executorch/kernels/portable/cpu/util/
  activation_ops_util.cpp
    60   bool half_to_float,  in check_log_softmax_args()
    74   bool half_to_float,  in check_softmax_args()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
  SoftMax.cu
    820   Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float, const Tens…  in host_softmax()
    958   …backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float, const Tenso…  in host_softmax_backward()
    1081  bool half_to_float = grad.scalar_type() != input_dtype;  in TORCH_IMPL_FUNC() (local)
    1105  bool half_to_float = grad.scalar_type() != input_dtype;  in TORCH_IMPL_FUNC() (local)

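Taken together, the CUDA matches above show what the half_to_float flag means at the ATen level: when true, host_softmax reads fp16 input but accumulates and writes fp32, which keeps the exp/sum reduction out of fp16 range problems. A minimal C++ sketch of the observable behavior, assuming a CUDA build of PyTorch (aten::_softmax rejects half_to_float=true on CPU):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      auto x = at::randn({4, 8}, at::device(at::kCUDA).dtype(at::kHalf));

      // half_to_float=true: fp16 input, fp32 accumulation and output.
      auto y = at::_softmax(x, /*dim=*/-1, /*half_to_float=*/true);
      std::cout << y.dtype() << "\n";  // Float

      // half_to_float=false: stay in the input dtype end to end.
      auto z = at::_softmax(x, /*dim=*/-1, /*half_to_float=*/false);
      std::cout << z.dtype() << "\n";  // Half
    }
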
/aosp_15_r20/external/executorch/backends/apple/mps/serialization/
  mps_graph_schema.py
    145  half_to_float: bool = False  (variable in MPSSoftmax)
    151  half_to_float: bool = False  (variable in MPSLogSoftmax)

/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  SoftMax.cpp
    93   bool half_to_float = grad.scalar_type() != input_dtype;  in TORCH_META_FUNC() (local)
    125  bool half_to_float = grad.scalar_type() != input_dtype;  in TORCH_META_FUNC() (local)

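The two TORCH_META_FUNC matches above (and the matching pair in SoftMax.cu) share one idiom for recovering half_to_float in the backward pass: a dtype mismatch between the incoming gradient and the forward input means the forward pass promoted half to float. A small stand-alone illustration of just that check (the tensors here are stand-ins, not the real call sites):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      auto input_dtype = at::kHalf;              // dtype of the forward input
      auto grad = at::ones({4, 8}, at::kFloat);  // gradient w.r.t. an fp32 output
      // Same expression as at the call sites above: a mismatch implies the
      // forward softmax ran with half_to_float=true.
      bool half_to_float = grad.scalar_type() != input_dtype;
      std::cout << std::boolalpha << half_to_float << "\n";  // true
    }
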
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/
  SoftMax.cpp
    538  const bool half_to_float) {  in softmax_sparse_cpu()
    555  const bool half_to_float) {  in log_softmax_sparse_cpu()
  ParamUtils.cpp
    19   const bool half_to_float,  in softmax_sparse_input_preprocessing()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/cuda/
  SoftMax.cu
    590  const bool half_to_float) {  in softmax_sparse_cuda()
    607  const bool half_to_float) {  in log_softmax_sparse_cuda()

/aosp_15_r20/external/executorch/kernels/portable/cpu/
  op_softmax.cpp
    26   bool half_to_float,  in softmax_out()
  op_log_softmax.cpp
    26   bool half_to_float,  in log_softmax_out()

/aosp_15_r20/external/executorch/kernels/optimized/cpu/
  op_log_softmax.cpp
    131  bool half_to_float,  in opt_log_softmax_out()

/aosp_15_r20/external/igt-gpu-tools/lib/
  igt_halffloat.c
    191  static void half_to_float(const uint16_t *h, float *f, unsigned int num)  in half_to_float() (function)

/aosp_15_r20/external/pytorch/torch/distributed/tensor/parallel/
  loss.py
    127  def _log_softmax(x, dim, half_to_float, mesh, mesh_dim):  (argument)

/aosp_15_r20/external/executorch/kernels/test/
  op_softmax_test.cpp
    30   bool half_to_float,  in op_softmax_out()
  op_log_softmax_test.cpp
    31   bool half_to_float,  in op_log_softmax_out()

/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/
  NestedTensorMath.cpp
    507  const bool half_to_float) {  in softmax_nested()

/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/static/
  generated_ops.cpp
    1214  const auto half_to_float = p_node->Input(2).toBool();  in __anon5d9c3eb97802() (local)
    1752  const auto half_to_float = p_node->Input(2).toBool();  in __anon5d9c3eb9ad02() (local)
  ops.cpp
    2067  auto half_to_float = in_t.scalar_type() == at::ScalarType::Half &&  in __anon11f46a8b6302() (local)

/aosp_15_r20/external/pytorch/torch/onnx/
  symbolic_opset9.py
    2240  def _log_softmax(g: jit_utils.GraphContext, input, dim, half_to_float):  (argument)

/aosp_15_r20/hardware/google/gfxstream/third-party/astc-encoder/Source/
  tinyexr.h
    7192  static FP32 half_to_float(FP16 h) {  in half_to_float() (function)

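Unlike the PyTorch/ExecuTorch matches, the igt_halffloat.c and tinyexr.h hits implement the plain numeric conversion the identifier names: decoding IEEE 754 binary16 bits into a binary32 value. A self-contained sketch of that conversion (the function name here is hypothetical, not taken from either file):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Hypothetical helper: decode IEEE 754 binary16 bits to a float.
    static float half_bits_to_float(uint16_t h) {
      uint32_t sign = (uint32_t)(h >> 15) << 31;
      uint32_t exp  = (h >> 10) & 0x1F;
      uint32_t mant = h & 0x3FF;
      uint32_t bits;
      if (exp == 0x1F) {
        bits = sign | 0x7F800000u | (mant << 13);         // Inf / NaN
      } else if (exp != 0) {
        bits = sign | ((exp + 112) << 23) | (mant << 13); // normal: rebias 15 -> 127
      } else if (mant == 0) {
        bits = sign;                                      // signed zero
      } else {
        // Subnormal half: renormalize so the float stores it as a normal.
        int e = 1;
        while (!(mant & 0x400)) { mant <<= 1; --e; }
        mant &= 0x3FF;
        bits = sign | ((uint32_t)(e + 112) << 23) | (mant << 13);
      }
      float f;
      std::memcpy(&f, &bits, sizeof f);
      return f;
    }

    int main() {
      std::printf("%g %g %g\n",
                  half_bits_to_float(0x3C00),   // 1.0
                  half_bits_to_float(0xC000),   // -2.0
                  half_bits_to_float(0x0001));  // smallest subnormal, ~5.96e-08
    }

The batched signature in igt_halffloat.c (arrays plus a count) suggests it exists to convert whole pixel buffers, whereas tinyexr.h converts one value at a time during EXR decoding; the bit manipulation underneath is the same.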