/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/ |
H A D | hardsigmoid.c |
  42  … "failed to create Hardsigmoid operator with %zu channels: number of channels must be non-zero", in pytorch_qnnp_create_hardsigmoid_nc_q8()
  49  … "failed to create Hardsigmoid operator with %.7g input scale: scale must be finite and positive", in pytorch_qnnp_create_hardsigmoid_nc_q8()
  56  … "failed to create Hardsigmoid operator with %.7g output scale: scale must be finite and positive", in pytorch_qnnp_create_hardsigmoid_nc_q8()
  63  "failed to create Hardsigmoid operator with [%" PRIu8 ", %" PRIu8 in pytorch_qnnp_create_hardsigmoid_nc_q8()
  74  … "failed to create Hardsigmoid operator with %.7g output scale: only output scale of 1/256 is suppo… in pytorch_qnnp_create_hardsigmoid_nc_q8()
  81  "failed to create Hardsigmoid operator with %" PRIu8 in pytorch_qnnp_create_hardsigmoid_nc_q8()
  100 "failed to allocate 256 bytes for Hardsigmoid lookup table"); in pytorch_qnnp_create_hardsigmoid_nc_q8()
  111 // hardsigmoid, no min/max functions in C in pytorch_qnnp_create_hardsigmoid_nc_q8()
  140 pytorch_qnnp_operator_t hardsigmoid, in pytorch_qnnp_setup_hardsigmoid_nc_q8() argument
  153 hardsigmoid->batch_size = 0; in pytorch_qnnp_setup_hardsigmoid_nc_q8()
  [all …]
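A minimal Python sketch (not the QNNPACK C source) of how a 256-entry q8 hardsigmoid lookup table along these lines could be built; the function name, signature, and NumPy usage are hypothetical, while the constraints mirror the error strings above (finite positive input scale, fixed 1/256 output scale) and the "no min/max functions in C" comment (clamping done by hand).

import numpy as np

def build_hardsigmoid_q8_lut(input_scale, input_zero_point, output_zero_point,
                             output_scale=1.0 / 256.0):
    # Hypothetical helper, not QNNPACK's implementation.
    assert np.isfinite(input_scale) and input_scale > 0.0  # "scale must be finite and positive"
    lut = np.empty(256, dtype=np.uint8)                    # the 256-byte lookup table
    for q_in in range(256):
        x = (q_in - input_zero_point) * input_scale        # dequantize the input byte
        y = min(max(x / 6.0 + 0.5, 0.0), 1.0)              # hardsigmoid(x), clamped by hand
        q_out = round(y / output_scale) + output_zero_point
        lut[q_in] = min(max(q_out, 0), 255)                # saturate to the uint8 range
    return lut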
|
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/glsl/ |
H A D | activations.h |
  34  float hardsigmoid(float x) { in hardsigmoid() function
  38  vec4 hardsigmoid(vec4 tex) { in hardsigmoid() function
  40  hardsigmoid(tex.x), in hardsigmoid()
  41  hardsigmoid(tex.y), in hardsigmoid()
  42  hardsigmoid(tex.z), in hardsigmoid()
  43  hardsigmoid(tex.w)); in hardsigmoid()
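For reference, a NumPy analogue (not the GLSL itself) of the scalar/vec4 pair above; the same clamp applies elementwise over a 4-vector, which is what the vec4 overload does per component.

import numpy as np

def hardsigmoid(x):
    # hardsigmoid(x) = clamp(x / 6 + 1/2, 0, 1), applied elementwise
    return np.clip(x / 6.0 + 0.5, 0.0, 1.0)

tex = np.array([-4.0, -1.5, 0.0, 3.0])  # stand-in for a vec4 texel
print(hardsigmoid(tex))                 # [0.   0.25 0.5  1.  ]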
|
H A D | unary_op.yaml |
  43  - NAME: hardsigmoid
  44  OPERATOR: hardsigmoid(X)
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/ |
H A D | CMakeLists.txt |
  164 src/hardsigmoid.c
  548 add_executable(hardsigmoid-test test/hardsigmoid.cc)
  549 set_target_properties(hardsigmoid-test PROPERTIES
  553 target_include_directories(hardsigmoid-test PRIVATE src test)
  554 target_link_libraries(hardsigmoid-test PRIVATE pytorch_qnnpack cpuinfo gtest gtest_main)
  555 add_test(hardsigmoid-test hardsigmoid-test)
  803 add_executable(hardsigmoid-bench bench/hardsigmoid.cc)
  804 set_target_properties(hardsigmoid-bench PROPERTIES
  808 target_link_libraries(hardsigmoid-bench PRIVATE pytorch_qnnpack benchmark)
|
H A D | configure.py |
  89  build.cc("hardsigmoid.c"),
  225 build.unittest("hardsigmoid-test", build.cxx("hardsigmoid.cc"))
  264 build.benchmark("hardsigmoid-bench", build.cxx("hardsigmoid.cc"))
|
H A D | buckbuild.bzl |
  271 "src/hardsigmoid.c",
  579 "test/hardsigmoid.cc",
  617 "hardsigmoid-operator-tester.h": "test/hardsigmoid-operator-tester.h",
|
/aosp_15_r20/external/pytorch/torch/ao/ns/fx/ |
H A D | mappings.py |
  241 # F.hardsigmoid
  244 "hardsigmoid",
  245 F.hardsigmoid,
  246 nn.Hardsigmoid,
  541 F.hardsigmoid,
  679 nn.Hardsigmoid,
  703 "hardsigmoid",
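The mapping groups the different spellings of the op (the "hardsigmoid" string, F.hardsigmoid, and nn.Hardsigmoid). A quick check, assuming a recent PyTorch, that the module and functional forms agree numerically:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.linspace(-4.0, 4.0, 9)
module_out = nn.Hardsigmoid()(x)   # nn.Hardsigmoid
functional_out = F.hardsigmoid(x)  # F.hardsigmoid
assert torch.equal(module_out, functional_out)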
|
/aosp_15_r20/external/pytorch/benchmarks/operator_benchmark/pt/ |
H A D | hardsigmoid_test.py |
  8   Microbenchmarks for the hardsigmoid operator.
  12  # Configs for hardsigmoid ops
  34  ["Hardsigmoid", nn.Hardsigmoid],
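A rough timing sketch in plain timeit (not the operator_benchmark harness that hardsigmoid_test.py uses); the shape below is an arbitrary example, not one of the file's configs.

import timeit
import torch
import torch.nn as nn

x = torch.randn(64, 3, 128, 128)
m = nn.Hardsigmoid()
elapsed = timeit.timeit(lambda: m(x), number=100)
print(f"nn.Hardsigmoid: {elapsed / 100 * 1e3:.3f} ms per call")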
|
H A D | qactivation_test.py | 49 ("functional.hardsigmoid", qF.hardsigmoid),
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/metal/ |
H A D | MetalNeuronType.h |
  16  HardSigmoid, enumerator
  45  } else if (type == NeuronType::HardSigmoid) { in neuron()
  46  return [MPSCNNNeuronOp hardSigmoid]; in neuron()
  60  } else if (type == NeuronType::HardSigmoid) { in neuronDescriptor()
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/bench/ |
H A D | hardsigmoid.cc |
  49  state.SkipWithError("failed to create Hardsigmoid operator"); in hardsigmoid_q8()
  60  state.SkipWithError("failed to setup Hardsigmoid operator"); in hardsigmoid_q8()
  67  state.SkipWithError("failed to run Hardsigmoid operator"); in hardsigmoid_q8()
  81  state.SkipWithError("failed to delete Hardsigmoid operator"); in hardsigmoid_q8()
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/ |
H A D | qhardsigmoid.cpp |
  59  "failed to create QNNPACK Hardsigmoid operator"); in qnnpack_hardsigmoid()
  75  "failed to setup QNNPACK Hardsigmoid operator"); in qnnpack_hardsigmoid()
  84  "failed to run QNNPACK Hardsigmoid operator"); in qnnpack_hardsigmoid()
  103 // Note: we create a new temporary tensor because the output of hardsigmoid in hardsigmoid_out_quantized_cpu()
|
/aosp_15_r20/external/executorch/backends/qualcomm/builders/ |
H A D | op_hardsigmoid.py |
  21  target = ["aten.hardsigmoid.default"]
  58  # The operation enum of hardsigmoid in QNN
  65  # The parameter used in Pytorch definition for hardsigmoid
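PyTorch's hardsigmoid can be written in the affine-then-clamp form clamp(alpha * x + beta, 0, 1) with alpha = 1/6 and beta = 1/2, which is the kind of parameterization a backend builder forwards; the actual QNN enum and parameter names are not reproduced here. A small check of that identity:

import torch
import torch.nn.functional as F

alpha, beta = 1.0 / 6.0, 0.5                 # assumed affine parameters, for illustration only
x = torch.linspace(-6.0, 6.0, 25)
assert torch.allclose(F.hardsigmoid(x), torch.clamp(alpha * x + beta, 0.0, 1.0))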
|
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/ |
H A D | UnaryOp.cpp |
  139 DEFINE_ACTIVATION_FN(hardsigmoid);
  157 VK_REGISTER_OP(aten.hardsigmoid.default, hardsigmoid);
|
/aosp_15_r20/external/pytorch/torch/nn/modules/ |
H A D | activation.py |
  22  "Hardsigmoid",
  330 class Hardsigmoid(Module): class
  331 r"""Applies the Hardsigmoid function element-wise.
  333 Hardsigmoid is defined as:
  336 \text{Hardsigmoid}(x) = \begin{cases}
  349 .. image:: ../scripts/activation_images/Hardsigmoid.png
  353 >>> m = nn.Hardsigmoid()
  367 return F.hardsigmoid(input, self.inplace)
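The \begin{cases} block is cut off in the match above; the full definition (the standard one, not recovered from the snippet) is:

  Hardsigmoid(x) = 0            if x <= -3
                   1            if x >= +3
                   x / 6 + 1/2  otherwise

so the output always lies in [0, 1], which is why the fixed 0-to-1 quantization observers referenced below can be used.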
|
H A D | __init__.py |
  9   Hardsigmoid,
  226 "Hardsigmoid",
|
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/ |
H A D | functional.py |
  35  "hardsigmoid",
  650 def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor: function
  651 r"""This is the quantized version of :func:`~torch.nn.functional.hardsigmoid`."""
  653 raise ValueError("Input to 'quantized.hardsigmoid' must be quantized!")
  656 return torch._C._nn.hardsigmoid(input)
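A minimal usage sketch, assuming a PyTorch build with eager-mode quantization: the input must already be quantized, as the ValueError at line 653 enforces; the scale and zero_point below are arbitrary example values, not required by the API.

import torch
from torch.ao.nn.quantized import functional as qF

x = torch.randn(4, 8)
qx = torch.quantize_per_tensor(x, scale=0.05, zero_point=128, dtype=torch.quint8)  # example qparams
qy = qF.hardsigmoid(qx)                  # would raise ValueError if x were not quantized
print(qy.q_scale(), qy.q_zero_point())   # output carries fixed qparams for the [0, 1] range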
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/ |
H A D | Clamp.cpp |
  324 Tensor hardsigmoid(const Tensor& self) { in hardsigmoid() function
  325 return ops::activation(self, VK_KERNEL(hardsigmoid)); in hardsigmoid()
  601 m.impl(TORCH_SELECTIVE_NAME("aten::hardsigmoid"), hardsigmoid); in TORCH_LIBRARY_IMPL()
|
/aosp_15_r20/external/pytorch/test/quantization/eager/ |
H A D | test_quantize_eager_qat.py |
  670 self.hardsigmoid = torch.nn.Hardsigmoid()
  678 x = self.hardsigmoid(x)
  686 for attr in ['sigmoid', 'hardsigmoid', 'tanh']:
  694 for attr in ['sigmoid', 'hardsigmoid', 'tanh']:
  703 for attr in ['sigmoid', 'hardsigmoid', 'tanh']:
|
/aosp_15_r20/external/pytorch/torch/ao/quantization/ |
H A D | qconfig_mapping.py |
  45  torch.nn.Hardsigmoid: default_fixed_qparams_range_0to1_observer,
  46  torch.nn.functional.hardsigmoid: default_fixed_qparams_range_0to1_observer,
  47  "hardsigmoid": default_fixed_qparams_range_0to1_observer,
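The 0-to-1 fixed-qparams observer makes sense because hardsigmoid's output range is known in advance; for quint8 that corresponds to scale = 1/256 and zero_point = 0, the same fixed output scale the QNNPACK operator above requires. A small sketch of the arithmetic (not the observer implementation):

scale = (1.0 - 0.0) / 256.0        # fixed [0, 1] range over 256 uint8 levels
zero_point = 0

def quantize(y):
    return min(max(round(y / scale) + zero_point, 0), 255)

print(quantize(0.0), quantize(0.5), quantize(1.0))  # 0 128 255 (1.0 saturates at 255)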
|
/aosp_15_r20/external/executorch/backends/qualcomm/tests/ |
H A D | models.py |
  522 class HardSigmoid(torch.nn.Module): class
  525 self.hardsigmoid = torch.nn.Hardsigmoid()
  528 return self.hardsigmoid(x)
|
/aosp_15_r20/external/pytorch/functorch/op_analysis/ |
H A D | public_api |
  104 nn.functional.hardsigmoid
  474 nn.functional.hardsigmoid
|
/aosp_15_r20/external/executorch/backends/xnnpack/partition/ |
H A D | configs.py |
  72  torch.nn.Hardsigmoid, # we can handle decomposition
  120 torch.nn.Hardsigmoid,
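The "we can handle decomposition" comment refers to lowering hardsigmoid into more primitive ops. One illustrative identity (not necessarily XNNPACK's actual lowering) is the relu6-based rewrite:

import torch
import torch.nn.functional as F

x = torch.linspace(-5.0, 5.0, 21)
decomposed = F.relu6(x + 3.0) / 6.0           # clamp(x + 3, 0, 6) / 6
assert torch.allclose(F.hardsigmoid(x), decomposed)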
|
/aosp_15_r20/external/pytorch/test/cpp/jit/ |
H A D | test_subgraph_utils.cpp |
  64  %q5 : Tensor = aten::hardsigmoid(%q4) in TEST()
  104 ->check("aten::hardsigmoid") in TEST()
|
/aosp_15_r20/external/pytorch/torch/ao/quantization/backend_config/ |
H A D | _common_operator_config_utils.py |
  118 torch.nn.Hardsigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
  119 torch.nn.functional.hardsigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
  120 "hardsigmoid": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
|