Searched full:linear_prepack_fp16 (Results 1 – 14 of 14) sorted by relevance
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/dynamic/modules/
  rnn.py
    55    packed_weight = torch.ops.quantized.linear_prepack_fp16(qweight, bias)
    183   packed_ih = torch.ops.quantized.linear_prepack_fp16(w_ih, b_ih)
    184   packed_hh = torch.ops.quantized.linear_prepack_fp16(w_hh, b_hh)
    344   packed_ih = torch.ops.quantized.linear_prepack_fp16(w_ih, b_ih)
    345   packed_hh = torch.ops.quantized.linear_prepack_fp16(w_hh, b_hh)
    450   packed_ih = torch.ops.quantized.linear_prepack_fp16(
    453   packed_hh = torch.ops.quantized.linear_prepack_fp16(
    993   packed_weight_ih = torch.ops.quantized.linear_prepack_fp16(
    996   packed_weight_hh = torch.ops.quantized.linear_prepack_fp16(

/aosp_15_r20/external/pytorch/benchmarks/operator_benchmark/pt/
  linear_prepack_fp16_test.py
    8     # Configs for PT linear_prepack_fp16 operator
    34    self.set_module_name("linear_prepack_fp16")
    37    return torch.ops.quantized.linear_prepack_fp16(input_one)
  linear_unpack_fp16_test.py
    31    "input_one": torch.ops.quantized.linear_prepack_fp16(

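For reference, linear_unpack_fp16_test.py benchmarks the inverse op on a weight packed by linear_prepack_fp16. A minimal round-trip sketch (a hedged illustration, not code from the results above; it assumes a PyTorch build whose quantization engine supports the op, e.g. FBGEMM, and that quantized::linear_unpack_fp16 returns the (weight, bias) pair):

    import torch

    # Shapes are hypothetical, chosen only for illustration.
    weight = torch.randn(8, 4)   # float32 weight; stored as fp16 once packed
    bias = torch.randn(8)        # optional: the schema allows B=None

    # Pack, then unpack. Without a supporting engine, the checks in
    # qlinear_prepack.cpp (see below) raise at the pack step.
    packed = torch.ops.quantized.linear_prepack_fp16(weight, bias)
    w_out, b_out = torch.ops.quantized.linear_unpack_fp16(packed)

    # w_out reflects the original weight after an fp16 round trip, so
    # expect fp16 precision rather than bit-exact equality.
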
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/
  qlinear_prepack.cpp
    615   "quantized::linear_prepack_fp16 is currently " in run()
    623   "quantized::linear_prepack_fp16 is currently " in run()
    629   "Didn't find engine for operation quantized::linear_prepack_fp16 ", in run()
    655   "This model uses an outdated version of quantized.linear_prepack_fp16. " in run()
    683   …m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack_fp16"), TORCH_FN(QLinearPackWeightFp16::run… in TORCH_LIBRARY_IMPL()
    694   …m.impl(TORCH_SELECTIVE_NAME("_quantized::linear_prepack_fp16"), TORCH_FN(QLinearPackWeightFp16::ru… in TORCH_LIBRARY_IMPL()

/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/modules/
  linear.py
    40    self._packed_params = torch.ops.quantized.linear_prepack_fp16(weight, bias)

/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/
  library.cpp
    190   …m.def(TORCH_SELECTIVE_SCHEMA("quantized::linear_prepack_fp16(Tensor W, Tensor? B=None) -> __torch_… in TORCH_LIBRARY()
    248   …m.def(TORCH_SELECTIVE_SCHEMA("_quantized::linear_prepack_fp16(Tensor W, Tensor? B=None) -> __torch_… in TORCH_LIBRARY()

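Per the schema above, the op takes a float weight W and an optional bias B and returns a packed-params object (the return type is truncated in the hit). A minimal usage sketch, assuming an FBGEMM-capable build, pairing it with quantized::linear_dynamic_fp16, the compute op the dynamic modules in this list feed packed params into:

    import torch

    x = torch.randn(2, 4)        # float32 activations
    weight = torch.randn(8, 4)   # float32 weight, packed as fp16
    packed = torch.ops.quantized.linear_prepack_fp16(weight, None)  # B=None per schema

    # Dynamic fp16 linear consumes the packed params directly and runs a
    # linear with fp16 weights on float inputs.
    y = torch.ops.quantized.linear_dynamic_fp16(x, packed)
    print(y.shape)  # torch.Size([2, 8])
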
/aosp_15_r20/external/pytorch/test/mobile/model_test/
  model_ops.yaml
    437   quantized::linear_prepack_fp16: 67
  coverage.yaml
    1091  quantized::linear_prepack_fp16: 25

/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/
  utils.py
    149   return torch.ops.quantized.linear_prepack_fp16
  _lower_to_native_backend.py
    348   torch._ops.ops.quantized.linear_prepack_fp16,

/aosp_15_r20/external/pytorch/test/quantization/core/
  test_quantized_op.py
    3312  w_packed_fp16 = torch.ops.quantized.linear_prepack_fp16(w, bias)
    3328  qlinear_prepack = torch.ops.quantized.linear_prepack_fp16
    3514  packed_ih = torch.ops.quantized.linear_prepack_fp16(Wq1.dequantize(), b1)
    3515  packed_hh = torch.ops.quantized.linear_prepack_fp16(Wq2.dequantize(), b2)
    3642  packed_ih = torch.ops.quantized.linear_prepack_fp16(Wq1.dequantize(), b1)
    3643  packed_hh = torch.ops.quantized.linear_prepack_fp16(Wq2.dequantize(), b2)

/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/quantization/
  quantization_patterns.h
    1167  %packed_params = quantized::linear_prepack_fp16(%w, %b) in linear_prepack_unpack_patterns()

/aosp_15_r20/external/pytorch/benchmarks/static_runtime/
  test_static_runtime.cc
    2337  %packed_params = quantized::linear_prepack_fp16(%weights, %bias) in TEST()
    2358  %packed_params = quantized::linear_prepack_fp16(%weights, %bias) in TEST()

/aosp_15_r20/external/pytorch/test/quantization/fx/
  test_quantize_fx.py
    1776  ns.call_function(torch.ops.quantized.linear_prepack_fp16)),