Searched full:linear_unpack (Results 1 – 11 of 11) sorted by relevance
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/

qlinear_unpack.cpp
  50: "quantized.linear_unpack(Tensor) is unsupported! Please "  in run()
  61: "quantized.linear_unpack(Tensor) is unsupported! Please "  in run()
  68: …m.impl(TORCH_SELECTIVE_NAME("quantized::linear_unpack.legacy"), TORCH_FN(QLinearUnpackWeightInt8Le…  in TORCH_LIBRARY_IMPL()
  74: m.impl(TORCH_SELECTIVE_NAME("quantized::linear_unpack"), TORCH_FN(QLinearUnpackWeightInt8::run));  in TORCH_LIBRARY_IMPL()

library.cpp
  193: …m.def(TORCH_SELECTIVE_SCHEMA("quantized::linear_unpack(__torch__.torch.classes.quantized.LinearPac…  in TORCH_LIBRARY()
  195: …m.def(TORCH_SELECTIVE_SCHEMA("quantized::linear_unpack.legacy(Tensor W_prepack) -> (Tensor W_origi…  in TORCH_LIBRARY()
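The schemas above take a packed-params object (or, for the .legacy overload, a plain tensor) and return the original weight plus optional bias. A minimal round-trip sketch, assuming a PyTorch build with a quantized backend (FBGEMM or QNNPACK); shapes and quantization parameters are illustrative:

    import torch

    # Illustrative shapes and qparams; any 2-D weight works.
    w_fp32 = torch.randn(4, 8)
    w_q = torch.quantize_per_tensor(w_fp32, scale=0.1, zero_point=0, dtype=torch.qint8)
    bias = torch.randn(4)

    # Pack with quantized::linear_prepack, then recover weight and bias with
    # the quantized::linear_unpack op registered in library.cpp /
    # qlinear_unpack.cpp above.
    packed = torch.ops.quantized.linear_prepack(w_q, bias)
    w_back, b_back = torch.ops.quantized.linear_unpack(packed)

    assert torch.equal(w_back.int_repr(), w_q.int_repr())
    assert torch.equal(b_back, bias)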
/aosp_15_r20/external/pytorch/test/quantization/core/

test_quantized_module.py
  160: w_model, b_model = torch.ops.quantized.linear_unpack(model_dict[key])
  161: w_loaded, b_loaded = torch.ops.quantized.linear_unpack(loaded_dict[key])
  170: linear_unpack = torch.ops.quantized.linear_unpack
  171: self.assertEqual(linear_unpack(qlinear._packed_params._packed_params),
  172:     linear_unpack(loaded_qlinear._packed_params._packed_params))
  180: …self.assertEqual(qlinear._weight_bias(), torch.ops.quantized.linear_unpack(qlinear._packed_params.…
  1637: w_model, b_model = torch.ops.quantized.linear_unpack(model_dict[key])
  1638: w_loaded, b_loaded = torch.ops.quantized.linear_unpack(loaded_dict[key])
  1646: linear_unpack = torch.ops.quantized.linear_unpack
  1647: self.assertEqual(linear_unpack(qlinear._packed_params._packed_params),  [all …]

test_quantized_op.py
  4139: """Tests the correctness of the quantized::linear_unpack op."""
  4152: qlinear_unpack = torch.ops.quantized.linear_unpack
  4268: """Tests the correctness of the quantized::linear_unpack after freeing original tensor op."""
  4277: qlinear_unpack = torch.ops.quantized.linear_unpack
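The module tests above run a serialization round trip and compare the unpacked params on both sides. A condensed sketch of that check, assuming a quantized backend; sizes are illustrative:

    import io
    import torch

    qlinear = torch.ao.nn.quantized.Linear(8, 4)

    # Save and reload the state dict, then rebuild a fresh module from it.
    buf = io.BytesIO()
    torch.save(qlinear.state_dict(), buf)
    buf.seek(0)
    loaded_qlinear = torch.ao.nn.quantized.Linear(8, 4)
    loaded_qlinear.load_state_dict(torch.load(buf))

    # Same comparison as test_quantized_module.py lines 170-172 above.
    linear_unpack = torch.ops.quantized.linear_unpack
    w_model, b_model = linear_unpack(qlinear._packed_params._packed_params)
    w_loaded, b_loaded = linear_unpack(loaded_qlinear._packed_params._packed_params)
    assert torch.equal(w_model.int_repr(), w_loaded.int_repr())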
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/quantization/

quantization_patterns.h
  495: %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)  in quant_fusion_pattern_and_replacements()
  504: %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)  in quant_fusion_pattern_and_replacements()
  514: %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)  in quant_fusion_pattern_and_replacements()
  1087: %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)  in dynamic_quantized_linear_pattern_and_replacements()
  1115: %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)  in dynamic_quant_fusion_pattern_and_replacements()
  1156: %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::linear_unpack(%packed_params)  in linear_prepack_unpack_patterns()
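These are TorchScript IR patterns that the quant-fusion passes match so a linear_prepack/linear_unpack pair can be folded out of the graph. A rough way to see the packed-params value they operate on, assuming a quantization-enabled build:

    import torch

    # Scripting a quantized module and printing its graph shows the
    # LinearPackedParamsBase value flowing into quantized::linear; the
    # patterns above rewrite graphs where that value instead goes through
    # quantized::linear_unpack.
    scripted = torch.jit.script(torch.ao.nn.quantized.Linear(8, 4))
    print(scripted.graph)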
/aosp_15_r20/external/pytorch/test/quantization/jit/

test_deprecated_jit_quant.py
  169: torch.ops.quantized.linear_unpack(self._packed_weight)[0],
  183: return torch.ops.quantized.linear_unpack(self._packed_weight)[0]

test_ondevice_quantization.py
  285: if n.kind() == "quantized::linear_unpack":
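The on-device test above walks a TorchScript graph and matches nodes by kind. A small sketch of the same traversal, assuming a scriptable quantized module:

    import torch

    scripted = torch.jit.script(torch.ao.nn.quantized.Linear(8, 4))
    for n in scripted.graph.nodes():
        # test_ondevice_quantization.py checks specifically for
        # "quantized::linear_unpack"; this prints every quantized op kind.
        if n.kind().startswith("quantized::"):
            print(n.kind())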
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/onnx/

unpack_quantized_weights.cpp
  642: "quantized::linear_unpack",  in UnpackQuantizedWeights()
  648: "quantized::linear_unpack",  in UnpackQuantizedWeights()
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/modules/

linear.py
  47: return torch.ops.quantized.linear_unpack(self._packed_params)
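Line 47 is the body of the quantized Linear module's _weight_bias(), which the public weight()/bias() accessors route through. A minimal usage sketch, assuming a quantized backend:

    import torch

    qlinear = torch.ao.nn.quantized.Linear(in_features=8, out_features=4)

    # weight()/bias() call _weight_bias(), i.e. the linear_unpack call on
    # line 47 above.
    w = qlinear.weight()   # quantized weight recovered from the packed params
    b = qlinear.bias()     # float bias, or None
    print(w.shape, w.dtype)  # torch.Size([4, 8]) torch.qint8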
/aosp_15_r20/external/pytorch/test/mobile/model_test/

model_ops.yaml
  439: quantized::linear_unpack: 46

coverage.yaml
  1092: quantized::linear_unpack: 4