Searched full:zero_point (Results 1 – 25 of 1072) sorted by relevance


/aosp_15_r20/external/XNNPACK/test/
qs8-requantization.cc
36 for (int32_t zero_point = std::numeric_limits<int8_t>::min(); in TEST() local
37 zero_point <= std::numeric_limits<int8_t>::max(); in TEST()
38 zero_point++) in TEST()
42 .zero_point(zero_point) in TEST()
52 for (int32_t zero_point = std::numeric_limits<int8_t>::min(); in TEST() local
53 zero_point <= std::numeric_limits<int8_t>::max(); in TEST()
54 zero_point++) in TEST()
58 .zero_point(zero_point) in TEST()
68 for (int32_t zero_point = std::numeric_limits<int8_t>::min(); in TEST() local
69 zero_point <= std::numeric_limits<int8_t>::max(); in TEST()
[all …]
qu8-requantization.cc
36 for (int32_t zero_point = 1; zero_point < 256; zero_point++) { in TEST() local
39 .zero_point(zero_point) in TEST()
49 for (int32_t zero_point = 0; zero_point < 256; zero_point++) { in TEST() local
52 .zero_point(zero_point) in TEST()
62 for (int32_t zero_point = 0; zero_point < 256; zero_point++) { in TEST() local
65 .zero_point(zero_point) in TEST()
75 for (int32_t zero_point = 0; zero_point < 256; zero_point++) { in TEST() local
78 .zero_point(zero_point) in TEST()
98 .zero_point(128) in TEST()
119 for (int32_t zero_point = 1; zero_point < 256; zero_point++) { in TEST() local
[all …]
qu8-avgpool-minmax.cc
71 for (int32_t zero_point = 0; zero_point <= 255; zero_point += 51) { in TEST() local
76 .input_zero_point(zero_point) in TEST()
95 for (int32_t zero_point = 0; zero_point <= 255; zero_point += 51) { in TEST() local
100 .output_zero_point(zero_point) in TEST()
218 for (int32_t zero_point = 0; zero_point <= 255; zero_point += 51) { in TEST() local
223 .input_zero_point(zero_point) in TEST()
246 for (int32_t zero_point = 0; zero_point <= 255; zero_point += 51) { in TEST() local
251 .output_zero_point(zero_point) in TEST()
380 for (int32_t zero_point = 0; zero_point <= 255; zero_point += 51) { in TEST() local
385 .input_zero_point(zero_point) in TEST()
[all …]
requantization-tester.h
42 inline RequantizationTester& zero_point(int32_t zero_point) { in zero_point() argument
43 this->zero_point_ = zero_point; in zero_point()
47 inline int32_t zero_point() const { in zero_point() function
86 ASSERT_GE(zero_point(), std::numeric_limits<uint8_t>::min()); in TestExactDivideByPO2()
87 ASSERT_LE(zero_point(), std::numeric_limits<uint8_t>::max()); in TestExactDivideByPO2()
100 const int32_t max_i = (uint32_t(std::numeric_limits<int32_t>::max()) >> s()) + zero_point(); in TestExactDivideByPO2()
101 const int32_t min_i = -(-uint32_t(std::numeric_limits<int32_t>::min()) >> s()) + zero_point(); in TestExactDivideByPO2()
104 inputs[i] = int32_t(uint32_t(clamped_i - zero_point()) << s()); in TestExactDivideByPO2()
107 scale(), zero_point(), qmin(), qmax(), in TestExactDivideByPO2()
114 << ", s = " << s() << ", zero point = " << zero_point(); in TestExactDivideByPO2()
[all …]
/aosp_15_r20/external/XNNPACK/eval/
f32-qs8-cvt.cc
34 for (int32_t zero_point = std::numeric_limits<int8_t>::min(); in TEST() local
35 zero_point <= std::numeric_limits<int8_t>::max(); in TEST()
36 zero_point++) in TEST()
38 …st uint32_t max_input = float_as_uint32((float) (std::numeric_limits<int8_t>::max() - zero_point)); in TEST()
43 …_f32_qs8_cvt__neon(kBlockSize * sizeof(int8_t), inputs.data(), outputs.data(), int8_t(zero_point)); in TEST()
45 long reference_output = std::lrintf(inputs[i]) + long(zero_point); in TEST()
55 << ", zero point = " << std::dec << zero_point; in TEST()
66 for (int32_t zero_point = std::numeric_limits<int8_t>::min(); in TEST() local
67 zero_point <= std::numeric_limits<int8_t>::max(); in TEST()
68 zero_point++) in TEST()
[all …]
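
The f32-qs8-cvt test above checks the NEON conversion kernel against a scalar reference (its line 45): round the float input, then add the zero point. A minimal Python sketch of that reference follows; the function name is illustrative, and the final clamp is only a safety net, since the test bounds its inputs so the result always fits in int8.

    def f32_to_qs8_reference(x, zero_point):
        # lrintf rounds to nearest with ties to even; Python's round() behaves the same
        q = round(x) + zero_point
        # saturate to the int8 range (the actual test restricts max_input instead)
        return max(-128, min(127, q))
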
f32-qu8-cvt.cc
34 for (int32_t zero_point = std::numeric_limits<uint8_t>::min(); in TEST() local
35 zero_point <= std::numeric_limits<uint8_t>::max(); in TEST()
36 zero_point++) in TEST()
38 …t uint32_t max_input = float_as_uint32((float) (std::numeric_limits<uint8_t>::max() - zero_point)); in TEST()
43 …32_qu8_cvt__neon(kBlockSize * sizeof(uint8_t), inputs.data(), outputs.data(), uint8_t(zero_point)); in TEST()
45 long reference_output = std::lrintf(inputs[i]) + long(zero_point); in TEST()
55 << ", zero point = " << std::dec << zero_point; in TEST()
66 for (int32_t zero_point = std::numeric_limits<uint8_t>::min(); in TEST() local
67 zero_point <= std::numeric_limits<uint8_t>::max(); in TEST()
68 zero_point++) in TEST()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/
AffineQuantizerBase.cpp
18 void checkZeroPoint(const std::string& fn_name, int64_t zero_point) { in checkZeroPoint() argument
20 zero_point <= std::numeric_limits<T>::max(), in checkZeroPoint()
22 " zero_point ", in checkZeroPoint()
23 zero_point, in checkZeroPoint()
26 zero_point >= std::numeric_limits<T>::min(), in checkZeroPoint()
28 " zero_point ", in checkZeroPoint()
29 zero_point, in checkZeroPoint()
38 T quantize_val(double scale, int64_t zero_point, float value) { in quantize_val() argument
50 static_cast<int32_t>(zero_point), in quantize_val()
59 int64_t zero_point, in quantize_vec() argument
[all …]
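
For orientation, quantize_val above performs standard affine quantization. A minimal sketch of the affine quantize/dequantize pair under the usual formula q = clamp(round(value / scale) + zero_point, qmin, qmax); the function names here are illustrative, not the ATen API.

    def quantize_val(scale, zero_point, value, qmin=-128, qmax=127):
        q = int(round(value / scale)) + zero_point
        return max(qmin, min(qmax, q))       # clamp to the quantized dtype's range

    def dequantize_val(scale, zero_point, q):
        return (q - zero_point) * scale      # approximate reconstruction of the float value
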
FakeQuantPerChannelAffine.cpp
23 zero_point: zero_point of per channel affine quantization
35 const Tensor& zero_point, in fake_quantize_per_channel_affine() argument
40 self, scale, zero_point, axis, quant_min, quant_max); in fake_quantize_per_channel_affine()
47 const Tensor& zero_point, in fake_quantize_per_channel_affine_cachemask() argument
53 …TORCH_CHECK(zero_point.scalar_type() == ScalarType::Int || zero_point.scalar_type() == ScalarType:… in fake_quantize_per_channel_affine_cachemask()
54 "Zero-point must be Int32, Float or Half, found ", zero_point.scalar_type()); in fake_quantize_per_channel_affine_cachemask()
56 TORCH_CHECK(zero_point.dim() == 1, "zero point should be a 1-D tensor"); in fake_quantize_per_channel_affine_cachemask()
58 scale.numel() == zero_point.numel(), in fake_quantize_per_channel_affine_cachemask()
69 if(!at::isFloatingType(zero_point.scalar_type())){ in fake_quantize_per_channel_affine_cachemask()
71 at::min(zero_point).item().toInt() >= quant_min && in fake_quantize_per_channel_affine_cachemask()
[all …]
/aosp_15_r20/external/pytorch/test/quantization/core/
test_quantized_tensor.py
29 """Calculate the dynamic quantization parameters (scale, zero_point)
51 zero_point = 0
156 zero_point = np.random.randint(0.0, 10)
159 qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=dtype)
170 qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=dtype)
176 qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=dtype)
189 zero_point = np.random.randint(0.0, 10)
230 zero_point = 2
233 qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
235 self.assertEqual(qr.q_zero_point(), zero_point)
[all …]
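
A short usage sketch of the per-tensor API exercised by the test above; the values are illustrative.

    import torch

    r = torch.rand(2, 2)
    scale, zero_point = 0.1, 2
    qr = torch.quantize_per_tensor(r, scale, zero_point, torch.quint8)
    assert qr.q_scale() == scale and qr.q_zero_point() == zero_point
    x = qr.dequantize()   # back to float: (q - zero_point) * scale
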
test_workflow_ops.py
36 # Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant…
37 def _fake_quantize_per_tensor_affine_reference(X, scale, zero_point, quant_min, quant_max): argument
39 …mp(torch.round(X.to(torch.float32) * (1.0 / scale) + zero_point), quant_min, quant_max) - zero_poi…
43 # Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant…
44 def _fake_quantize_per_tensor_affine_grad_reference(dY, X, scale, zero_point, quant_min, quant_max): argument
46 Xq = torch.round(X.to(torch.float32) * (1.0 / scale) + zero_point)
53 def _fake_quantize_learnable_per_tensor_affine_grad_reference(dY, X, scale, zero_point, quant_min, … argument
58 zero_point_rounded = int((zero_point + 0.5).clamp(quant_min, quant_max).item())
85 dY, X, scale, zero_point, quant_min, quant_max).to(device)
93 def _quantize_per_tensor(x, scale, zero_point, quant_min, quant_max): argument
[all …]
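
The reference at its line 39 (truncated above) is the usual fake-quantize round trip: map onto the integer grid, clamp, then immediately dequantize so the tensor stays in float. A hedged restatement:

    import torch

    def fake_quantize_per_tensor_affine_reference(X, scale, zero_point, quant_min, quant_max):
        # quantize: scale, shift by zero_point, clamp to the integer range
        Xq = torch.clamp(torch.round(X.to(torch.float32) * (1.0 / scale) + zero_point),
                         quant_min, quant_max)
        # dequantize: undo the shift and the scaling
        return (Xq - zero_point) * scale
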
test_quantized_op.py
180 X, (scale, zero_point, torch_type) = X
207 qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
227 output_zero_point = zero_point
231 zero_point=output_zero_point,
273 for shape, dtype, scale, zero_point in test_cases:
275 X = (X, (scale, zero_point, dtype))
295 for shape, dtype, scale, zero_point in test_cases:
297 X = (X, (scale, zero_point, dtype))
413 X, scale, zero_point, torch_type, alpha = \
417 qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
[all …]
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/modules/
activation.py
23 zero_point, and :math:`q(6)` is the quantized representation of number 6.
64 zero_point: quantization zero point of the output tensor
67 def __init__(self, scale, zero_point, device=None, dtype=None): argument
71 self.register_buffer("zero_point", torch.tensor(zero_point, **factory_kwargs))
74 return torch.ops.quantized.hardswish(input, self.scale, self.zero_point)
81 scale, zero_point = mod.activation_post_process.calculate_qparams()
82 return Hardswish(float(scale), int(zero_point))
85 def from_reference(cls, mod, scale, zero_point): argument
86 return cls(float(scale), int(zero_point))
94 zero_point: quantization zero point of the output tensor
[all …]
normalization.py
19 * **zero_point** - quantization zero point of the output, type: long.
29 zero_point, argument
45 self.register_buffer("zero_point", torch.tensor(zero_point, **factory_kwargs))
55 output_zero_point=self.zero_point,
63 scale, zero_point = mod.activation_post_process.calculate_qparams()
69 int(zero_point),
76 def from_reference(cls, mod, scale, zero_point): argument
82 int(zero_point),
93 * **zero_point** - quantization zero point of the output, type: long.
105 zero_point, argument
[all …]
/aosp_15_r20/external/executorch/kernels/quantized/test/
test_quant_dequant_per_token.py
23 zero_point = torch.tensor([-1, -2, 0])
24 zero_point = zero_point.unsqueeze(-1)
26 input_tensor, scale, zero_point, -128, 127, torch.int8
29 input_tensor, scale, zero_point, -128, 127, torch.int8
37 zero_point = torch.randint(0, 10, (8, 1))
39 input_tensor, scale, zero_point, -128, 127, torch.int8
42 input_tensor, scale, zero_point, -128, 127, torch.int8
50 zero_point = torch.randint(0, 10, (1, 3, 8, 1))
52 input_tensor, scale, zero_point, -128, 127, torch.int8
55 input_tensor, scale, zero_point, -128, 127, torch.int8
[all …]
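
In the per-token test above, scale and zero_point carry one entry per token and are unsqueezed so they broadcast over the feature dimension. A minimal sketch of that per-token quantize; this helper is illustrative, not the ExecuTorch op itself.

    import torch

    def quantize_per_token(x, scale, zero_point, qmin=-128, qmax=127, dtype=torch.int8):
        # scale / zero_point have shape (..., 1): one pair per token,
        # broadcast across the last (feature) dimension
        return torch.clamp(torch.round(x / scale) + zero_point, qmin, qmax).to(dtype)
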
/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/
_decomposed.py
45 "quantize_per_tensor(Tensor input, float scale, int zero_point, "
54 zero_point: int,
65 zero_point (int): quantization parameter for affine quantization
83 torch.round(input * inv_scale) + zero_point, quant_min, quant_max
91 zero_point: int,
105 "quantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
116 zero_point: torch.Tensor,
123 Same as `quantize_per_tensor` but scale and zero_point are Scalar Tensor instead of
127 zero_point.numel() == 1
128 ), f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
[all …]
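
Line 83 above shows the decomposed form of per-tensor quantization used by these ops. A sketch of that computation; the dtype cast at the end is assumed here.

    import torch

    def quantize_per_tensor_decomposed(input, scale, zero_point, quant_min, quant_max,
                                       dtype=torch.int8):
        inv_scale = 1.0 / scale
        q = torch.clamp(torch.round(input * inv_scale) + zero_point, quant_min, quant_max)
        return q.to(dtype)
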
/aosp_15_r20/packages/modules/NeuralNetworks/runtime/test/android_fuzzing/corpus_additional/
seed356
11 zero_point: 128
27 zero_point: 100
43 zero_point: 100
59 zero_point: 100
75 zero_point: 100
91 zero_point: 100
107 zero_point: 100
123 zero_point: 100
139 zero_point: 100
231 zero_point: 128
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/cpu/vec/vec256/
vec256_qint.h
147 int64_t zero_point) { in QuantizeAvx2() argument
205 x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point)); in QuantizeAvx2()
206 y_rounded_v = _mm256_add_epi32(y_rounded_v, _mm256_set1_epi32(zero_point)); in QuantizeAvx2()
207 z_rounded_v = _mm256_add_epi32(z_rounded_v, _mm256_set1_epi32(zero_point)); in QuantizeAvx2()
208 w_rounded_v = _mm256_add_epi32(w_rounded_v, _mm256_set1_epi32(zero_point)); in QuantizeAvx2()
228 x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point)); in QuantizeAvx2()
251 transformed = zero_point + std::nearbyint(transformed); in QuantizeAvx2()
316 Vectorized<float> /*zero_point*/,
324 Vectorized<float> zero_point) const {
326 return {(Vectorized<float>(float_vals) - zero_point) * scale};
[all …]
/aosp_15_r20/external/XNNPACK/tools/
generate-avgpool-test.py
100 for (int32_t zero_point = 0; zero_point <= 255; zero_point += 51) {
105 .input_zero_point(zero_point)
126 for (int32_t zero_point = 0; zero_point <= 255; zero_point += 51) {
131 .output_zero_point(zero_point)
261 for (int32_t zero_point = 0; zero_point <= 255; zero_point += 51) {
266 .input_zero_point(zero_point)
291 for (int32_t zero_point = 0; zero_point <= 255; zero_point += 51) {
296 .output_zero_point(zero_point)
436 for (int32_t zero_point = 0; zero_point <= 255; zero_point += 51) {
441 .input_zero_point(zero_point)
[all …]
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/
functional.py
183 zero_point=0, argument
206 zero_point: quantization zero_point for the output. Default: 0
217 >>> scale, zero_point = 1.0, 0
221 >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
222 >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
223 >>> qF.conv1d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
242 return torch.ops.quantized.conv1d(input, packed_params, scale, zero_point)
255 zero_point=0, argument
278 zero_point: quantization zero_point for the output. Default: 0
289 >>> scale, zero_point = 1.0, 0
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/cpu/vec/vec512/
vec512_qint.h
151 int64_t zero_point) { in QuantizeAvx512() argument
219 x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point)); in QuantizeAvx512()
220 y_rounded_v = _mm512_add_epi32(y_rounded_v, _mm512_set1_epi32(zero_point)); in QuantizeAvx512()
221 z_rounded_v = _mm512_add_epi32(z_rounded_v, _mm512_set1_epi32(zero_point)); in QuantizeAvx512()
222 w_rounded_v = _mm512_add_epi32(w_rounded_v, _mm512_set1_epi32(zero_point)); in QuantizeAvx512()
242 x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point)); in QuantizeAvx512()
265 transformed = zero_point + std::nearbyint(transformed); in QuantizeAvx512()
329 Vectorized<float> zero_point,
337 Vectorized<float> zero_point) const {
339 return {(Vectorized<float>(float_vals) - zero_point) * scale};
[all …]
/aosp_15_r20/external/XNNPACK/src/xnnpack/
requantization.h
38 int8_t zero_point, in xnn_qs8_requantize_fp32() argument
45 const float min_less_zero_point = (float) ((int32_t) min - (int32_t) zero_point); in xnn_qs8_requantize_fp32()
46 const float max_less_zero_point = (float) ((int32_t) max - (int32_t) zero_point); in xnn_qs8_requantize_fp32()
52 const int32_t output = (int32_t) lrintf(scaled_input) + (int32_t) zero_point; in xnn_qs8_requantize_fp32()
59 uint8_t zero_point, in xnn_qu8_requantize_fp32() argument
66 const float min_less_zero_point = (float) ((int32_t) min - (int32_t) zero_point); in xnn_qu8_requantize_fp32()
67 const float max_less_zero_point = (float) ((int32_t) max - (int32_t) zero_point); in xnn_qu8_requantize_fp32()
73 const int32_t output = (int32_t) lrintf(scaled_input) + (int32_t) zero_point; in xnn_qu8_requantize_fp32()
80 int8_t zero_point, in xnn_qs8_requantize_rndna() argument
94 const int32_t min_less_zero_point = (int32_t) min - (int32_t) zero_point; in xnn_qs8_requantize_rndna()
[all …]
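
A scalar Python sketch of the fp32 requantization path whose fragments appear above: the clamp bounds are pre-shifted by the zero point, the scaled value is clamped and rounded, and the zero point is added back at the end. This mirrors xnn_qs8_requantize_fp32 in spirit only, not bit-for-bit.

    def requantize_fp32(value, scale, zero_point, qmin, qmax):
        min_less_zero_point = float(qmin - zero_point)
        max_less_zero_point = float(qmax - zero_point)
        scaled = value * scale
        scaled = min(max(scaled, min_less_zero_point), max_less_zero_point)
        return int(round(scaled)) + zero_point
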
/aosp_15_r20/external/pytorch/torch/testing/_internal/
common_quantized.py
26 def _quantize(x, scale, zero_point, qmin=None, qmax=None, dtype=np.uint8): argument
32 qx = np.round(x / scale + zero_point).astype(np.int64)
38 def _dequantize(qx, scale, zero_point): argument
40 x = (qx.astype(float) - zero_point) * scale
44 def _requantize(x, multiplier, zero_point, qmin=0, qmax=255, qtype=np.uint8): argument
47 qx = (x * multiplier).round() + zero_point
52 """Calculate the dynamic quantization parameters (scale, zero_point)
74 zero_point = 0
81 zero_point = 0
87 zero_point = qmin - round(min_val / scale)
[all …]
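
The tail of this helper (lines 74 to 87 above) derives dynamic quantization parameters from an observed value range. A condensed sketch of that idea for an affine uint8 target; the real helper handles more edge cases than shown here.

    def calc_dynamic_qparams(values, qmin=0, qmax=255):
        # include 0.0 so the real value zero is exactly representable
        min_val = min(min(values), 0.0)
        max_val = max(max(values), 0.0)
        if max_val == min_val:
            return 1.0, 0
        scale = (max_val - min_val) / (qmax - qmin)
        zero_point = int(qmin - round(min_val / scale))
        return scale, max(qmin, min(qmax, zero_point))
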
/aosp_15_r20/external/executorch/kernels/quantized/cpu/
op_quantize.cpp
16 * For an input tensor, use the scale and zero_point arguments to quantize it.
93 int64_t zero_point, in quantize_val() argument
100 static_cast<int32_t>(zero_point) + in quantize_val()
111 int64_t zero_point, in quantize_per_tensor_out() argument
134 scale, zero_point, value, quant_min, quant_max); \ in quantize_per_tensor_out()
168 const Tensor& zero_point, in quantize_per_tensor_tensor_args_out() argument
186 zero_point.scalar_type() == ScalarType::Long, in quantize_per_tensor_tensor_args_out()
187 "Expected zero_point to be Long tensor received: %" PRId8, in quantize_per_tensor_tensor_args_out()
188 static_cast<int8_t>(zero_point.scalar_type())); in quantize_per_tensor_tensor_args_out()
194 zero_point.numel() == 1, in quantize_per_tensor_tensor_args_out()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/
kernel_util_test.cc
372 input_params->zero_point = TfLiteIntArrayCreate(1); in TEST_F()
373 input_params->zero_point->data[0] = 5; in TEST_F()
394 filter_params->zero_point = TfLiteIntArrayCreate(3); in TEST_F()
395 filter_params->zero_point->data[0] = 0; in TEST_F()
396 filter_params->zero_point->data[1] = 0; in TEST_F()
397 filter_params->zero_point->data[2] = 0; in TEST_F()
415 bias_params->zero_point = TfLiteIntArrayCreate(3); in TEST_F()
416 bias_params->zero_point->data[0] = 11; in TEST_F()
417 bias_params->zero_point->data[1] = 12; in TEST_F()
418 bias_params->zero_point->data[2] = 15; in TEST_F()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/lite/g3doc/performance/
quantization_spec.md
45 `zero_point` per slice in the `quantized_dimension`. The quantized dimension
48 quantization params: `scale=[1.0, 2.0, 3.0]`, `zero_point=[1, 2, 3]`,
51 t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
52 t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2
53 t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3
133 restriction: Input and outputs must all have same scale/zero_point
144 restriction: Input and outputs must all have same scale/zero_point
155 restriction: zero_point = 0
160 restriction: (scale, zero_point) = (input0_scale * input1_scale[...], 0)
175 restriction: zero_point = 0
[all …]
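
The spec excerpt above describes per-axis quantization: one scale and one zero_point per slice along quantized_dimension. A sketch using the spec's example values (scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantized_dimension=1); the helper name and NumPy formulation are this sketch's own.

    import numpy as np

    def quantize_per_axis(t, scale, zero_point, axis=1, qmin=-128, qmax=127):
        shape = [1] * t.ndim
        shape[axis] = -1          # reshape qparams so they broadcast along the quantized axis
        s = np.asarray(scale, dtype=np.float32).reshape(shape)
        zp = np.asarray(zero_point).reshape(shape)
        return np.clip(np.round(t / s) + zp, qmin, qmax).astype(np.int8)

    t = np.random.randn(2, 3, 4, 5).astype(np.float32)
    q = quantize_per_axis(t, [1.0, 2.0, 3.0], [1, 2, 3])  # q[:, 1, ...] uses scale 2.0, zero_point 2
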
