/aosp_15_r20/external/pytorch/torch/ao/quantization/ |
H A D | observer.py | 206 # min_val and max_val buffers from torch.Size([0]) to torch.Size([]) 208 # to min_val and from max_vals to max_val. 325 self, min_val: torch.Tensor, max_val: torch.Tensor 331 min_val: Minimum values per channel 343 if not check_min_max_valid(min_val, max_val): 344 return torch.tensor([1.0], device=min_val.device.type), torch.tensor( 345 [0], device=min_val.device.type 349 min_val_neg = torch.min(min_val, torch.zeros_like(min_val)) 372 scale = (max_val - min_val) / float(quant_max - quant_min) 378 zero_point = -1 * min_val / scale [all …]
|
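The observer.py hit above is the core of PyTorch's affine observer math: scale = (max_val - min_val) / (quant_max - quant_min) and a zero point derived from min_val, after first widening the range so 0.0 stays representable. A minimal standalone sketch of that computation, using only public torch ops (the helper name and the hard-coded int8 range are assumptions, not the observer's API):

```python
import torch

def affine_qparams(min_val: torch.Tensor, max_val: torch.Tensor,
                   quant_min: int = -128, quant_max: int = 127):
    """Per-tensor affine scale / zero_point from observed min/max (sketch)."""
    # Widen the range so that 0.0 is always exactly representable.
    min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
    max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
    scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)
    scale = torch.clamp(scale, min=torch.finfo(torch.float32).eps)  # avoid a zero scale
    zero_point = quant_min - torch.round(min_val_neg / scale)
    zero_point = torch.clamp(zero_point, quant_min, quant_max).to(torch.int64)
    return scale, zero_point

scale, zp = affine_qparams(torch.tensor(-1.0), torch.tensor(3.0))
```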
H A D | utils.py | 394 def check_min_max_valid(min_val: torch.Tensor, max_val: torch.Tensor) -> bool: 398 if min_val.numel() == 0 or max_val.numel() == 0: 405 if min_val.dim() == 0 or max_val.dim() == 0: 406 if min_val == float("inf") and max_val == float("-inf"): 414 assert min_val <= max_val, f"min {min_val} should be less than max {max_val}" 417 min_val <= max_val 418 ), f"min {min_val} should be less than max {max_val}" 616 min_val: torch.Tensor, 629 min_val: Minimum values per channel 636 if not check_min_max_valid(min_val, max_val): [all …]
|
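The utils.py hit shows check_min_max_valid, the guard the observers call before computing qparams: empty buffers or an untouched +inf/-inf pair mean no data has been observed yet. A condensed restatement of that guard, assuming the same semantics as the snippet (not imported from torch.ao):

```python
import torch

def check_min_max_valid(min_val: torch.Tensor, max_val: torch.Tensor) -> bool:
    """Return False (so qparam computation is skipped) for an untouched observer."""
    if min_val.numel() == 0 or max_val.numel() == 0:
        return False                      # buffers were never populated by forward()
    if min_val.dim() == 0 or max_val.dim() == 0:
        if min_val == float("inf") and max_val == float("-inf"):
            return False                  # still at the +/-inf initial values
        assert min_val <= max_val, f"min {min_val} should be less than max {max_val}"
    else:
        assert torch.all(min_val <= max_val), \
            f"min {min_val} should be less than max {max_val}"
    return True
```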
/aosp_15_r20/external/pytorch/torch/ao/quantization/experimental/ |
H A D | observer.py | 24 min_val: torch.Tensor 32 self.min_val = torch.tensor([]) 35 # min_val and max_val are optional args to override 36 # the min_val and max_val observed by forward 38 return self._calculate_qparams(signed, self.min_val, self.max_val) 44 min_val: optional arg that can override min_val internal attribute 53 def _calculate_qparams(self, signed: bool, min_val=None, max_val=None): argument 54 if min_val is not None: 55 self.min_val = min_val 60 alpha = torch.max(-self.min_val, self.max_val) [all …]
|
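The experimental (APoT) observer reduces the tracked range to a single symmetric magnitude, alpha = max(-min_val, max_val), optionally letting callers override the observed min_val/max_val. A small sketch of that override-then-derive step (the function name and defaults are ours):

```python
import torch

def apot_alpha(min_val: torch.Tensor, max_val: torch.Tensor,
               min_override=None, max_override=None) -> torch.Tensor:
    """Symmetric magnitude used by the non-uniform (APoT) observer sketch."""
    if min_override is not None:
        min_val = min_override            # optional args override the observed stats
    if max_override is not None:
        max_val = max_override
    return torch.max(-min_val, max_val)

alpha = apot_alpha(torch.tensor([-0.5]), torch.tensor([2.0]))  # -> tensor([2.])
```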
/aosp_15_r20/external/flatbuffers/python/flatbuffers/ |
H A D | number_types.py | 31 min_val = False variable in BoolFlags 40 min_val = 0 variable in Uint8Flags 49 min_val = 0 variable in Uint16Flags 58 min_val = 0 variable in Uint32Flags 67 min_val = 0 variable in Uint64Flags 76 min_val = -(2**7) variable in Int8Flags 85 min_val = -(2**15) variable in Int16Flags 94 min_val = -(2**31) variable in Int32Flags 103 min_val = -(2**63) variable in Int64Flags 112 min_val = None variable in Float32Flags [all …]
|
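flatbuffers' number_types.py stores each scalar type's representable range as class attributes: False for booleans, 0 for unsigned types, -(2**(n-1)) for signed n-bit types, and None for floats, which are not range-checked. The pattern, condensed into three illustrative classes (fields not shown in the hit, such as max_val and bytewidth, are assumptions):

```python
class Int8Flags:
    bytewidth = 1                 # field width in the buffer (assumed attribute name)
    min_val = -(2 ** 7)
    max_val = 2 ** 7 - 1

class Uint16Flags:
    bytewidth = 2
    min_val = 0
    max_val = 2 ** 16 - 1

class Float32Flags:
    bytewidth = 4
    min_val = None                # floats are not range-checked like integers
    max_val = None
```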
/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/_model_report/ |
H A D | model_report_observer.py | 31 * :attr:`min_val` defines the per channel minimum values passed through 48 min_val: torch.Tensor 66 self.register_buffer("min_val", torch.tensor([])) 126 min_val = self.min_val 136 y = y.to(self.min_val.dtype) 138 if min_val.numel() == 0 or max_val.numel() == 0: 139 min_val, max_val = torch.aminmax(y, dim=1) 142 min_val = torch.min(min_val_cur, min_val) 145 self.min_val.resize_(min_val.shape) 147 self.min_val.copy_(min_val) [all …]
|
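model_report_observer.py keeps running per-channel statistics: the first batch seeds min_val/max_val via torch.aminmax, later batches are folded in with torch.min/torch.max and copied back into the registered buffers. A condensed sketch of that update, assuming the input has already been reshaped to (channels, elements) and skipping the buffer resize/copy plumbing:

```python
import torch

def update_running_min_max(y: torch.Tensor,
                           min_val: torch.Tensor,
                           max_val: torch.Tensor):
    """y is (channels, elements); min_val/max_val start out as empty buffers."""
    min_cur, max_cur = torch.aminmax(y, dim=1)
    if min_val.numel() == 0 or max_val.numel() == 0:
        min_val, max_val = min_cur, max_cur      # first batch seeds the statistics
    else:
        min_val = torch.min(min_cur, min_val)    # running per-channel minimum
        max_val = torch.max(max_cur, max_val)    # running per-channel maximum
    return min_val, max_val

m, M = update_running_min_max(torch.randn(4, 16), torch.tensor([]), torch.tensor([]))
```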
/aosp_15_r20/external/libopus/dnn/torch/osce/models/ |
H A D | scale_embedding.py | 38 min_val, argument 44 if min_val >= max_val: 45 raise ValueError('min_val must be smaller than max_val') 47 if min_val <= 0 and logscale: 48 raise ValueError('min_val must be positive when logscale is true') 52 self.min_val = min_val 56 self.min_val = m.log(self.min_val) 60 self.offset = (self.min_val + self.max_val) / 2 62 torch.arange(1, dim+1, dtype=torch.float32) * torch.pi / (self.max_val - self.min_val) 67 x = torch.clip(x, self.min_val, self.max_val) - self.offset
|
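libopus' scale_embedding.py validates its bounds up front (min_val < max_val, and strictly positive when logscale is set), optionally maps them to log space, and then clips inputs to [min_val, max_val] around the mid-point offset before building the sinusoidal features. A trimmed sketch of just that range handling (the embedding frequencies are omitted and the class name is ours):

```python
import math as m
import torch

class ScaleRange:
    """Range bookkeeping only; the sinusoidal embedding weights are omitted."""
    def __init__(self, min_val: float, max_val: float, logscale: bool = False):
        if min_val >= max_val:
            raise ValueError('min_val must be smaller than max_val')
        if min_val <= 0 and logscale:
            raise ValueError('min_val must be positive when logscale is true')
        self.logscale = logscale
        self.min_val = m.log(min_val) if logscale else min_val
        self.max_val = m.log(max_val) if logscale else max_val
        self.offset = (self.min_val + self.max_val) / 2

    def normalize(self, x: torch.Tensor) -> torch.Tensor:
        if self.logscale:
            x = torch.log(x)
        return torch.clip(x, self.min_val, self.max_val) - self.offset
```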
/aosp_15_r20/external/pytorch/torch/testing/_internal/ |
H A D | common_quantized.py | 69 min_val = X.min() 72 if min_val == max_val: 77 max_val = max(max_val, -min_val) 78 min_val = -max_val 79 scale = (max_val - min_val) / (qmax - qmin) 84 min_val = min(min_val, 0.0) 85 scale = (max_val - min_val) / (qmax - qmin) 87 zero_point = qmin - round(min_val / scale) 102 min_val = X.min() 104 if min_val == max_val: [all …]
|
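common_quantized.py's reference helper distinguishes the two schemes visible in the hit: symmetric quantization mirrors the larger tail (max_val = max(max_val, -min_val), min_val = -max_val) so the zero point stays fixed, while affine quantization only forces 0.0 into the range and shifts the zero point by round(min_val / scale). A plain-float sketch of both branches, signed ranges only; the unsigned offset and reduce_range handling in the test helper are omitted:

```python
def ref_qparams(min_val: float, max_val: float,
                qmin: int = -128, qmax: int = 127, symmetric: bool = True):
    """Reference scale / zero_point for a signed range (sketch of the test helper)."""
    if min_val == max_val:
        return 1.0, 0                     # degenerate range: trivial parameters
    if symmetric:
        max_val = max(max_val, -min_val)  # mirror the larger tail around zero
        min_val = -max_val
        scale = (max_val - min_val) / (qmax - qmin)
        zero_point = 0                    # signed symmetric: zero point pinned at 0
    else:
        min_val = min(min_val, 0.0)       # keep 0.0 exactly representable
        max_val = max(max_val, 0.0)
        scale = (max_val - min_val) / (qmax - qmin)
        zero_point = qmin - round(min_val / scale)
    return scale, zero_point

print(ref_qparams(-1.0, 3.0, symmetric=False))  # scale ~= 0.0157, zero_point = -64
```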
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/integration_test/ |
H A D | custom_aggregator_op_test.py | 44 min_val = quantize_model_wrapper.get_min_from_calibrator('1') 46 self.assertAllEqual((min_val, max_val), (1.0, 5.0)) 62 min_val = quantize_model_wrapper.get_min_from_calibrator('2') 64 self.assertAllEqual((min_val, max_val), (1.0, 5.0)) 65 min_val = quantize_model_wrapper.get_min_from_calibrator('3') 67 self.assertAllEqual((min_val, max_val), (-5.0, -1.0)) 83 min_val = quantize_model_wrapper.get_min_from_calibrator('4') 85 self.assertAllEqual((min_val, max_val), (1.0, 5.0)) 86 min_val = quantize_model_wrapper.get_min_from_calibrator('5') 88 self.assertAllEqual((min_val, max_val), (-5.0, -1.0)) [all …]
|
/aosp_15_r20/external/pytorch/test/quantization/core/ |
H A D | test_workflow_module.py | 84 def _get_ref_params(reduce_range, qscheme, dtype, input_scale, min_val, max_val): argument 130 self.assertEqual(myobs.min_val, 1.0 * input_scale) 148 self.assertEqual(myobs.min_val, loaded_obs.min_val) 220 self.assertEqual(myobs.min_val, ref_min_vals[ch_axis]) 257 self.assertEqual(myobs.min_val, loaded_obs.min_val) 324 min_shape_before = obs.min_val.shape 328 self.assertEqual(min_shape_before, obs.min_val.shape) 342 self.assertTrue(obs.min_val != -torch.inf and obs.max_val != torch.inf) 382 self.assertEqual(obs2.min_val.shape, torch.Size([])) 475 self.assertTrue(torch.equal(obs.min_val, new_obs.min_val)) [all …]
|
/aosp_15_r20/external/pytorch/test/quantization/core/experimental/ |
H A D | test_nonuniform_observer.py | 14 obs.min_val = torch.tensor([0.0]) 32 obs.min_val = torch.tensor([0.0]) 36 alpha_test = torch.max(-obs.min_val, obs.max_val) 74 obs.min_val = torch.tensor([0.0]) 78 alpha_test = torch.max(-obs.min_val, obs.max_val) 118 obs.min_val = torch.tensor([0.0]) 121 alpha_test = torch.max(-obs.min_val, obs.max_val) 167 obs.min_val = torch.tensor([0.0]) 209 min_val = torch.min(X) 212 expected_alpha = torch.max(-min_val, max_val)
|
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/ |
H A D | fake_quant_ops_functor.h | 135 const float min_val = min(); in operator() local 138 if (min_val == 0.0f && max_val == 0.0f) { in operator() 143 Nudge(min_val, max_val, quant_min, quant_max, &nudged_min, &nudged_max, in operator() 168 const float min_val = min(); in operator() local 171 if (min_val == 0.0f && max_val == 0.0f) { in operator() 178 Nudge(min_val, max_val, quant_min, quant_max, &nudged_min, &nudged_max, in operator() 211 const float min_val = min(i); in operator() local 214 if (min_val == 0.0f && max_val == 0.0f) { in operator() 220 Nudge(min_val, max_val, quant_min, quant_max, &nudged_min, &nudged_max, in operator() 249 const float min_val = min(i); in operator() local [all …]
|
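fake_quant_ops_functor.h special-cases the all-zero range and otherwise nudges (min_val, max_val) so that 0.0 lands exactly on the integer grid before fake-quantizing. A Python restatement of the usual TensorFlow nudging recipe that the hit's Nudge(...) calls refer to (the function name and the non-degenerate-range assumption are ours):

```python
def nudge(min_val: float, max_val: float, quant_min: int, quant_max: int):
    """Shift the float range so zero lands exactly on a quantized grid point."""
    scale = (max_val - min_val) / (quant_max - quant_min)
    zero_point_from_min = quant_min - min_val / scale
    if zero_point_from_min <= quant_min:
        nudged_zero_point = quant_min
    elif zero_point_from_min >= quant_max:
        nudged_zero_point = quant_max
    else:
        nudged_zero_point = round(zero_point_from_min)
    nudged_min = (quant_min - nudged_zero_point) * scale
    nudged_max = (quant_max - nudged_zero_point) * scale
    return nudged_min, nudged_max, scale

print(nudge(-0.1, 1.0, 0, 255))  # the nudged range still brackets [-0.1, 1.0] closely
```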
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/ |
H A D | calibrator_singleton_test.cc | 24 CalibratorSingleton::ReportMinMax(/*id=*/"1", /*min_val=*/1.0f, in TEST() 32 CalibratorSingleton::ReportMinMax(/*id=*/"1", /*min_val=*/-1.0f, in TEST() 39 CalibratorSingleton::ReportMinMax(/*id=*/"1", /*min_val=*/3.0f, in TEST() 49 CalibratorSingleton::ReportMinMax(/*id=*/"2", /*min_val=*/1.0f, in TEST() 57 CalibratorSingleton::ReportMinMax(/*id=*/"3", /*min_val=*/-1.0f, in TEST() 64 CalibratorSingleton::ReportMinMax(/*id=*/"2", /*min_val=*/3.0f, in TEST() 78 CalibratorSingleton::ReportMinMax(/*id=*/"4", /*min_val=*/1.0f, in TEST() 93 CalibratorSingleton::ReportMinMax(/*id=*/"5", /*min_val=*/1.0f, in TEST() 101 CalibratorSingleton::ReportMinMax(/*id=*/"6", /*min_val=*/3.0f, in TEST()
|
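calibrator_singleton_test.cc exercises a process-wide registry keyed by node id: every ReportMinMax call widens the stored (min, max) for that id, and distinct ids do not interfere. A dictionary-based sketch of that aggregation (the class below is a stand-in, not the TensorFlow singleton):

```python
class Calibrator:
    """Per-id running (min, max) aggregation, mirroring ReportMinMax / the getters."""
    _stats = {}

    @classmethod
    def report_min_max(cls, node_id: str, min_val: float, max_val: float) -> None:
        lo, hi = cls._stats.get(node_id, (float("inf"), float("-inf")))
        cls._stats[node_id] = (min(lo, min_val), max(hi, max_val))

    @classmethod
    def get_min_max(cls, node_id: str):
        return cls._stats[node_id]

Calibrator.report_min_max("1", 1.0, 5.0)
Calibrator.report_min_max("1", -1.0, 3.0)
assert Calibrator.get_min_max("1") == (-1.0, 5.0)
```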
/aosp_15_r20/external/ComputeLibrary/tests/validation/dynamic_fusion/gpu/cl/ |
H A D | Clamp.cpp | 68 input_info, min_val, max_val, expected) 79 attributes.min_val(min_val) 97 … framework::dataset::make("ClampAttributes", { ClampAttributes().min_val(0.1f).max_val(0.6f) })), in TEST_SUITE() 109 … framework::dataset::make("ClampAttributes", { ClampAttributes().min_val(0.1f).max_val(0.6f) })), 121 … framework::dataset::make("ClampAttributes", { ClampAttributes().min_val(0.2f).max_val(0.4f) })), 136 … framework::dataset::make("ClampAttributes", { ClampAttributes().min_val(0.3f).max_val(0.7f) })), in TEST_SUITE() 148 … framework::dataset::make("ClampAttributes", { ClampAttributes().min_val(0.3f).max_val(0.7f) })), 160 … framework::dataset::make("ClampAttributes", { ClampAttributes().min_val(0.1f).max_val(0.9f) })),
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cuda/ |
H A D | FusedObsFakeQuant.cu | 37 float min_val = x_min[i]; in ChooseQuantizationParamsKernelImpl() local 40 if (min_val < 0 && max_val > 0 && preserve_sparsity) { in ChooseQuantizationParamsKernelImpl() 44 fabs(min_val / symmetric_qmin), fabs(max_val / symmetric_qmax)); in ChooseQuantizationParamsKernelImpl() 45 min_val = max_scale * symmetric_qmin; in ChooseQuantizationParamsKernelImpl() 52 min_val = std::min(min_val, 0.f); in ChooseQuantizationParamsKernelImpl() 54 scale[i] = (static_cast<double>(max_val) - min_val) / (qmax - qmin); in ChooseQuantizationParamsKernelImpl() 62 double zero_point_from_min = qmin - min_val / static_cast<double>(scale[i]); in ChooseQuantizationParamsKernelImpl() 65 std::abs(qmin) + std::abs(min_val / static_cast<double>(scale[i])); in ChooseQuantizationParamsKernelImpl() 77 if (min_val < 0 && max_val > 0 && preserve_sparsity) { in ChooseQuantizationParamsKernelImpl()
|
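FusedObsFakeQuant.cu adjusts the observed range when preserve_sparsity is set and the range straddles zero: both endpoints are stretched to a common max_scale = max(|min_val / symmetric_qmin|, |max_val / symmetric_qmax|) so the zero point stays exact, then scale and zero point follow from the (possibly widened) minimum. A scalar Python sketch of that branch; the symmetric limits and the degenerate-range fallback are assumptions:

```python
def fused_obs_qparams(min_val: float, max_val: float,
                      qmin: int = -128, qmax: int = 127,
                      preserve_sparsity: bool = True):
    """Scalar sketch of the per-tensor branch of the fused-observer kernel."""
    symmetric_qmin = -((qmax - qmin) // 2 + 1)   # assumed symmetric limits, e.g. -128
    symmetric_qmax = (qmax - qmin) // 2          # and 127 for int8
    if min_val < 0 and max_val > 0 and preserve_sparsity:
        max_scale = max(abs(min_val / symmetric_qmin), abs(max_val / symmetric_qmax))
        min_val = max_scale * symmetric_qmin     # stretch both tails to one common scale
        max_val = max_scale * symmetric_qmax
    min_val = min(min_val, 0.0)                  # always keep zero representable
    max_val = max(max_val, 0.0)
    scale = (max_val - min_val) / (qmax - qmin)
    if scale == 0.0:
        scale = 0.1                              # degenerate-range fallback (assumed)
    zero_point = qmin - round(min_val / scale)
    return scale, max(qmin, min(qmax, zero_point))

print(fused_obs_qparams(-1.0, 2.0))  # the zero point lands exactly on 0
```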
/aosp_15_r20/external/executorch/exir/dialects/edge/ |
H A D | edge.yaml | 4368 min_val: T0 4372 min_val: T1 4376 min_val: T1 4380 min_val: T1 4384 min_val: T5 4388 min_val: T7 4392 min_val: T0 4396 min_val: T1 4400 min_val: T1 4404 min_val: T1 [all …]
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/ |
H A D | Constraints.cpp | 30 int64_t min_val = min.has_value() ? min.value() : std::numeric_limits<int64_t>::min(); in sym_constrain_range() local 35 max_val >= min_val, in sym_constrain_range() 37 min_val, in sym_constrain_range() 43 min_val <= size_as_int && size_as_int <= max_val, in sym_constrain_range() 47 min_val, in sym_constrain_range() 64 int64_t min_val = min.has_value() ? min.value() : 0; in sym_constrain_range_for_size() local 68 sym_constrain_range(size, min_val, max); in sym_constrain_range_for_size()
|
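Constraints.cpp resolves the optional bounds to the full int64 range (or a 0 lower bound in the _for_size variant) and then checks min_val <= size <= max_val, failing otherwise. The equivalent runtime check in Python (a sketch; the error messages are ours):

```python
from typing import Optional

def sym_constrain_range(size: int, min_val: Optional[int] = None,
                        max_val: Optional[int] = None) -> None:
    """Runtime range check mirroring the native constraint op (sketch)."""
    lo = min_val if min_val is not None else -(2 ** 63)
    hi = max_val if max_val is not None else 2 ** 63 - 1
    if hi < lo:
        raise ValueError(f"max_val {hi} must be greater than or equal to min_val {lo}")
    if not (lo <= size <= hi):
        raise ValueError(f"size {size} is outside the constrained range [{lo}, {hi}]")

def sym_constrain_range_for_size(size: int, min_val: Optional[int] = None,
                                 max_val: Optional[int] = None) -> None:
    # The _for_size variant defaults the lower bound to 0 instead of INT64_MIN.
    sym_constrain_range(size, min_val if min_val is not None else 0, max_val)
```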
/aosp_15_r20/external/tensorflow/tensorflow/python/profiler/ |
H A D | model_analyzer_test.py | 491 min_val = random.randint(0, 10000) 494 min_micros=min_val)).with_empty_output().build() 497 check_min(tfprof_node.children, mm=min_val) 500 min_accelerator_micros=min_val)).with_empty_output().build() 503 check_min(tfprof_node.children, mam=min_val) 506 min_cpu_micros=min_val)).with_empty_output().build() 509 check_min(tfprof_node.children, mcm=min_val) 512 min_bytes=min_val)).with_empty_output().build() 515 check_min(tfprof_node.children, mb=min_val) 518 min_peak_bytes=min_val)).with_empty_output().build() [all …]
|
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/ |
H A D | frozen_ops_to_mkldnn.cpp | 396 auto min_val = n->f(attr::min_val); in hardtanh_helper() local 398 return [min_val, max_val](at::Tensor output, at::Tensor input) { in hardtanh_helper() 399 at::cpu::hardtanh_out(output, input, min_val, max_val); in hardtanh_helper() 405 auto min_val = n->f(attr::min_val); in clamp_helper() local 407 return [min_val, max_val](at::Tensor output, at::Tensor input) { in clamp_helper() 408 at::cpu::clamp_out(output, input, min_val, max_val); in clamp_helper() 697 double min_val, in clamp_node_creator() argument 703 // `emitBuiltinCall`) which uses `min_val` and `max_val` attrs which we in clamp_node_creator() 707 out_node->f_(attr::min_val, min_val); in clamp_node_creator() 798 auto min_val = in ComputeSubgraphInMKLDNN() local [all …]
|
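frozen_ops_to_mkldnn.cpp reads min_val/max_val off the node's attributes and captures them in a closure that later runs the out-variant kernel (hardtanh_out / clamp_out) on the converted tensors. The shape of that pattern, sketched with torch's public clamp out-variant (the attribute plumbing and MKLDNN layout handling are omitted):

```python
import torch

def make_clamp_helper(min_val: float, max_val: float):
    """Capture the node's scalar attributes once; reuse the closure per call."""
    def run(output: torch.Tensor, input: torch.Tensor) -> None:
        torch.clamp(input, min_val, max_val, out=output)
    return run

helper = make_clamp_helper(-1.0, 1.0)
x = torch.randn(8)
out = torch.empty_like(x)
helper(out, x)   # out now holds x clamped to [-1, 1]
```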
/aosp_15_r20/external/webrtc/common_audio/signal_processing/ |
H A D | min_max_operations.c | 159 int16_t min_val, max_val; in WebRtcSpl_MaxAbsElementW16() local 160 WebRtcSpl_MinMaxW16(vector, length, &min_val, &max_val); in WebRtcSpl_MaxAbsElementW16() 161 if (min_val == max_val || min_val < -max_val) { in WebRtcSpl_MaxAbsElementW16() 162 return min_val; in WebRtcSpl_MaxAbsElementW16() 237 int16_t* min_val, int16_t* max_val) { in WebRtcSpl_MinMaxW16() argument 239 return WebRtcSpl_MinMaxW16Neon(vector, length, min_val, max_val); in WebRtcSpl_MinMaxW16() 253 *min_val = minimum; in WebRtcSpl_MinMaxW16()
|
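min_max_operations.c derives the maximum-absolute element from one min/max pass: with min_val and max_val in hand, the answer is min_val when the range is degenerate or min_val < -max_val, otherwise max_val. The same selection in Python (a magnitude tie resolves to the positive endpoint, as in the hit):

```python
def max_abs_element(vector):
    """Return the element of largest magnitude; ties in magnitude pick the positive one."""
    min_val, max_val = min(vector), max(vector)
    if min_val == max_val or min_val < -max_val:
        return min_val
    return max_val

assert max_abs_element([3, -7, 5]) == -7
assert max_abs_element([3, -2, 5]) == 5
```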
/aosp_15_r20/external/sdv/vsomeip/third_party/boost/integer/include/boost/ |
D | integer_traits.hpp | 51 template<class T, T min_val, T max_val> 56 BOOST_STATIC_CONSTANT(T, const_min = min_val); 62 template<class T, T min_val, T max_val> 63 const bool integer_traits_base<T, min_val, max_val>::is_integral; 65 template<class T, T min_val, T max_val> 66 const T integer_traits_base<T, min_val, max_val>::const_min; 68 template<class T, T min_val, T max_val> 69 const T integer_traits_base<T, min_val, max_val>::const_max;
|
/aosp_15_r20/external/executorch/backends/xnnpack/test/ops/ |
H A D | clamp.py | 15 def __init__(self, min_val=None, max_val=None): argument 17 self.min_val = min_val 21 z = torch.clamp(x, min=self.min_val, max=self.max_val) 49 module = self.Clamp(min_val=-0.5) 60 Tester(self.Clamp(min_val=-1, max_val=1), inputs)
|
/aosp_15_r20/external/pytorch/aten/src/ATen/cpu/vec/vec256/ |
H A D | vec256_qint.h | 68 T min_val, 75 int32_t /*min_val*/, 85 int8_t min_val, 89 _mm256_set1_epi8(min_val), 97 uint8_t min_val, 101 _mm256_set1_epi8(min_val), 130 constexpr auto min_val = std::numeric_limits<T>::min(); in convert_float_to_int8() local 135 xy_packed_v, xy_packed_v, min_val, max_val); in convert_float_to_int8() 149 constexpr auto min_val = std::numeric_limits<T>::min(); in QuantizeAvx2() local 151 const __m256i min_v = _mm256_set1_epi32(min_val); in QuantizeAvx2() [all …]
|
H A D | vec256_int.h | 1057 Vectorized<int64_t> inline clamp(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val, … in clamp() argument 1059 …return emulate(a, min_val, max_val, [](int64_t a_point, int64_t min_point, int64_t max_point) {ret… in clamp() 1061 return minimum(maximum(a, min_val), max_val); in clamp() 1066 Vectorized<int32_t> inline clamp(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val, … in clamp() argument 1067 return _mm256_min_epi32(max_val, _mm256_max_epi32(a, min_val)); in clamp() 1071 Vectorized<int16_t> inline clamp(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val, … in clamp() argument 1072 return _mm256_min_epi16(max_val, _mm256_max_epi16(a, min_val)); in clamp() 1076 Vectorized<int8_t> inline clamp(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val, con… in clamp() argument 1077 return _mm256_min_epi8(max_val, _mm256_max_epi8(a, min_val)); in clamp() 1081 Vectorized<uint8_t> inline clamp(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val, … in clamp() argument [all …]
|
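Both vec256_qint.h and vec256_int.h express clamping as a pair of elementwise min/max operations, clamp(a, min_val, max_val) = min(max_val, max(a, min_val)), with int64 falling back to scalar emulation where AVX2 lacks the intrinsics. The same identity in array form (NumPy stands in for the SIMD lanes; this is not the ATen API):

```python
import numpy as np

def clamp(a: np.ndarray, min_val, max_val) -> np.ndarray:
    """clamp(a) = min(max_val, max(a, min_val)), applied lane-wise."""
    return np.minimum(max_val, np.maximum(a, min_val))

print(clamp(np.array([-3, 0, 9], dtype=np.int32), -1, 5))  # -> [-1  0  5]
```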
/aosp_15_r20/external/perfetto/src/trace_processor/db/column/ |
H A D | numeric_storage_unittest.cc | 100 SqlValue min_val = SqlValue::Long( in TEST() local 102 ASSERT_EQ(chain->ValidateSearchConstraints(FilterOp::kGe, min_val), in TEST() 104 ASSERT_EQ(chain->ValidateSearchConstraints(FilterOp::kGt, min_val), in TEST() 106 ASSERT_EQ(chain->ValidateSearchConstraints(FilterOp::kNe, min_val), in TEST() 109 ASSERT_EQ(chain->ValidateSearchConstraints(FilterOp::kLe, min_val), in TEST() 111 ASSERT_EQ(chain->ValidateSearchConstraints(FilterOp::kLt, min_val), in TEST() 113 ASSERT_EQ(chain->ValidateSearchConstraints(FilterOp::kEq, min_val), in TEST() 139 SqlValue min_val = SqlValue::Long( in TEST() local 141 ASSERT_EQ(chain->ValidateSearchConstraints(FilterOp::kGe, min_val), in TEST() 143 ASSERT_EQ(chain->ValidateSearchConstraints(FilterOp::kGt, min_val), in TEST() [all …]
|
/aosp_15_r20/external/mesa3d/src/util/tests/ |
H A D | rb_tree_test.cpp | 118 int min_val = INT_MAX; in validate_tree_order() local 122 assert(n->key <= min_val); in validate_tree_order() 123 if (n->key < min_val) { in validate_tree_order() 124 min_val = n->key; in validate_tree_order() 139 min_val = INT_MAX; in validate_tree_order() 143 assert(n->key <= min_val); in validate_tree_order() 144 if (n->key < min_val) { in validate_tree_order() 145 min_val = n->key; in validate_tree_order()
|
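rb_tree_test.cpp validates a reverse-order walk by carrying a running min_val: every visited key must be <= the smallest key seen so far, i.e. the sequence is non-increasing. The same check over a plain iterable (a sketch, detached from the rb_tree API):

```python
def is_non_increasing(keys) -> bool:
    """Reverse-order traversal check: no key may exceed the running minimum."""
    min_val = float("inf")
    for key in keys:
        if key > min_val:
            return False
        min_val = min(min_val, key)
    return True

assert is_non_increasing([9, 7, 7, 2])
assert not is_non_increasing([9, 7, 8])
```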