
Searched full:bias (Results 1 – 25 of 6228) sorted by relevance


/aosp_15_r20/external/pytorch/torch/testing/_internal/
common_pruning.py
57 nn.Linear(7, 5, bias=False),
58 nn.Linear(5, 6, bias=False),
59 nn.Linear(6, 4, bias=False),
61 self.linear1 = nn.Linear(4, 4, bias=False)
62 self.linear2 = nn.Linear(4, 10, bias=False)
73 wrapped in a Sequential. Used to test pruned Linear-Bias-Linear fusion."""
78 nn.Linear(7, 5, bias=True),
79 nn.Linear(5, 6, bias=False),
80 nn.Linear(6, 3, bias=True),
81 nn.Linear(3, 3, bias=True),
[all …]
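The docstring fragment above describes test models built from stacked Linear layers, used to exercise pruned Linear-Bias-Linear fusion. A minimal sketch of that pattern (layer sizes mirror the matched lines; everything else is illustrative):

```python
import torch.nn as nn

# A biased Linear followed by another Linear: the pattern whose bias the
# pruning/fusion pass has to fold through, per the docstring above.
model = nn.Sequential(
    nn.Linear(7, 5, bias=True),
    nn.Linear(5, 6, bias=False),
)
```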
/aosp_15_r20/external/pytorch/test/inductor/
test_cpu_select_algorithm.py
152 @parametrize("bias", (True, False))
156 self, batch_size, in_features, out_features, bias, input_3d, dtype argument
159 def __init__(self, bias): argument
161 self.linear = torch.nn.Linear(in_features, out_features, bias)
167 mod = M(bias=bias).to(dtype=dtype).eval()
187 @parametrize("bias", (True,))
191 def test_linear_wgt_multi_users(self, in_features, out_features, bias, dtype): argument
193 def __init__(self, bias): argument
196 self.linear = torch.nn.Linear(in_features, out_features, bias)
204 mod = M(bias=bias).to(dtype=dtype).eval()
[all …]
/aosp_15_r20/external/pytorch/benchmarks/static_runtime/
test_generated_ops.cc
18 %bias: None = prim::Constant() in TEST()
20 %cloned = aten::clone(%ret, %bias) in TEST()
48 %bias: None = prim::Constant() in TEST()
50 %cloned = aten::clone(%ret, %bias) in TEST()
78 %bias: None = prim::Constant() in TEST()
80 %cloned = aten::clone(%ret, %bias) in TEST()
108 %bias: None = prim::Constant() in TEST()
110 %cloned = aten::clone(%ret, %bias) in TEST()
138 %bias: None = prim::Constant() in TEST()
140 %cloned = aten::clone(%ret, %bias) in TEST()
[all …]
/aosp_15_r20/external/pytorch/test/
test_stateless.py
32 self.tied_bias = self.l1.bias
45 bias = torch.tensor([0.0], device=device)
49 f'{prefix}.l1.bias': bias,
53 'l1.bias': bias,
157 bias = torch.tensor([0.0], requires_grad=True)
160 'l1.bias': bias,
166 self.assertIsNotNone(bias.grad)
170 self.assertIsNone(module.l1.bias.grad)
204 bias = torch.tensor([0.0])
207 'l1.bias': bias,
[all …]
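The matched lines above show the stateless-call pattern: a module's registered parameters (here 'l1.bias') are overridden by entries of a dict for one forward pass, so gradients flow to the external tensor rather than the module's own parameter. A small sketch using torch.func.functional_call (module and shapes are hypothetical, not taken from the test):

```python
import torch
import torch.nn as nn
from torch.func import functional_call

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(1, 1)

    def forward(self, x):
        return self.l1(x)

net = Net()
bias = torch.tensor([0.0], requires_grad=True)
# Run the module with 'l1.bias' temporarily replaced; net.l1.bias is untouched.
out = functional_call(net, {"l1.bias": bias}, (torch.ones(1, 1),))
out.sum().backward()
print(bias.grad is not None, net.l1.bias.grad is None)   # True True
```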
test_mkldnn_fusion.py
65 def __init__(self, in_channels, out_channels, bias, **kwargs): argument
67 self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=bias, **kwargs)
82 for bias, dilation, groups in options:
87 bias,
104 def __init__(self, unary_fn, in_channels, out_channels, bias, **kwargs): argument
106 self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=bias, **kwargs)
119 for bias in [True, False]:
121 … m = M(unary_fn, 3, oC, bias, kernel_size=(3, 3)).to(memory_format=memory_format)
133 def __init__(self, m, in_channels, out_channels, bias, **kwargs): argument
135 self.conv = m(in_channels, out_channels, bias=bias, **kwargs)
[all …]
/aosp_15_r20/external/ComputeLibrary/tests/validation/fixtures/
GEMMFixture.h
269 TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1); in compute_target() local
281 …gemm.configure(gpu_arch, lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, false, resh… in compute_target()
285 ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); in compute_target()
287 add_padding_x({ &lhs, &rhs, &bias, &dst }); in compute_target()
292 bias.allocator()->allocate(); in compute_target()
297 ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); in compute_target()
303 fill(AccessorType(bias), 2); in compute_target()
308 { ACL_SRC_2, &bias }, in compute_target()
326 SimpleTensor<T> bias{ dst_shape, data_type, 1 }; in compute_reference()
335 fill(bias, 2); in compute_reference()
[all …]
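The fixture above creates lhs, rhs, and bias tensors and fills the bias identically for the target and the reference computation. As a rough reference for what a GEMM-with-bias validates, assuming the conventional alpha/beta formulation (shapes and the per-column broadcast are illustrative, not read from the fixture):

```python
import torch

alpha, beta = 1.0, 1.0
lhs, rhs = torch.rand(4, 3), torch.rand(3, 5)
bias = torch.rand(5)                     # broadcast across the rows of lhs @ rhs
dst = alpha * (lhs @ rhs) + beta * bias
```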
/aosp_15_r20/external/pytorch/torch/ao/nn/intrinsic/qat/modules/
conv_fused.py
55 bias, argument
87 if bias:
88 self.bias = Parameter(torch.empty(out_channels))
90 self.register_parameter("bias", None)
111 init.zeros_(self.bn.bias)
113 if self.bias is not None:
116 init.uniform_(self.bias, -bound, bound)
150 # using zero bias here since the bias for original conv
152 if self.bias is not None:
153 zero_bias = torch.zeros_like(self.bias, dtype=input.dtype)
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/
fully_connected.cc
135 const TfLiteTensor* bias, TfLiteTensor* output, in CheckTypes() argument
144 // optional bias tensor. in CheckTypes()
145 const bool is_optional_bias_float = !bias || (bias->type == kTfLiteFloat32); in CheckTypes()
147 !bias || (bias->type == kTfLiteInt32) || (bias->type == kTfLiteInt64); in CheckTypes()
210 const TfLiteTensor* bias = in PrepareImpl() local
220 CheckTypes(context, input, filter, bias, output, params)); in PrepareImpl()
257 if (bias) { in PrepareImpl()
258 TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 0)); in PrepareImpl()
268 context, input, filter, bias, output, &real_multiplier)); in PrepareImpl()
479 const TfLiteTensor* bias, TfLiteTensor* output) { in EvalPie() argument
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
Convolution.cpp
370 …d(const at::Tensor& input, const at::Tensor& weight, const std::optional<at::Tensor>& bias) const { in use_cpu_depthwise3x3_winograd()
386 (!bias.has_value() || bias->is_contiguous()) && in use_cpu_depthwise3x3_winograd()
657 const c10::ArrayRef<T>& weight_sizes, const at::Tensor& bias, in check_shape_forward() argument
694 …TORCH_CHECK(!bias.defined() || (bias.ndimension() == 1 && at::symint::size<T>(bias, 0) == weight_s… in check_shape_forward()
696 ", expected bias to be 1-dimensional with ", weight_sizes[0], " elements", in check_shape_forward()
697 ", but got bias of size ", at::symint::sizes<T>(bias), " instead"); in check_shape_forward()
730 …TORCH_CHECK(!bias.defined() || (bias.ndimension() == 1 && at::symint::size<T>(bias, 0) == weight_s… in check_shape_forward()
732 ", expected bias to be 1-dimensional with ", weight_sizes[1] * groups, " elements", in check_shape_forward()
733 ", but got bias of size ", at::symint::sizes<T>(bias), " instead"); in check_shape_forward()
742 check_shape_forward<T>(input, weight_sizes, /*bias=*/ Tensor(), params); in check_shape_backward()
[all …]
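The shape check above requires a convolution bias, when defined, to be 1-dimensional with as many elements as output channels, and raises the quoted error otherwise. A quick illustration of the constraint through the public functional API:

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)              # (N, C_in, H, W)
weight = torch.randn(16, 3, 3, 3)         # (C_out, C_in, kH, kW)
bias = torch.randn(16)                    # must be 1-D with C_out elements
out = F.conv2d(x, weight, bias, padding=1)
# A bias of any other shape, e.g. torch.randn(8), trips the check quoted above.
```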
/aosp_15_r20/external/pytorch/torch/nn/modules/
linear.py
60 bias: If set to ``False``, the layer will not learn an additive bias.
74 bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
75 If :attr:`bias` is ``True``, the values are initialized from
97 bias: bool = True,
108 if bias:
109 self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
111 self.register_parameter("bias", None)
119 if self.bias is not None:
122 init.uniform_(self.bias, -bound, bound)
125 return F.linear(input, self.weight, self.bias)
[all …]
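The docstring lines above describe nn.Linear's learnable bias: shape (out_features,), uniformly initialized, and absent altogether when bias=False. A short usage check:

```python
import torch.nn as nn

m = nn.Linear(4, 3)                       # bias=True by default
print(m.bias.shape)                       # torch.Size([3])

m_nobias = nn.Linear(4, 3, bias=False)
print(m_nobias.bias)                      # None: no additive bias is learned
```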
conv.py
49 __annotations__ = {'bias': Optional[torch.Tensor]}
51 …def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: # type:…
66 bias: Optional[Tensor]
78 bias: bool,
134 if bias:
135 self.bias = Parameter(torch.empty(out_channels, **factory_kwargs))
137 self.register_parameter('bias', None)
146 if self.bias is not None:
150 init.uniform_(self.bias, -bound, bound)
163 if self.bias is None:
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/gpu/
gemm_rewriter.cc
64 // If the bias is a sequence of ops that depend only on broadcasts of
65 // constants, materialize the bias if it's small.
67 // Normally the constant-folding pass would materialize the bias if it is
68 // calculated entirely from constants. But if the bias is a broadcast of a
80 // broadcasted bias, if it supports that fusion efficiently.
81 HloInstruction *MaybeConstantFoldBias(HloInstruction *bias) { in MaybeConstantFoldBias() argument
97 if (ShapeUtil::ByteSizeOf(bias->shape()) <= kMaxMaterializeBiasBytes && in MaybeConstantFoldBias()
98 (Match(bias, broadcast_of_nonscalar) || in MaybeConstantFoldBias()
99 Match(bias, m::Reshape(broadcast_of_nonscalar)) || in MaybeConstantFoldBias()
100 Match(bias, m::Transpose(broadcast_of_nonscalar)) || in MaybeConstantFoldBias()
[all …]
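The comments above describe a heuristic: a bias that reduces to a broadcast of constants is materialized (constant-folded) only if the materialized tensor is small, so that a large bias stays as a broadcast the GEMM can fuse. A hedged sketch of that size test, with illustrative names and threshold rather than the XLA constants:

```python
import math
import numpy as np

MAX_MATERIALIZE_BIAS_BYTES = 1 << 20      # illustrative budget, not the real kMaxMaterializeBiasBytes

def maybe_materialize_bias(constant: np.ndarray, target_shape, itemsize: int):
    if math.prod(target_shape) * itemsize > MAX_MATERIALIZE_BIAS_BYTES:
        return None                       # too large: keep the broadcast symbolic
    return np.broadcast_to(constant, target_shape).copy()
```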
/aosp_15_r20/external/trusty/arm-trusted-firmware/plat/imx/imx8ulp/upower/
upower_soc_defs.h
126 #define UPWR_FILL_DOMBIAS_ARGS(dom, bias, args) \ argument
129 switch ((bias)->apply) { \
150 (args).B.dommode = (uint32_t)((bias)->dommode); \
151 (args).B.avdmode = (uint32_t)((bias)->avdmode); \
153 (args).B.domrbbn = ((bias)->dombias.rbbn > sat) ? sat : \
154 UPWR_BIAS_MILIV((bias)->dombias.rbbn); \
155 (args).B.domrbbp = ((bias)->dombias.rbbp > sat) ? sat : \
156 UPWR_BIAS_MILIV((bias)->dombias.rbbp); \
157 (args).B.avdrbbn = ((bias)->avdbias.rbbn > sat) ? sat : \
158 UPWR_BIAS_MILIV((bias)->avdbias.rbbn); \
[all …]
/aosp_15_r20/external/arm-trusted-firmware/fdts/
stm32mp15-pinctrl.dtsi
24 bias-disable;
30 bias-pull-up;
38 bias-disable;
47 bias-disable;
59 bias-disable;
65 bias-pull-up;
77 bias-disable;
83 bias-pull-up;
98 bias-disable;
104 bias-disable;
[all …]
/aosp_15_r20/external/trusty/arm-trusted-firmware/fdts/
stm32mp15-pinctrl.dtsi
25 bias-disable;
31 bias-pull-up;
40 bias-disable;
50 bias-disable;
63 bias-disable;
76 bias-disable;
86 bias-pull-up;
96 bias-pull-up;
112 bias-disable;
118 bias-disable;
[all …]
/aosp_15_r20/external/pytorch/torch/ao/pruning/_experimental/pruner/
prune_functions.py
4 Also contains utilities for bias propagation
16 # BIAS PROPAGATION
31 r"""Returns new adjusted bias for the second supported module"""
44 # Propagating first layer pruned biases and calculating the new second layer bias
46 # so adding bias involves broadcasting, logically:
67 ): # next_layer is parametrized & has original bias ._bias
70 not parametrize.is_parametrized(next_layer) and next_layer.bias is not None
71 ): # next_layer not parametrized & has .bias
72 adjusted_bias = nn.Parameter(scaled_biases + next_layer.bias)
73 else: # next_layer has no bias
[all …]
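The comments and branches above implement bias propagation: the pruned first layer's biases are pushed through to the next layer and either added to its existing bias or installed as a new one (with a third branch for parametrized modules holding the original bias as ._bias). A rough sketch of the two unparametrized cases, with hypothetical names:

```python
import torch
import torch.nn as nn

def propagate_bias(scaled_biases: torch.Tensor, next_layer: nn.Linear) -> None:
    # scaled_biases is assumed to already be the pruned layer's biases pushed
    # through next_layer's weight (that step is not reproduced here).
    if next_layer.bias is not None:
        next_layer.bias = nn.Parameter(scaled_biases + next_layer.bias)
    else:
        next_layer.bias = nn.Parameter(scaled_biases)
```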
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/
Mm.cpp
155 Tensor bias = *bias_arg; in pack_biases() local
156 if (bias.is_cpu()) { in pack_biases()
157 bias = bias.vulkan(); in pack_biases()
159 return convert(bias); in pack_biases()
182 const Tensor bias = bias_arg->contiguous(); in pack_biases_quantized_weights() local
183 const IntArrayRef b_sizes = bias.sizes(); in pack_biases_quantized_weights()
184 const float* const src_bias_ptr = bias.const_data_ptr<float>(); in pack_biases_quantized_weights()
191 if (bias.sizes().size() == 3) { in pack_biases_quantized_weights()
195 } else if (bias.sizes().size() == 2) { in pack_biases_quantized_weights()
208 if (bias.sizes().size() == 2) { in pack_biases_quantized_weights()
[all …]
/aosp_15_r20/hardware/invensense/65xx/libsensors_iio/software/core/mllite/
data_builder.c
56 void inv_apply_calibration(struct inv_single_sensor_t *sensor, const long *bias);
480 /** Takes raw data stored in the sensor, removes bias, and converts it to
483 * @param[in] bias bias in the mounting frame, in hardware units scaled by
486 void inv_apply_calibration(struct inv_single_sensor_t *sensor, const long *bias) in inv_apply_calibration() argument
497 raw32[0] -= bias[0] >> 1; in inv_apply_calibration()
498 raw32[1] -= bias[1] >> 1; in inv_apply_calibration()
499 raw32[2] -= bias[2] >> 1; in inv_apply_calibration()
506 /** Returns the current bias for the compass
507 * @param[out] bias Compass bias in hardware units scaled by 2^16. In mounting frame.
510 void inv_get_compass_bias(long *bias) in inv_get_compass_bias() argument
[all …]
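The comments above say the bias passed to inv_apply_calibration is expressed in hardware units scaled by 2^16 and is removed from the raw reading before conversion. A toy illustration of subtracting a Q16 fixed-point bias (the driver's own shift handling, such as the bias[i] >> 1 visible above, reflects its internal scaling and is not reproduced here):

```python
SCALE_Q16 = 1 << 16

raw_counts = 1200                         # raw sensor reading, hardware units
bias_q16 = 150 << 16                      # bias in hardware units scaled by 2^16

calibrated_q16 = (raw_counts << 16) - bias_q16
print(calibrated_q16 / SCALE_Q16)         # 1050.0 hardware units after bias removal
```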
/aosp_15_r20/external/pytorch/torch/ao/nn/quantizable/modules/
activation.py
38 bias: add bias as module parameter. Default: True.
39 add_bias_kv: add bias to the key and value sequences at dim=0.
67 bias: bool = True,
81 bias,
90 self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs
93 self.kdim, self.embed_dim, bias=bias, **factory_kwargs
96 self.vdim, self.embed_dim, bias=bias, **factory_kwargs
99 …self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs) # type: ig…
138 observed.out_proj.bias = other.out_proj.bias # type: ignore[has-type]
141 bias = other.in_proj_bias
[all …]
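The docstring above lists the bias-related constructor flags of the quantizable attention module, which mirror those of torch.nn.MultiheadAttention. A quick usage sketch with the standard module (sizes are arbitrary):

```python
import torch
import torch.nn as nn

mha = nn.MultiheadAttention(embed_dim=16, num_heads=4, bias=True, add_bias_kv=False)
x = torch.randn(5, 2, 16)                 # (seq_len, batch, embed_dim)
out, attn_weights = mha(x, x, x)
print(out.shape)                          # torch.Size([5, 2, 16])
```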
/aosp_15_r20/system/chre/chre_api/legacy/v1_3/chre/
sensor.h
239 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
240 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
246 * If bias delivery is supported, this event is generated by default when
248 * CHRE_SENSOR_TYPE_GYROSCOPE, or if bias delivery is explicitly enabled
258 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
259 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
265 * If bias delivery is supported, this event is generated by default when
267 * CHRE_SENSOR_TYPE_GEOMAGNETIC_FIELD, or if bias delivery is explicitly enabled
277 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
278 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
[all …]
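The header comments above describe bias reporting for calibrated sensor events: the per-axis bias (bias[0] == x, bias[1] == y, bias[2] == z) is what gets subtracted from uncalibrated samples to yield calibrated ones. A one-line illustration with made-up values:

```python
uncalibrated = [0.12, -0.03, 9.85]        # x, y, z readings
bias = [0.02, -0.01, 0.05]                # bias[0]=x, bias[1]=y, bias[2]=z
calibrated = [u - b for u, b in zip(uncalibrated, bias)]   # ≈ [0.10, -0.02, 9.80]
```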
/aosp_15_r20/system/chre/chre_api/legacy/v1_4/chre/
sensor.h
240 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
241 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
247 * If bias delivery is supported, this event is generated by default when
249 * CHRE_SENSOR_TYPE_GYROSCOPE, or if bias delivery is explicitly enabled
259 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
260 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
266 * If bias delivery is supported, this event is generated by default when
268 * CHRE_SENSOR_TYPE_GEOMAGNETIC_FIELD, or if bias delivery is explicitly enabled
278 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
279 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
[all …]
/aosp_15_r20/hardware/invensense/6515/libsensors_iio/software/core/mllite/
data_builder.c
57 void inv_apply_calibration(struct inv_single_sensor_t *sensor, const long *bias);
685 /** Takes raw data stored in the sensor, removes bias, and converts it to
688 * @param[in] bias bias in the mounting frame, in hardware units scaled by
691 void inv_apply_calibration(struct inv_single_sensor_t *sensor, const long *bias) in inv_apply_calibration() argument
702 raw32[0] -= bias[0] >> 1; in inv_apply_calibration()
703 raw32[1] -= bias[1] >> 1; in inv_apply_calibration()
704 raw32[2] -= bias[2] >> 1; in inv_apply_calibration()
711 /** Returns the current bias for the compass
712 * @param[out] bias Compass bias in hardware units scaled by 2^16. In mounting frame.
715 void inv_get_compass_bias(long *bias) in inv_get_compass_bias() argument
[all …]
/aosp_15_r20/external/pytorch/torch/_inductor/
mkldnn_lowerings.py
73 bias: TensorBox,
86 bias,
102 bias: TensorBox,
118 bias,
136 bias: TensorBox,
152 bias,
297 bias: TensorBox,
311 bias,
372 bias: TensorBox,
392 bias,
[all …]
/aosp_15_r20/system/chre/chre_api/legacy/v1_6/chre/
sensor.h
274 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
275 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
281 * If bias delivery is supported, this event is generated by default when
283 * CHRE_SENSOR_TYPE_GYROSCOPE, or if bias delivery is explicitly enabled
293 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
294 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
300 * If bias delivery is supported, this event is generated by default when
302 * CHRE_SENSOR_TYPE_GEOMAGNETIC_FIELD, or if bias delivery is explicitly enabled
312 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
313 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
[all …]
/aosp_15_r20/system/chre/chre_api/legacy/v1_5/chre/
sensor.h
274 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
275 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
281 * If bias delivery is supported, this event is generated by default when
283 * CHRE_SENSOR_TYPE_GYROSCOPE, or if bias delivery is explicitly enabled
293 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
294 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
300 * If bias delivery is supported, this event is generated by default when
302 * CHRE_SENSOR_TYPE_GEOMAGNETIC_FIELD, or if bias delivery is explicitly enabled
312 * field within 'readings', or by the 3D array 'bias' (bias[0] == x_bias;
313 * bias[1] == y_bias; bias[2] == z_bias). Bias is subtracted from uncalibrated
[all …]
