/aosp_15_r20/external/XNNPACK/test/

leaky-relu.cc
     28:  const float negative_slope = std::uniform_real_distribution<float>(0.5f, 1.0f)(rng);  [in TEST_F(), local]
     50:  …ASSERT_EQ(xnn_status_success, xnn_define_leaky_relu(subgraph, negative_slope, input_id, output_id,…  [in TEST_F()]
     56:  ASSERT_EQ(node->params.leaky_relu.negative_slope, negative_slope);  [in TEST_F()]
     66:  const float negative_slope = std::uniform_real_distribution<float>(0.5f, 1.0f)(rng);  [in TEST_F(), local]
     93:  …ASSERT_EQ(xnn_status_success, xnn_define_leaky_relu(subgraph, negative_slope, input_id, output_id,…  [in TEST_F()]
     99:  ASSERT_EQ(node->params.leaky_relu.negative_slope, negative_slope);  [in TEST_F()]
    113:  const float negative_slope = std::uniform_real_distribution<float>(0.1f, 10.0f)(rng);  [in TEST_F(), local]
    135:  …ASSERT_EQ(xnn_status_success, xnn_define_leaky_relu(subgraph, negative_slope, input_id, output_id,…  [in TEST_F()]
    141:  ASSERT_EQ(node->params.leaky_relu.negative_slope, negative_slope);  [in TEST_F()]
    151:  const float negative_slope = std::uniform_real_distribution<float>(0.1f, 10.0f)(rng);  [in TEST_F(), local]
    [additional matches elided]

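These subgraph tests draw a random negative_slope, define a leaky ReLU node, and check that the slope ends up in node->params. The operation they exercise is ordinary leaky ReLU; a minimal NumPy sketch of that formula (a reference only, not XNNPACK's implementation):

    import numpy as np

    def leaky_relu_ref(x, negative_slope: float) -> np.ndarray:
        """Reference leaky ReLU: y = x for x >= 0, y = negative_slope * x otherwise."""
        x = np.asarray(x, dtype=np.float32)
        return np.where(x >= 0, x, negative_slope * x)

    # Example slope drawn from the same range the first tests use, [0.5, 1.0).
    rng = np.random.default_rng(0)
    negative_slope = rng.uniform(0.5, 1.0)
    y = leaky_relu_ref([-2.0, -0.5, 0.0, 3.0], negative_slope)
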
leaky-relu-operator-tester.h
     83:  inline LeakyReLUOperatorTester& negative_slope(float negative_slope) {  [in negative_slope(), function]
     84:  assert(std::isnormal(negative_slope));  [in negative_slope()]
     85:  this->negative_slope_ = negative_slope;  [in negative_slope()]
     89:  inline float negative_slope() const {  [in negative_slope(), function]
    155:  const uint16_t negative_slope_as_half = fp16_ieee_from_fp32_value(negative_slope());  [in TestF16()]
    173:  negative_slope(),  [in TestF16()]
    223:  const float y = std::signbit(x) ? x * negative_slope() : x;  [in TestF32()]
    235:  negative_slope(),  [in TestF32()]
    257:  … << ", input " << input[i * input_stride() + c] << ", negative slope " << negative_slope();  [in TestF32()]
    285:  … float y = (x < 0.0f ? x * negative_slope() : x) / output_scale() + float(output_zero_point());  [in TestQS8()]
    [additional matches elided]

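The TestQS8 reference at line 285 applies the slope in real space and then maps the result through the output quantization parameters. A rough NumPy sketch of that check; the rounding and int8 clamping at the end are an assumption, since they are not visible in the excerpt:

    import numpy as np

    def leaky_relu_qs8_ref(x_real, negative_slope, output_scale, output_zero_point):
        """Mirror of the tester's reference: y = (x < 0 ? x*slope : x) / output_scale
        + zero_point, then (assumed) rounded and clamped to the signed 8-bit range."""
        x_real = np.asarray(x_real, dtype=np.float32)
        y = np.where(x_real < 0.0, x_real * negative_slope, x_real) / output_scale + output_zero_point
        return np.clip(np.rint(y), -128, 127).astype(np.int8)

    print(leaky_relu_qs8_ref([-1.5, 0.25, 2.0], negative_slope=0.3,
                             output_scale=0.05, output_zero_point=1))
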
leaky-relu-nc.cc
     71:  for (float negative_slope : std::vector<float>({-10.0f, -1.0f, -0.1f, 0.1f, 10.0f})) {  [in TEST(), local]
     75:  .negative_slope(negative_slope)  [in TEST()]
    141:  for (float negative_slope : std::vector<float>({-10.0f, -1.0f, -0.1f, 0.1f, 10.0f})) {  [in TEST(), local]
    145:  .negative_slope(negative_slope)  [in TEST()]
    166:  for (float negative_slope : std::vector<float>({-10.0f, -1.0f, -0.1f, 0.1f, 10.0f})) {  [in TEST(), local]
    170:  .negative_slope(negative_slope)  [in TEST()]
    286:  for (float negative_slope : std::vector<float>({-10.0f, -1.0f, -0.1f, 0.1f, 10.0f})) {  [in TEST(), local]
    290:  .negative_slope(negative_slope)  [in TEST()]

/aosp_15_r20/external/tensorflow/tensorflow/python/keras/layers/

advanced_activations.py
    373:  f(x) = negative_slope * (x - threshold) otherwise
    386:  >>> layer = tf.keras.layers.ReLU(negative_slope=1.0)
    406:  negative_slope: Float >= 0. Negative slope coefficient. Default to 0.
    411:  def __init__(self, max_value=None, negative_slope=0, threshold=0, **kwargs):  [argument]
    416:  if negative_slope is None or negative_slope < 0.:
    417:  raise ValueError('negative_slope of a ReLU layer cannot be a negative '
    418:  'value. Got: %s' % negative_slope)
    427:  self.negative_slope = backend.cast_to_floatx(negative_slope)
    432:  # negative_slope.
    434:  alpha=self.negative_slope,
    [additional matches elided]

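Keras's configurable ReLU generalizes leaky ReLU: below threshold it applies negative_slope to (x - threshold), between threshold and max_value it is the identity, and above max_value it saturates. A NumPy sketch of that piecewise definition, following the layer's documented formula (only the last branch appears in the excerpt above):

    import numpy as np

    def keras_relu_ref(x, max_value=None, negative_slope=0.0, threshold=0.0):
        """Piecewise: negative_slope*(x - threshold) below threshold, identity up to
        max_value, then clipped at max_value (if given)."""
        x = np.asarray(x, dtype=np.float32)
        y = np.where(x < threshold, negative_slope * (x - threshold), x)
        if max_value is not None:
            y = np.minimum(y, max_value)
        return y

    # Matches the docstring example above: negative_slope=1.0 makes the layer the identity.
    print(keras_relu_ref([-3.0, -1.0, 0.0, 2.0], negative_slope=1.0))
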
/aosp_15_r20/external/XNNPACK/src/subgraph/

leaky-relu.c
     45:  node->params.leaky_relu.negative_slope,  [in create_leaky_relu_operator()]
     53:  node->params.leaky_relu.negative_slope,  [in create_leaky_relu_operator()]
     61:  node->params.leaky_relu.negative_slope,  [in create_leaky_relu_operator()]
     72:  node->params.leaky_relu.negative_slope,  [in create_leaky_relu_operator()]
    154:  float negative_slope,  [in xnn_define_leaky_relu(), argument]
    164:  if (!isfinite(negative_slope)) {  [in xnn_define_leaky_relu()]
    168:  negative_slope);  [in xnn_define_leaky_relu()]
    250:  const float negative_input_output_scale = positive_input_output_scale * negative_slope;  [in xnn_define_leaky_relu()]
    274:  node->params.leaky_relu.negative_slope = negative_slope;  [in xnn_define_leaky_relu()]

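For quantized leaky ReLU the negative branch is folded into the requantization: line 250 derives the negative-side input/output scale by multiplying the positive-side scale by negative_slope. A small sketch of that relationship, assuming the positive scale is input_scale / output_scale (the surrounding code is not shown in the excerpt):

    def leaky_relu_scales(input_scale, output_scale, negative_slope):
        """Assumed derivation: the positive branch rescales by input_scale/output_scale,
        and the negative branch additionally multiplies by negative_slope (line 250)."""
        positive_input_output_scale = input_scale / output_scale
        negative_input_output_scale = positive_input_output_scale * negative_slope
        return positive_input_output_scale, negative_input_output_scale

    print(leaky_relu_scales(input_scale=0.02, output_scale=0.04, negative_slope=0.1))
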
/aosp_15_r20/external/executorch/backends/xnnpack/operators/

op_leaky_relu.py
     26:  # LeakyReLU nodes which use the default value for negative_slope don't have the
     27:  # negative_slope value included in their args, so we need to hardcode it.
     49:  # No negative_slope in args, meaning the default negative_slope is used
     50:  negative_slope = (
     58:  negative_slope=negative_slope,

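A hypothetical sketch of the fallback the comments describe: if the exported LeakyReLU node carries no explicit slope argument, fall back to torch.nn.LeakyReLU's documented default of 0.01. The `node.args` access and helper name are assumptions for illustration; the actual visitor code is elided above.

    # Assumed shape of the exported node: args = (input, negative_slope) when the
    # slope was given explicitly, args = (input,) when the default is used.
    DEFAULT_NEGATIVE_SLOPE = 0.01

    def get_negative_slope(node) -> float:
        return float(node.args[1]) if len(node.args) > 1 else DEFAULT_NEGATIVE_SLOPE
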
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/

leaky-relu.c
     21:  float negative_slope,  [in pytorch_qnnp_create_leaky_relu_nc_q8(), argument]
     48:  if (negative_slope <= 0.0f || !isnormal(negative_slope)) {  [in pytorch_qnnp_create_leaky_relu_nc_q8()]
     51:  negative_slope);  [in pytorch_qnnp_create_leaky_relu_nc_q8()]
     55:  if (negative_slope > 1.0f) {  [in pytorch_qnnp_create_leaky_relu_nc_q8()]
     58:  negative_slope);  [in pytorch_qnnp_create_leaky_relu_nc_q8()]
    121:  float y = x < 0.0f ? x * negative_slope : x;  [in pytorch_qnnp_create_leaky_relu_nc_q8()]

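QNNPACK's Q8 constructor only accepts slopes in (0, 1]: it rejects non-positive or non-normal values (line 48) and values above 1 (line 55). A Python sketch of an equivalent precondition check, with error handling simplified:

    import math

    def validate_q8_negative_slope(negative_slope: float) -> None:
        """Approximate mirror of the checks at lines 48 and 55: a strictly positive,
        finite slope no greater than 1 (the C code also rejects subnormals via isnormal())."""
        if not (negative_slope > 0.0 and math.isfinite(negative_slope)):
            raise ValueError(f"negative slope must be finite and positive, got {negative_slope}")
        if negative_slope > 1.0:
            raise ValueError(f"negative slope must not exceed 1.0, got {negative_slope}")
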
/aosp_15_r20/external/pytorch/torch/ao/nn/intrinsic/quantized/modules/

linear_relu.py
     64:  + negative_slope
     76:  self, in_features, out_features, negative_slope, bias=True, dtype=torch.qint8  [argument]
     79:  self.negative_slope = negative_slope
     87:  self.negative_slope,
    109:  mod.in_features, mod.out_features, leaky_relu.negative_slope, dtype=dtype
    121:  linear.in_features, linear.out_features, leaky_relu.negative_slope

/aosp_15_r20/external/XNNPACK/src/operators/

unary-elementwise-nc.c
    877:  float negative_slope,  [in xnn_create_leaky_relu_nc_f16(), argument]
    881:  const uint16_t negative_slope_as_half = fp16_ieee_from_fp32_value(negative_slope);  [in xnn_create_leaky_relu_nc_f16()]
    882:  negative_slope = fp16_ieee_to_fp32_value(negative_slope_as_half);  [in xnn_create_leaky_relu_nc_f16()]
    883:  if (!isfinite(negative_slope)) {  [in xnn_create_leaky_relu_nc_f16()]
    887:  negative_slope);  [in xnn_create_leaky_relu_nc_f16()]
    907:  float negative_slope,  [in xnn_create_leaky_relu_nc_f32(), argument]
    911:  if (!isfinite(negative_slope)) {  [in xnn_create_leaky_relu_nc_f32()]
    915:  negative_slope);  [in xnn_create_leaky_relu_nc_f32()]
    921:  xnn_params.f32.lrelu.init.f32_lrelu(&params, negative_slope);  [in xnn_create_leaky_relu_nc_f32()]
    935:  float negative_slope,  [in xnn_create_leaky_relu_nc_qs8(), argument]
    [additional matches elided]

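In the F16 path the float slope is converted to IEEE half precision and back (lines 881-882), so the operator effectively uses the half-precision rounding of the requested slope before validating it. A NumPy sketch of that round trip:

    import numpy as np

    def fp16_round_trip_slope(negative_slope: float) -> float:
        """Round the requested slope to the nearest IEEE half-precision value, as the
        F16 constructor above does before any further checks."""
        return float(np.float32(np.float16(np.float32(negative_slope))))

    # 0.3 is not exactly representable in fp16; the operator sees the rounded value.
    print(fp16_round_trip_slope(0.3))   # ~0.300048828125
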
prelu-nc.c
     26:  const void* negative_slope,  [in create_prelu_nc(), argument]
     95:  pack_prelu_w(channels, negative_slope, weights_ptr);  [in create_prelu_nc()]
    124:  const void* negative_slope,  [in xnn_create_prelu_nc_f16(), argument]
    136:  negative_slope, flags,  [in xnn_create_prelu_nc_f16()]
    148:  const float* negative_slope,  [in xnn_create_prelu_nc_f32(), argument]
    155:  negative_slope, flags,  [in xnn_create_prelu_nc_f32()]

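Unlike the scalar leaky ReLU operators, PReLU takes a pointer to per-channel slopes, which create_prelu_nc packs into the operator's weights. A NumPy sketch of the per-channel semantics for an NC (batch by channels) layout:

    import numpy as np

    def prelu_nc_ref(x, negative_slope):
        """x has shape (batch, channels); negative_slope has shape (channels,).
        Each channel uses its own slope on the negative side."""
        x = np.asarray(x, dtype=np.float32)
        slope = np.asarray(negative_slope, dtype=np.float32)
        return np.where(x >= 0, x, x * slope[np.newaxis, :])

    x = np.array([[-1.0, -1.0, 2.0], [0.5, -4.0, -0.25]], dtype=np.float32)
    print(prelu_nc_ref(x, negative_slope=[0.1, 0.2, 0.3]))
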
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/modules/

activation.py
    127:  negative_slope: Controls the angle of the negative slope. Default: 1e-2
    134:  negative_slope: float = 1e-2,
    140:  super().__init__(negative_slope, inplace)
    146:  input, self.negative_slope, self.inplace, self.scale, self.zero_point
    155:  return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)
    159:  return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)

/aosp_15_r20/external/pytorch/aten/src/ATen/native/metal/ops/

MetalLeakyReLU.mm
     21:  float negative_slope = negative_slope_val.toFloat();
     32:  @(negative_slope)
     56:  float negative_slope = negative_slope_val.toFloat();
     67:  @(negative_slope)

/aosp_15_r20/external/executorch/kernels/portable/cpu/

op_leaky_relu.cpp
     26:  const Scalar& negative_slope,  [in leaky_relu_out(), argument]
     42:  ScalarType sc_type = utils::get_scalar_dtype(negative_slope);  [in leaky_relu_out()]
     52:  utils::extract_scalar(negative_slope, &negative_slope_val);  [in leaky_relu_out()]

/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/

functional.py
    558:  negative_slope: float = 0.01,
    565:  leaky_relu(input, negative_slope=0.01, inplace=False, scale, zero_point) -> Tensor
    572:  negative_slope: The slope of the negative input
    583:  torch._C._nn.leaky_relu(input, negative_slope, out=output)
    586:  result = torch._C._nn.leaky_relu_(input, negative_slope)
    588:  result = torch._C._nn.leaky_relu(input, negative_slope)

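A rough usage sketch based on the docstring signature at line 565: the input is a quantized tensor, and scale/zero_point are the quantization parameters applied to the output. Exact keyword handling may differ from this sketch.

    import torch
    import torch.ao.nn.quantized.functional as qF

    x = torch.randn(4, 8)
    qx = torch.quantize_per_tensor(x, scale=0.05, zero_point=64, dtype=torch.quint8)
    # Apply quantized leaky ReLU, requantizing the output with the given parameters.
    qy = qF.leaky_relu(qx, negative_slope=0.01, scale=0.05, zero_point=64)
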
/aosp_15_r20/external/pytorch/torch/nn/modules/

activation.py
    800:  negative_slope: Controls the angle of the negative slope (which is used for
    818:  __constants__ = ["inplace", "negative_slope"]
    820:  negative_slope: float
    822:  def __init__(self, negative_slope: float = 1e-2, inplace: bool = False) -> None:
    824:  self.negative_slope = negative_slope
    828:  return F.leaky_relu(input, self.negative_slope, self.inplace)
    832:  return f"negative_slope={self.negative_slope}{inplace_str}"

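This is the float nn.LeakyReLU module: negative_slope defaults to 1e-2 and is forwarded to F.leaky_relu in forward(). A short usage example:

    import torch
    import torch.nn as nn

    m = nn.LeakyReLU(negative_slope=0.2)
    x = torch.tensor([-1.0, 0.0, 2.0])
    print(m(x))            # tensor([-0.2000, 0.0000, 2.0000])
    print(m.extra_repr())  # "negative_slope=0.2"
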
/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/

mkldnn_fusion.py
    179:  CallFunction(aten.mul, computation_call, KeywordArg("negative_slope")),
    261:  negative_slope = kwargs.get("negative_slope")
    262:  if isinstance(negative_slope, ir.TensorBox):
    278:  [negative_slope],
    290:  L[aten.mul](out, negative_slope),

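The fusion pass matches the decomposed form of leaky ReLU that appears after lowering, where the negative branch shows up as an aten.mul by negative_slope (line 179). A sketch of the elementwise decomposition being targeted; the full pattern, including the where/comparison leg, is not visible in the excerpt, so treat this as an assumed equivalent form rather than the exact matched graph:

    import torch

    def decomposed_leaky_relu(x: torch.Tensor, negative_slope: float) -> torch.Tensor:
        """Keep x where it is positive, otherwise take the x * negative_slope branch."""
        return torch.where(x > 0, x, x * negative_slope)

    x = torch.tensor([-2.0, 0.5, 3.0])
    assert torch.allclose(decomposed_leaky_relu(x, 0.1),
                          torch.nn.functional.leaky_relu(x, 0.1))
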
/aosp_15_r20/external/pytorch/torch/nn/

init.py
    102:  >>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2
    123:  negative_slope = 0.01
    130:  negative_slope = param
    132:  raise ValueError(f"negative_slope {param} not a valid number")
    133:  return math.sqrt(2.0 / (1 + negative_slope**2))

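calculate_gain uses negative_slope for the 'leaky_relu' nonlinearity and returns sqrt(2 / (1 + negative_slope^2)); with the default slope of 0.01 this is nearly the plain ReLU gain of sqrt(2). A small check plus the typical use in Kaiming initialization:

    import math
    import torch
    import torch.nn as nn

    slope = 0.2
    expected = math.sqrt(2.0 / (1 + slope ** 2))        # the formula at line 133
    gain = nn.init.calculate_gain('leaky_relu', slope)  # same call as the docstring example
    assert abs(gain - expected) < 1e-12

    # Typical use: Kaiming initialization parameterized by the layer's negative_slope.
    w = nn.init.kaiming_uniform_(torch.empty(64, 32), a=slope, nonlinearity='leaky_relu')
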
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/

Clamp.cpp
    558:  Tensor leaky_relu(const Tensor& self_arg, const Scalar& negative_slope) {  [in leaky_relu(), argument]
    560:  scalar.push_back(negative_slope);  [in leaky_relu()]
    564:  Tensor& leaky_relu_(Tensor& self, const Scalar& negative_slope) {  [in leaky_relu_(), argument]
    566:  scalar.push_back(negative_slope);  [in leaky_relu_()]

/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/glsl/

leaky_relu_.glsl
     12:  float negative_slope;
     24:  const vec4 mask = negative_values * vec4(uBlock.negative_slope) + positive_values;

leaky_relu.glsl
     13:  float negative_slope;
     25:  const vec4 mask = negative_values * vec4(uBlock.negative_slope) + positive_values;

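Both shaders compute the result without branches by scaling the negative part of the input and adding back the positive part. Assuming negative_values and positive_values are min(x, 0) and max(x, 0) (their definitions are not in the excerpt), the same trick in NumPy:

    import numpy as np

    def branchless_leaky_relu(x, negative_slope: float) -> np.ndarray:
        """Scale the negative part and add back the positive part, mirroring the
        vec4 arithmetic in the shaders (split into min/max parts is assumed)."""
        x = np.asarray(x, dtype=np.float32)
        negative_values = np.minimum(x, 0.0)
        positive_values = np.maximum(x, 0.0)
        return negative_values * negative_slope + positive_values

    print(branchless_leaky_relu([-4.0, -0.5, 0.0, 2.0], 0.01))
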
/aosp_15_r20/external/executorch/kernels/test/

op_leaky_relu_test.cpp
     27:  const Scalar& negative_slope,  [in op_leaky_relu_out(), argument]
     30:  context_, in, negative_slope, out);  [in op_leaky_relu_out()]

/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/xnnpack/

leaky_relu_tester.h
     47:  inline LeakyReluTester& NegativeSlope(float negative_slope) {  [in NegativeSlope(), argument]
     48:  negative_slope_ = negative_slope;  [in NegativeSlope()]

quantized_leaky_relu_tester.h
     77:  inline QuantizedLeakyReluTester& NegativeSlope(float negative_slope) {  [in NegativeSlope(), argument]
     78:  negative_slope_ = negative_slope;  [in NegativeSlope()]

/aosp_15_r20/external/pytorch/tools/autograd/

derivatives.yaml
    2099:  - name: leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
    2100:  self: leaky_relu_backward(grad, self, negative_slope, false)
    2103:  - name: leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
    2104:  self: leaky_relu_backward(grad, result, negative_slope, true)
    2105:  …result: self_t.copy_(leaky_relu_backward(original_self_t.conj(), result, negative_slope, true).con…
    2445:  - name: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_re…
    2447:  grad_output: leaky_relu_backward(grad, self, negative_slope, false)
    2449:  # leaky_relu_backward(grad_output, self, negative_slope, false)
    2450:  # computes grad_output * at::where(self_p > 0, 1, negative_slope)
    2452:  # grad_output_t * at::where(self_p > 0, self_p.new_ones([]), negative_slope);
    [additional matches elided]

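The comment at line 2450 gives the gradient rule: the incoming gradient is scaled by 1 on the positive side and by negative_slope elsewhere. For the in-place variant (self_is_result=true) the same rule is applied to the saved result rather than the original input. A NumPy sketch of the out-of-place backward:

    import numpy as np

    def leaky_relu_backward_ref(grad_output, x, negative_slope):
        """grad_output * where(x > 0, 1, negative_slope), as described at line 2450."""
        x = np.asarray(x, dtype=np.float32)
        grad_output = np.asarray(grad_output, dtype=np.float32)
        return grad_output * np.where(x > 0, 1.0, negative_slope)

    print(leaky_relu_backward_ref(grad_output=[1.0, 1.0, 1.0],
                                  x=[-2.0, 0.0, 3.0], negative_slope=0.01))
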
/aosp_15_r20/external/executorch/backends/xnnpack/test/ops/

leaky_relu.py
     52:  module = self.LeakyReLU(negative_slope=0.2)
     77:  Tester(self.LeakyReLU(negative_slope=0.2), inputs)