// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


void xnn_f32_velu_ukernel__scalar_rr2_p6_x4(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const float vprescale = params->scalar_rr2_p6.prescale;
  const float valpha = params->scalar_rr2_p6.alpha;
  const float vbeta = params->scalar_rr2_p6.beta;
  const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
  const float vlog2e = params->scalar_rr2_p6.log2e;
  const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
  const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
  const float vc6 = params->scalar_rr2_p6.c6;
  const float vc5 = params->scalar_rr2_p6.c5;
  const float vc4 = params->scalar_rr2_p6.c4;
  const float vc3 = params->scalar_rr2_p6.c3;
  const float vc2 = params->scalar_rr2_p6.c2;
  const float vone = params->scalar_rr2_p6.one;

  // Main loop: process 4 elements per iteration.
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    float vx3 = x[3];
    x += 4;

    const float vz0 = vx0 * vprescale;
    const float vz1 = vx1 * vprescale;
    const float vz2 = vx2 * vprescale;
    const float vz3 = vx3 * vprescale;

    // n = round(z / ln2), computed with the "magic bias" rounding trick:
    // the rounded integer ends up in the low bits of vn.
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;
    float vn3 = vz3 * vlog2e + vmagic_bias;

    // s = 2**n, reconstructed by shifting the low bits of vn into the
    // floating-point exponent field.
    float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
    vn0 -= vmagic_bias;
    float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
    vn1 -= vmagic_bias;
    float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
    vn2 -= vmagic_bias;
    float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);
    vn3 -= vmagic_bias;

    // t = z - n * ln2, with ln2 split into high and low parts
    // (the "rr2" two-step range reduction).
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vt3 = vn3 * vminus_ln2_hi + vz3;

    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;
    vt2 = vn2 * vminus_ln2_lo + vt2;
    vt3 = vn3 * vminus_ln2_lo + vt3;

    // Below the saturation cutoff, zero s and t so that the reconstruction
    // further down yields exactly -alpha.
    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
      vs0 = 0.0f;
      vt0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
      vs1 = 0.0f;
      vt1 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
      vs2 = 0.0f;
      vt2 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
      vs3 = 0.0f;
      vt3 = 0.0f;
    }

    // Degree-6 polynomial approximation of exp(t) on the reduced range,
    // evaluated in Horner form: p = t * (c2 + t * (c3 + ... + t * c6)).
    float vp0 = vc6 * vt0 + vc5;
    float vp1 = vc6 * vt1 + vc5;
    float vp2 = vc6 * vt2 + vc5;
    float vp3 = vc6 * vt3 + vc5;

    vp0 = vp0 * vt0 + vc4;
    vp1 = vp1 * vt1 + vc4;
    vp2 = vp2 * vt2 + vc4;
    vp3 = vp3 * vt3 + vc4;

    vp0 = vp0 * vt0 + vc3;
    vp1 = vp1 * vt1 + vc3;
    vp2 = vp2 * vt2 + vc3;
    vp3 = vp3 * vt3 + vc3;

    vp0 = vp0 * vt0 + vc2;
    vp1 = vp1 * vt1 + vc2;
    vp2 = vp2 * vt2 + vc2;
    vp3 = vp3 * vt3 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;
    vp3 *= vt3;
    // Reconstruct expm1(z) = s * exp(t) - 1 = s * t * (1 + p) + (s - 1).
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;
    vt3 *= vs3;
    vs3 -= vone;

    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;

    // Negative inputs map to alpha * expm1(z); non-negative inputs to beta * x.
    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = vx0 * vbeta;
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = vx1 * vbeta;
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = vx2 * vbeta;
    const float ve3 = (vp3 + vs3) * valpha;
    float vy3 = vx3 * vbeta;

    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
      vy0 = ve0;
    }
    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
      vy1 = ve1;
    }
    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
      vy2 = ve2;
    }
    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
      vy3 = ve3;
    }

    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y[3] = vy3;
    y += 4;
  }
  // Remainder loop: process the final 1-3 elements one at a time.
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = vx * vprescale;

      float vn = vz * vlog2e + vmagic_bias;
      float vs = uint32_as_float(float_as_uint32(vn) << 23);
      vn -= vmagic_bias;

      float vt = vn * vminus_ln2_hi + vz;
      vt = vn * vminus_ln2_lo + vt;

      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
        vs = 0.0f;
        vt = 0.0f;
      }

      float vp = vc6 * vt + vc5;
      vp = vp * vt + vc4;
      vp = vp * vt + vc3;
      vp = vp * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = vx * vbeta;
      if XNN_UNPREDICTABLE(vx < 0.0f) {
        vy = ve;
      }

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}
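
// Note: the microkernel above evaluates a saturated ELU with an input
// pre-scale, i.e. for every element
//   y = x >= 0 ? beta * x : alpha * expm1(prescale * x)
// where expm1 is approximated with the rr2 range reduction and the degree-6
// polynomial evaluated in the loops. For comparison, a minimal libm-based
// reference sketch (the helper name reference_elu is illustrative, not an
// XNNPACK API; n here counts elements, unlike the byte count above):
//
//   #include <math.h>
//   #include <stddef.h>
//
//   static void reference_elu(size_t n, const float* x, float* y,
//                             float prescale, float alpha, float beta) {
//     for (size_t i = 0; i < n; i++) {
//       const float vx = x[i];
//       y[i] = vx >= 0.0f ? vx * beta : alpha * expm1f(vx * prescale);
//     }
//   }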