// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];

void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const float vprescale = params->scalar_rr2_lut16_p3.prescale;
  const float valpha = params->scalar_rr2_lut16_p3.alpha;
  const float vbeta = params->scalar_rr2_lut16_p3.beta;
  const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
  const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
  const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
  const float vc3 = params->scalar_rr2_lut16_p3.c3;
  const float vc2 = params->scalar_rr2_lut16_p3.c2;
  const float vone = params->scalar_rr2_lut16_p3.one;

  // Main loop: process 6 elements per iteration.
  for (; n >= 6 * sizeof(float); n -= 6 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    float vx3 = x[3];
    float vx4 = x[4];
    float vx5 = x[5];
    x += 6;

    // Scale the input: z := prescale * x.
    const float vz0 = vx0 * vprescale;
    const float vz1 = vx1 * vprescale;
    const float vz2 = vx2 * vprescale;
    const float vz3 = vx3 * vprescale;
    const float vz4 = vx4 * vprescale;
    const float vz5 = vx5 * vprescale;

    // Round z * log2(e) to the nearest multiple of 1/16 via the "magic bias" trick.
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;
    float vn3 = vz3 * vlog2e + vmagic_bias;
    float vn4 = vz4 * vlog2e + vmagic_bias;
    float vn5 = vz5 * vlog2e + vmagic_bias;

    // Split n into a 4-bit index into the 2**(-k/16) table and the remaining
    // bits, which are shifted into the floating-point exponent field.
    const uint32_t ven0 = float_as_uint32(vn0) << 19;
    const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
    vn0 -= vmagic_bias;
    const uint32_t ven1 = float_as_uint32(vn1) << 19;
    const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
    vn1 -= vmagic_bias;
    const uint32_t ven2 = float_as_uint32(vn2) << 19;
    const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
    vn2 -= vmagic_bias;
    const uint32_t ven3 = float_as_uint32(vn3) << 19;
    const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
    vn3 -= vmagic_bias;
    const uint32_t ven4 = float_as_uint32(vn4) << 19;
    const uint32_t vidx4 = float_as_uint32(vn4) & vindex_mask;
    vn4 -= vmagic_bias;
    const uint32_t ven5 = float_as_uint32(vn5) << 19;
    const uint32_t vidx5 = float_as_uint32(vn5) & vindex_mask;
    vn5 -= vmagic_bias;

    // Reconstruct s := 2**n from the table value and the exponent bits, and
    // start the two-step Cody-Waite reduction t := z - n * log(2) (the "rr2"
    // in the kernel name).
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
    float vt3 = vn3 * vminus_ln2_hi + vz3;
    float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
    float vt4 = vn4 * vminus_ln2_hi + vz4;
    float vs4 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx4] + ven4);
    float vt5 = vn5 * vminus_ln2_hi + vz5;
    float vs5 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx5] + ven5);

    // Finish the reduction. Below the saturation cutoff, expm1(z) == -1 in
    // single precision, so force s and t to zero; the result then degenerates
    // to s - 1 == -1.
    vt0 = vn0 * vminus_ln2_lo + vt0;
    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
      vs0 = 0.0f;
      vt0 = 0.0f;
    }
    vt1 = vn1 * vminus_ln2_lo + vt1;
    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
      vs1 = 0.0f;
      vt1 = 0.0f;
    }
    vt2 = vn2 * vminus_ln2_lo + vt2;
    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
      vs2 = 0.0f;
      vt2 = 0.0f;
    }
    vt3 = vn3 * vminus_ln2_lo + vt3;
    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
      vs3 = 0.0f;
      vt3 = 0.0f;
    }
    vt4 = vn4 * vminus_ln2_lo + vt4;
    if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
      vs4 = 0.0f;
      vt4 = 0.0f;
    }
    vt5 = vn5 * vminus_ln2_lo + vt5;
    if XNN_UNPREDICTABLE(vz5 <= vsat_cutoff) {
      vs5 = 0.0f;
      vt5 = 0.0f;
    }

    // Degree-3 polynomial: exp(t) ~ 1 + t + c2 * t**2 + c3 * t**3 on the
    // reduced interval.
    float vp0 = vc3 * vt0 + vc2;
    float vp1 = vc3 * vt1 + vc2;
    float vp2 = vc3 * vt2 + vc2;
    float vp3 = vc3 * vt3 + vc2;
    float vp4 = vc3 * vt4 + vc2;
    float vp5 = vc3 * vt5 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;
    vp3 *= vt3;
    vp4 *= vt4;
    vp5 *= vt5;

    // Fold s into t and compute s - 1 for the reconstruction below.
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;
    vt3 *= vs3;
    vs3 -= vone;
    vt4 *= vs4;
    vs4 -= vone;
    vt5 *= vs5;
    vs5 -= vone;

    // p := s * t * (1 + c2 * t + c3 * t**2), so p + (s - 1) ~ exp(z) - 1.
    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;
    vp4 = vp4 * vt4 + vt4;
    vp5 = vp5 * vt5 + vt5;

    // Negative branch: e := alpha * expm1(z); positive branch: y := beta * x.
    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = vx0 * vbeta;
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = vx1 * vbeta;
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = vx2 * vbeta;
    const float ve3 = (vp3 + vs3) * valpha;
    float vy3 = vx3 * vbeta;
    const float ve4 = (vp4 + vs4) * valpha;
    float vy4 = vx4 * vbeta;
    const float ve5 = (vp5 + vs5) * valpha;
    float vy5 = vx5 * vbeta;

    // Select the branch based on the sign of the input.
    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
      vy0 = ve0;
    }
    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
      vy1 = ve1;
    }
    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
      vy2 = ve2;
    }
    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
      vy3 = ve3;
    }
    if XNN_UNPREDICTABLE(vx4 < 0.0f) {
      vy4 = ve4;
    }
    if XNN_UNPREDICTABLE(vx5 < 0.0f) {
      vy5 = ve5;
    }

    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y[3] = vy3;
    y[4] = vy4;
    y[5] = vy5;
    y += 6;
  }
  // Remainder loop: process the final 1-5 elements one at a time.
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = vx * vprescale;

      float vn = vz * vlog2e + vmagic_bias;
      const uint32_t ven = float_as_uint32(vn) << 19;
      const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
      vn -= vmagic_bias;

      float vt = vn * vminus_ln2_hi + vz;
      float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);

      vt = vn * vminus_ln2_lo + vt;
      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
        vs = 0.0f;
        vt = 0.0f;
      }

      float vp = vc3 * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = vx * vbeta;
      if XNN_UNPREDICTABLE(vx < 0.0f) {
        vy = ve;
      }

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}