// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$assert DIV_ALGO in ["div", "nr2fma", "nr2recps", "nr1recps1fma"]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

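// This template expands to NEON and NEON-FMA micro-kernels that evaluate the
// sigmoid non-linearity f(x) = 1 / (1 + exp(-x)). Outline of the algorithm,
// reconstructed from the code below (the polynomial coefficients and cutoffs
// themselves live in the params structure):
//
//   1. Reduce to z = |x|, so that only exp(-z) with z >= 0 is approximated;
//      the sign is restored at the end via the identity f(x) = 1 - f(-x).
//   2. Range reduction: n = round(-z * log2(e)) via the magic-bias trick,
//      with s = 2**n built by shifting the integer bits of n into the
//      floating-point exponent field. The remainder t = z + n * ln(2) uses a
//      single ln(2) constant on FMA targets (rr1) and a hi/lo split on plain
//      NEON (rr2) to limit the rounding error of the reduction.
//   3. exp(-t) is approximated by a degree-5 polynomial (p5), and exp(-z) is
//      reconstructed as e = s * (1 + t * p(t)).
//   4. f(-z) = e / (e + 1) uses either a hardware division (div) or a
//      vrecpeq_f32 estimate refined by two Newton-Raphson steps built from
//      VRECPS and/or FMA (nr2recps, nr2fma, nr1recps1fma).
//   5. Inputs with |x| > denorm_cutoff would underflow exp(-z), so f is
//      flushed to 0 there before the final sign selection with vbslq_f32.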
$PARAMS_STRUCT = "neonfma_rr1_p5" if FMA else "neon_rr2_p5"
void xnn_f32_vsigmoid_ukernel__${"neonfma" if FMA else "neon"}_rr${1 if FMA else 2}_p5_${DIV_ALGO}_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->${PARAMS_STRUCT}.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->${PARAMS_STRUCT}.minus_log2e);
  $if FMA:
    const float32x4_t vln2 = vld1q_dup_f32(&params->${PARAMS_STRUCT}.ln2);
  $else:
    const float32x4_t vln2_hi = vld1q_dup_f32(&params->${PARAMS_STRUCT}.ln2_hi);
    const float32x4_t vln2_lo = vld1q_dup_f32(&params->${PARAMS_STRUCT}.ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->${PARAMS_STRUCT}.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->${PARAMS_STRUCT}.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->${PARAMS_STRUCT}.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->${PARAMS_STRUCT}.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->${PARAMS_STRUCT}.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->${PARAMS_STRUCT}.denorm_cutoff);

  $if BATCH_TILE > 4:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vx${ABC[N:N+4]} = vld1q_f32(x); x += 4;

      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vz${ABC[N:N+4]} = vabsq_f32(vx${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        float32x4_t vn${ABC[N:N+4]} = ${VMULADDQ_F32}(vmagic_bias, vz${ABC[N:N+4]}, vminus_log2e);

      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vs${ABC[N:N+4]} = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn${ABC[N:N+4]}), 23));

      $for N in range(0, BATCH_TILE, 4):
        vn${ABC[N:N+4]} = vsubq_f32(vn${ABC[N:N+4]}, vmagic_bias);

      $if FMA:
        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vz${ABC[N:N+4]}, vn${ABC[N:N+4]}, vln2);
      $else:
        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vz${ABC[N:N+4]}, vn${ABC[N:N+4]}, vln2_hi);

        $for N in range(0, BATCH_TILE, 4):
          vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vt${ABC[N:N+4]}, vn${ABC[N:N+4]}, vln2_lo);

      $for N in range(0, BATCH_TILE, 4):
        float32x4_t vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc4, vc5, vt${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc3, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc2, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc1, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = vmulq_f32(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t ve${ABC[N:N+4]} = ${VMULADDQ_F32}(vs${ABC[N:N+4]}, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vd${ABC[N:N+4]} = vaddq_f32(ve${ABC[N:N+4]}, vone);

      $if DIV_ALGO == "div":
        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vf${ABC[N:N+4]} = vdivq_f32(ve${ABC[N:N+4]}, vd${ABC[N:N+4]});
      $else:
        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vr${ABC[N:N+4]} = vrecpeq_f32(vd${ABC[N:N+4]});

        $if DIV_ALGO == "nr2fma":
          $for N in range(0, BATCH_TILE, 4):
            vr${ABC[N:N+4]} = vfmaq_f32(vr${ABC[N:N+4]}, vr${ABC[N:N+4]}, vfmsq_f32(vone, vr${ABC[N:N+4]}, vd${ABC[N:N+4]}));
        $else:
          $for N in range(0, BATCH_TILE, 4):
            vr${ABC[N:N+4]} = vmulq_f32(vr${ABC[N:N+4]}, vrecpsq_f32(vr${ABC[N:N+4]}, vd${ABC[N:N+4]}));

        $if DIV_ALGO == "nr2recps":
          $for N in range(0, BATCH_TILE, 4):
            vr${ABC[N:N+4]} = vmulq_f32(vr${ABC[N:N+4]}, vrecpsq_f32(vr${ABC[N:N+4]}, vd${ABC[N:N+4]}));
        $else:
          $for N in range(0, BATCH_TILE, 4):
            vr${ABC[N:N+4]} = vfmaq_f32(vr${ABC[N:N+4]}, vr${ABC[N:N+4]}, vfmsq_f32(vone, vr${ABC[N:N+4]}, vd${ABC[N:N+4]}));

        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vf${ABC[N:N+4]} = vmulq_f32(ve${ABC[N:N+4]}, vr${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vf${ABC[N:N+4]} = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf${ABC[N:N+4]}), vcagtq_f32(vx${ABC[N:N+4]}, vdenorm_cutoff)));

      $for N in range(0, BATCH_TILE, 4):
        const uint32x4_t vm${ABC[N:N+4]} = vcltq_f32(vx${ABC[N:N+4]}, vmovq_n_f32(0.0f));

      $for N in range(0, BATCH_TILE, 4):
        vf${ABC[N:N+4]} = vbslq_f32(vm${ABC[N:N+4]}, vf${ABC[N:N+4]}, vsubq_f32(vone, vf${ABC[N:N+4]}));

      $for N in range(0, BATCH_TILE, 4):
        vst1q_f32(y, vf${ABC[N:N+4]}); y += 4;
    }
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vz, vminus_log2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    $if FMA:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vln2);
    $else:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vln2_hi);
      vt = ${VMULADDQ_F32}(vt, vn, vln2_lo);

    float32x4_t vp = ${VMULADDQ_F32}(vc4, vc5, vt);
    vp = ${VMULADDQ_F32}(vc3, vp, vt);
    vp = ${VMULADDQ_F32}(vc2, vp, vt);
    vp = ${VMULADDQ_F32}(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    const float32x4_t ve = ${VMULADDQ_F32}(vs, vp, vt);
    const float32x4_t vd = vaddq_f32(ve, vone);

    $if DIV_ALGO == "div":
      float32x4_t vf = vdivq_f32(ve, vd);
    $else:
      float32x4_t vr = vrecpeq_f32(vd);
      $if DIV_ALGO == "nr2fma":
        vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
      $else:
        vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
      $if DIV_ALGO == "nr2recps":
        vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
      $else:
        vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));

      float32x4_t vf = vmulq_f32(ve, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    vst1q_f32(y, vf); y += 4;
  }
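  // Tail of 1 to 3 remaining elements. vld1q_f32 still loads a full vector
  // (reading up to 3 floats past the end, which the XNN_OOB_READS annotation
  // on the kernel permits); only the valid lanes are stored below.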
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vz, vminus_log2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    $if FMA:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vln2);
    $else:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vln2_hi);
      vt = ${VMULADDQ_F32}(vt, vn, vln2_lo);

    float32x4_t vp = ${VMULADDQ_F32}(vc4, vc5, vt);
    vp = ${VMULADDQ_F32}(vc3, vp, vt);
    vp = ${VMULADDQ_F32}(vc2, vp, vt);
    vp = ${VMULADDQ_F32}(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    const float32x4_t ve = ${VMULADDQ_F32}(vs, vp, vt);
    const float32x4_t vd = vaddq_f32(ve, vone);

    $if DIV_ALGO == "div":
      float32x4_t vf = vdivq_f32(ve, vd);
    $else:
      float32x4_t vr = vrecpeq_f32(vd);
      $if DIV_ALGO == "nr2fma":
        vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
      $else:
        vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
      $if DIV_ALGO == "nr2recps":
        vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
      $else:
        vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));

      float32x4_t vf = vmulq_f32(ve, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vf_lo); y += 2;
      vf_lo = vget_high_f32(vf);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vf_lo, 0);
    }
  }
}
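// Usage sketch (an illustrative comment, not part of the template output):
// instantiating with FMA=1, DIV_ALGO="div", BATCH_TILE=16 yields a kernel
// named xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_div_x16, which callers
// invoke with the input size in bytes:
//
//   xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_div_x16(
//       batch_size * sizeof(float), input, output, &params);
//
// assuming params was previously initialized for the neonfma_rr1_p5 layout
// by the matching xnn_init_f32_sigmoid_*_params helper (helper name assumed
// here for illustration).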