// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 4 == 0
$assert CHANNEL_TILE >= 4
$assert ROW_TILE >= 1
$assert SSE in [1, 2, 4]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$SSE_HEADER = {1: "xmmintrin.h", 2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>


$ISA = {1: "sse", 2: "sse2", 4: "sse41"}[SSE]
// PReLU microkernel: out[m][c] = in[m][c] < 0 ? in[m][c] * weights[c] : in[m][c],
// processing up to ${ROW_TILE} rows and ${CHANNEL_TILE} channels per iteration.
void xnn_f32_prelu_ukernel__${ISA}_${ROW_TILE}x${CHANNEL_TILE}(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  const float* i0 = input;
  float* o0 = output;
  $for M in range(1, ROW_TILE):
    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_stride);
    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_stride);

  const size_t input_increment = input_stride * ${ROW_TILE} - channels;
  const size_t output_increment = output_stride * ${ROW_TILE} - channels;

  $if SSE == 1:
    const __m128 vzero = _mm_setzero_ps();
  do {
    // If fewer than ${ROW_TILE} rows remain, alias the excess row pointers to the
    // previous row so the redundant iterations recompute the same outputs in place.
    $for M in range(1, ROW_TILE):
      $if M % 2 == 0:
        if XNN_UNPREDICTABLE(rows <= ${M}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
      $else:
        if XNN_UNPREDICTABLE(rows < ${M+1}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }

    const float* w = weights;
    size_t c = channels;
    // Main loop: ${CHANNEL_TILE} channels per iteration.
    for (; c >= ${CHANNEL_TILE} * sizeof(float); c -= ${CHANNEL_TILE} * sizeof(float)) {
      const __m128 vw${ABC[0:4]} = _mm_load_ps(w);
      $for C in range(4, CHANNEL_TILE, 4):
        const __m128 vw${ABC[C:C+4]} = _mm_load_ps(w + ${C});
      w += ${CHANNEL_TILE};

      $for M in range(ROW_TILE):
        $if SSE == 1:
          __m128 vi${M}x${ABC[0:4]} = _mm_loadu_ps(i${M});
          $for C in range(4, CHANNEL_TILE, 4):
            __m128 vi${M}x${ABC[C:C+4]} = _mm_loadu_ps(i${M} + ${C});
        $else:
          const __m128 vi${M}x${ABC[0:4]} = _mm_loadu_ps(i${M});
          $for C in range(4, CHANNEL_TILE, 4):
            const __m128 vi${M}x${ABC[C:C+4]} = _mm_loadu_ps(i${M} + ${C});
        i${M} += ${CHANNEL_TILE};

      // SSE1 computes PReLU as max(x, 0) + w * min(x, 0); SSE2 selects between x and
      // w * x with a sign mask built from an integer compare; SSE4.1 uses BLENDVPS,
      // which keys the selection on the sign bit of x.
      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          $if SSE == 1:
            __m128 vacc${M}x${ABC[C:C+4]} = _mm_max_ps(_mm_setzero_ps(), vi${M}x${ABC[C:C+4]});
            vi${M}x${ABC[C:C+4]} = _mm_min_ps(vi${M}x${ABC[C:C+4]}, vzero);
          $else:
            const __m128 vprod${M}x${ABC[C:C+4]} = _mm_mul_ps(vi${M}x${ABC[C:C+4]}, vw${ABC[C:C+4]});
            $if SSE == 2:
              const __m128 vmask${M}x${ABC[C:C+4]} = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x${ABC[C:C+4]})));

      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          $if SSE == 1:
            vacc${M}x${ABC[C:C+4]} = _mm_add_ps(vacc${M}x${ABC[C:C+4]}, _mm_mul_ps(vi${M}x${ABC[C:C+4]}, vw${ABC[C:C+4]}));
          $elif SSE == 2:
            const __m128 vacc${M}x${ABC[C:C+4]} = _mm_or_ps(_mm_and_ps(vprod${M}x${ABC[C:C+4]}, vmask${M}x${ABC[C:C+4]}), _mm_andnot_ps(vmask${M}x${ABC[C:C+4]}, vi${M}x${ABC[C:C+4]}));
          $elif SSE == 4:
            const __m128 vacc${M}x${ABC[C:C+4]} = _mm_blendv_ps(vi${M}x${ABC[C:C+4]}, vprod${M}x${ABC[C:C+4]}, vi${M}x${ABC[C:C+4]});

      $for M in range(ROW_TILE):
        _mm_storeu_ps(o${M}, vacc${M}x${ABC[0:4]});
        $for C in range(4, CHANNEL_TILE, 4):
          _mm_storeu_ps(o${M} + ${C}, vacc${M}x${ABC[C:C+4]});
        o${M} += ${CHANNEL_TILE};
    }
    $if CHANNEL_TILE > 4:
      // Remainder loop: 4 channels per iteration.
      for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
        const __m128 vw0123 = _mm_load_ps(w);
        w += 4;

        $for M in range(ROW_TILE):
          $if SSE == 1:
            __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
          $else:
            const __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
          i${M} += 4;

        $for M in range(ROW_TILE):
          $if SSE == 1:
            __m128 vacc${M}x0123 = _mm_max_ps(_mm_setzero_ps(), vi${M}x0123);
            vi${M}x0123 = _mm_min_ps(vi${M}x0123, vzero);
          $else:
            const __m128 vprod${M}x0123 = _mm_mul_ps(vi${M}x0123, vw0123);
            $if SSE == 2:
              const __m128 vmask${M}x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x0123)));

        $for M in range(ROW_TILE):
          $if SSE == 1:
            vacc${M}x0123 = _mm_add_ps(vacc${M}x0123, _mm_mul_ps(vi${M}x0123, vw0123));
          $elif SSE == 2:
            __m128 vacc${M}x0123 = _mm_or_ps(_mm_and_ps(vprod${M}x0123, vmask${M}x0123), _mm_andnot_ps(vmask${M}x0123, vi${M}x0123));
          $elif SSE == 4:
            __m128 vacc${M}x0123 = _mm_blendv_ps(vi${M}x0123, vprod${M}x0123, vi${M}x0123);

        $for M in range(ROW_TILE):
          _mm_storeu_ps(o${M}, vacc${M}x0123);
          o${M} += 4;
      }
    // Tail: 1-3 remaining channels. The 4-wide loads of weights and inputs may
    // overread up to 3 floats (permitted by XNN_OOB_READS); the partial stores
    // below write exactly c bytes.
    if XNN_UNLIKELY(c != 0) {
      const __m128 vw0123 = _mm_load_ps(w);
      w = (const float*) ((uintptr_t) w + c);

      $for M in range(ROW_TILE):
        $if SSE == 1:
          __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
        $else:
          const __m128 vi${M}x0123 = _mm_loadu_ps(i${M});
        i${M} = (const float*) ((uintptr_t) i${M} + c);

      $for M in range(ROW_TILE):
        $if SSE == 1:
          __m128 vacc${M}x0123 = _mm_max_ps(_mm_setzero_ps(), vi${M}x0123);
          vi${M}x0123 = _mm_min_ps(vi${M}x0123, vzero);
        $else:
          const __m128 vprod${M}x0123 = _mm_mul_ps(vi${M}x0123, vw0123);
          $if SSE == 2:
            const __m128 vmask${M}x0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vi${M}x0123)));

      $for M in range(ROW_TILE):
        $if SSE == 1:
          vacc${M}x0123 = _mm_add_ps(vacc${M}x0123, _mm_mul_ps(vi${M}x0123, vw0123));
        $elif SSE == 2:
          __m128 vacc${M}x0123 = _mm_or_ps(_mm_and_ps(vprod${M}x0123, vmask${M}x0123), _mm_andnot_ps(vmask${M}x0123, vi${M}x0123));
        $elif SSE == 4:
          __m128 vacc${M}x0123 = _mm_blendv_ps(vi${M}x0123, vprod${M}x0123, vi${M}x0123);

      if (c & (2 * sizeof(float))) {
        $for M in range(ROW_TILE):
          _mm_storel_pi((__m64*) o${M}, vacc${M}x0123);

        $for M in range(ROW_TILE):
          vacc${M}x0123 = _mm_movehl_ps(vacc${M}x0123, vacc${M}x0123);

        $for M in range(ROW_TILE):
          o${M} += 2;
      }
      if (c & (1 * sizeof(float))) {
        $for M in range(ROW_TILE):
          _mm_store_ss(o${M}, vacc${M}x0123);

        $for M in range(ROW_TILE):
          o${M} += 1;
      }
    }
    $for M in range(ROW_TILE):
      i${M} = (const float*) ((uintptr_t) i${M} + input_increment);
      o${M} = (float*) ((uintptr_t) o${M} + output_increment);
    rows = doz(rows, ${ROW_TILE});  // difference-or-zero: saturating decrement of the row count
  } while (rows != 0);
}