// Auto-generated file. Do not edit!
//   Template: src/f32-vlrelu/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

void xnn_f32_vlrelu_ukernel__sse2_x8(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
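  // n is the input size in bytes and must be a non-zero multiple of sizeof(float).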
  assert(n != 0);
  assert(n % sizeof(float) == 0);

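  // The slope is stored pre-replicated across four lanes in the parameter
  // structure, so a single aligned load makes it available to every lane.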
  const __m128 vslope = _mm_load_ps(params->sse.slope);
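  // Main loop: process 8 floats (two SSE registers) per iteration.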
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m128 vx0123 = _mm_loadu_ps(x);
    const __m128 vx4567 = _mm_loadu_ps(x + 4);
    x += 8;

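    // Compute x * slope unconditionally, and build a per-lane mask of the
    // negative inputs: reinterpreting the floats as 32-bit integers, the
    // signed comparison 0 > x is true exactly when the sign bit is set.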
    __m128 vacc0123 = _mm_mul_ps(vx0123, vslope);
    const __m128 vmask0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
    __m128 vacc4567 = _mm_mul_ps(vx4567, vslope);
    const __m128 vmask4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));

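    // SSE2 has no blend instruction, so select with AND/ANDNOT/OR:
    // negative lanes take x * slope, non-negative lanes pass x through.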
    vacc0123 = _mm_or_ps(_mm_and_ps(vacc0123, vmask0123), _mm_andnot_ps(vmask0123, vx0123));
    vacc4567 = _mm_or_ps(_mm_and_ps(vacc4567, vmask4567), _mm_andnot_ps(vmask4567, vx4567));

    _mm_storeu_ps(y, vacc0123);
    _mm_storeu_ps(y + 4, vacc4567);
    y += 8;
  }
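  // Remainder loop: process any remaining full block of 4 floats with the
  // same mask-and-select computation.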
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const __m128 vx = _mm_loadu_ps(x);
    x += 4;

    __m128 vacc = _mm_mul_ps(vx, vslope);
    const __m128 vmask = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
    vacc = _mm_or_ps(_mm_and_ps(vacc, vmask), _mm_andnot_ps(vmask, vx));

    _mm_storeu_ps(y, vacc);
    y += 4;
  }
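  // Tail of 1-3 floats: the full 4-float load may read past the end of x,
  // which the XNN_OOB_READS annotation on this kernel permits; only the
  // valid lanes are stored below.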
  if XNN_UNLIKELY(n != 0) {
    const __m128 vx = _mm_loadu_ps(x);

    __m128 vacc = _mm_mul_ps(vx, vslope);
    const __m128 vmask = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
    vacc = _mm_or_ps(_mm_and_ps(vacc, vmask), _mm_andnot_ps(vmask, vx));

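    // Store the low two lanes first, then shift the high lanes down so a
    // final single-lane store can cover an odd remainder.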
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vacc);
      vacc = _mm_movehl_ps(vacc, vacc);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vacc);
    }
  }
}
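
// A minimal usage sketch (not part of the generated kernel). The exact
// parameter-initialization helper varies across XNNPACK versions, so this
// fills the assumed params.sse.slope[4] layout by hand; the names `input`,
// `output`, and `batch` are hypothetical:
//
//   union xnn_f32_lrelu_params params;
//   for (size_t i = 0; i < 4; i++) {
//     params.sse.slope[i] = 0.01f;  // replicate the LeakyReLU slope per lane
//   }
//   xnn_f32_vlrelu_ukernel__sse2_x8(batch * sizeof(float), input, output, &params);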