// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

6 #include <assert.h>
7
8 #include <smmintrin.h>
9
10 #include <xnnpack/unaligned.h>
11 #include <xnnpack/vunary.h>
12
13
xnn_s8_vclamp_ukernel__sse41_x64(size_t n,const int8_t * x,int8_t * y,const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS (1)])14 void xnn_s8_vclamp_ukernel__sse41_x64(
15 size_t n,
16 const int8_t* x,
17 int8_t* y,
18 const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
19 {
20 assert(n != 0);
21
22 const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.max);
23 const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.min);
24 for (; n >= 64; n -= 64) {
25 __m128i vacc0 = _mm_loadu_si128((const __m128i*) x);
26 __m128i vacc1 = _mm_loadu_si128((const __m128i*) x + 1);
27 __m128i vacc2 = _mm_loadu_si128((const __m128i*) x + 2);
28 __m128i vacc3 = _mm_loadu_si128((const __m128i*) x + 3);
29 x += 64;
30
31 vacc0 = _mm_max_epi8(vacc0, voutput_min);
32 vacc1 = _mm_max_epi8(vacc1, voutput_min);
33 vacc2 = _mm_max_epi8(vacc2, voutput_min);
34 vacc3 = _mm_max_epi8(vacc3, voutput_min);
35
36 vacc0 = _mm_min_epi8(vacc0, voutput_max);
37 vacc1 = _mm_min_epi8(vacc1, voutput_max);
38 vacc2 = _mm_min_epi8(vacc2, voutput_max);
39 vacc3 = _mm_min_epi8(vacc3, voutput_max);
40
41 _mm_storeu_si128((__m128i*) y, vacc0);
42 _mm_storeu_si128((__m128i*) y + 1, vacc1);
43 _mm_storeu_si128((__m128i*) y + 2, vacc2);
44 _mm_storeu_si128((__m128i*) y + 3, vacc3);
45 y += 64;
46 }
47 for (; n >= 16; n -= 16) {
48 __m128i vacc = _mm_loadu_si128((const __m128i*) x);
49 x += 16;
50
51 vacc = _mm_min_epi8(vacc, voutput_max);
52 vacc = _mm_max_epi8(vacc, voutput_min);
53
54 _mm_storeu_si128((__m128i*) y, vacc);
55 y += 16;
56 }
57 if XNN_UNLIKELY(n != 0) {
58 __m128i vacc = _mm_loadu_si128((const __m128i*) x);
59
60 vacc = _mm_min_epi8(vacc, voutput_max);
61 vacc = _mm_max_epi8(vacc, voutput_min);
62
63 if (n & 8) {
64 _mm_storel_epi64((__m128i*) y, vacc);
65 y += 8;
66 vacc = _mm_unpackhi_epi64(vacc, vacc);
67 }
68 if (n & 4) {
69 unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vacc));
70 y += 4;
71 vacc = _mm_srli_epi64(vacc, 32);
72 }
73 if (n & 2) {
74 unaligned_store_u16(y, (uint16_t) _mm_cvtsi128_si32(vacc));
75 y += 2;
76 vacc = _mm_srli_epi32(vacc, 16);
77 }
78 if (n & 1) {
79 *y = (int8_t) _mm_cvtsi128_si32(vacc);
80 }
81 }
82 }
83