// Auto-generated file. Do not edit!
//   Template: src/f32-qs8-vcvt/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>

void xnn_f32_qu8_vcvt_ukernel__neon_x32(
    size_t n,
    const float* x,
    uint8_t* y,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

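  // Load the quantization parameters: the scale factor, the magic bias used for float-to-integer
  // rounding, its integer representation less the zero point, and the output clamping bounds.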
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon.magic_bias);
  const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon.magic_bias_less_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);
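  // Main loop: convert 32 floats per iteration using eight 4-lane NEON registers.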
  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(x); x += 4;
    float32x4_t vx4567 = vld1q_f32(x); x += 4;
    float32x4_t vx89AB = vld1q_f32(x); x += 4;
    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
    float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
    float32x4_t vxKLMN = vld1q_f32(x); x += 4;
    float32x4_t vxOPQR = vld1q_f32(x); x += 4;
    float32x4_t vxSTUV = vld1q_f32(x); x += 4;

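    // Scale the inputs.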
    vx0123 = vmulq_f32(vx0123, vscale);
    vx4567 = vmulq_f32(vx4567, vscale);
    vx89AB = vmulq_f32(vx89AB, vscale);
    vxCDEF = vmulq_f32(vxCDEF, vscale);
    vxGHIJ = vmulq_f32(vxGHIJ, vscale);
    vxKLMN = vmulq_f32(vxKLMN, vscale);
    vxOPQR = vmulq_f32(vxOPQR, vscale);
    vxSTUV = vmulq_f32(vxSTUV, vscale);

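    // Add the magic bias so the scaled value is rounded to an integer that lands in the
    // low bits of the floating-point representation.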
    vx0123 = vaddq_f32(vx0123, vmagic_bias);
    vx4567 = vaddq_f32(vx4567, vmagic_bias);
    vx89AB = vaddq_f32(vx89AB, vmagic_bias);
    vxCDEF = vaddq_f32(vxCDEF, vmagic_bias);
    vxGHIJ = vaddq_f32(vxGHIJ, vmagic_bias);
    vxKLMN = vaddq_f32(vxKLMN, vmagic_bias);
    vxOPQR = vaddq_f32(vxOPQR, vmagic_bias);
    vxSTUV = vaddq_f32(vxSTUV, vmagic_bias);

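    // Reinterpret the biased floats as integers and subtract the integer representation of the
    // magic bias less the zero point, with saturation, to recover the quantized values.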
    const int32x4_t vacc0123 = vqsubq_s32(vreinterpretq_s32_f32(vx0123), vmagic_bias_less_zero_point);
    const int32x4_t vacc4567 = vqsubq_s32(vreinterpretq_s32_f32(vx4567), vmagic_bias_less_zero_point);
    const int32x4_t vacc89AB = vqsubq_s32(vreinterpretq_s32_f32(vx89AB), vmagic_bias_less_zero_point);
    const int32x4_t vaccCDEF = vqsubq_s32(vreinterpretq_s32_f32(vxCDEF), vmagic_bias_less_zero_point);
    const int32x4_t vaccGHIJ = vqsubq_s32(vreinterpretq_s32_f32(vxGHIJ), vmagic_bias_less_zero_point);
    const int32x4_t vaccKLMN = vqsubq_s32(vreinterpretq_s32_f32(vxKLMN), vmagic_bias_less_zero_point);
    const int32x4_t vaccOPQR = vqsubq_s32(vreinterpretq_s32_f32(vxOPQR), vmagic_bias_less_zero_point);
    const int32x4_t vaccSTUV = vqsubq_s32(vreinterpretq_s32_f32(vxSTUV), vmagic_bias_less_zero_point);

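    // Pack the 32-bit results into 16-bit lanes with signed saturation.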
    const int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    const int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    const int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
    const int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));

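    // Pack the 16-bit results into unsigned 8-bit lanes with saturation.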
    uint8x16_t vy0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    uint8x16_t vyGHIJKLMNOPQRSTUV = vcombine_u8(vqmovun_s16(vaccGHIJKLMN), vqmovun_s16(vaccOPQRSTUV));

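    // Clamp the results to the [output_min, output_max] range.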
    vy0123456789ABCDEF = vmaxq_u8(vy0123456789ABCDEF, voutput_min);
    vyGHIJKLMNOPQRSTUV = vmaxq_u8(vyGHIJKLMNOPQRSTUV, voutput_min);

    vy0123456789ABCDEF = vminq_u8(vy0123456789ABCDEF, voutput_max);
    vyGHIJKLMNOPQRSTUV = vminq_u8(vyGHIJKLMNOPQRSTUV, voutput_max);

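    // Store all 32 converted values.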
    vst1q_u8(y, vy0123456789ABCDEF); y += 16;
    vst1q_u8(y, vyGHIJKLMNOPQRSTUV); y += 16;
  }
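  // Remainder loop: convert 8 floats per iteration.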
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    float32x4_t vx_lo = vld1q_f32(x); x += 4;
    float32x4_t vx_hi = vld1q_f32(x); x += 4;

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    vx_lo = vaddq_f32(vx_lo, vmagic_bias);
    vx_hi = vaddq_f32(vx_hi, vmagic_bias);

    const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
    const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);

    const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));
    vst1_u8(y, vy); y += 8;
  }
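  // Tail: convert the final 1-7 floats. The vector loads may read past the valid elements
  // (the kernel is annotated with XNN_OOB_READS), but only valid results are stored.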
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    float32x4_t vx_lo = vld1q_f32(x);
    const float* x_hi = (const float*) ((uintptr_t) x + (n & (4 * sizeof(float))));
    float32x4_t vx_hi = vld1q_f32(x_hi);

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    vx_lo = vaddq_f32(vx_lo, vmagic_bias);
    vx_hi = vaddq_f32(vx_hi, vmagic_bias);

    const int32x4_t vacc_lo = vqsubq_s32(vreinterpretq_s32_f32(vx_lo), vmagic_bias_less_zero_point);
    const int32x4_t vacc_hi = vqsubq_s32(vreinterpretq_s32_f32(vx_hi), vmagic_bias_less_zero_point);

    const int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));

    uint8x8_t vy = vqmovun_s16(vacc);
    vy = vmax_u8(vy, vget_low_u8(voutput_min));
    vy = vmin_u8(vy, vget_low_u8(voutput_max));

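    // Store the remaining 4, 2, and/or 1 values, shifting the vector after each partial store.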
    if (n & (4 * sizeof(float))) {
      vst1_lane_u32((void*) y, vreinterpret_u32_u8(vy), 0); y += 4;
      vy = vext_u8(vy, vy, 4);
    }
    if (n & (2 * sizeof(float))) {
      vst1_lane_u16((void*) y, vreinterpret_u16_u8(vy), 0); y += 2;
      vy = vext_u8(vy, vy, 2);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_u8(y, vy, 0);
    }
  }
}