// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/neon-pipelined.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/spmm.h>

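// Summary (inferred from the generated code below and the neon-pipelined template):
// computes a sparse-weights * dense-input product with min/max output clamping,
// 16 dense elements at a time.
//   mc            - size of the dense dimension, in bytes (multiple of sizeof(float))
//   nc            - number of output channels (rows of the sparse weight matrix)
//   weights       - per output channel: one bias value followed by that channel's
//                   non-zero weight values
//   widx_dmap     - byte offsets (deltas) applied to the input pointer, one per
//                   non-zero weight
//   nidx_nnzmap   - number of non-zero weights for each output channel
//   output_stride - distance between consecutive output channels, in bytes
//   params        - scalar min/max clamping bounds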
void xnn_f32_spmm_minmax_ukernel_16x1__neon_pipelined(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  size_t output_decrement = output_stride * nc - 16 * sizeof(float);
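  // Main loop: processes the dense dimension in blocks of 16 floats. In this
  // pipelined variant the next weight and the next 16 input values are loaded
  // ahead of their use, so the loads of one accumulation step overlap the
  // multiply-accumulates of the previous one.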
  while XNN_LIKELY(mc >= 16 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    float32x4_t vw = vld1q_dup_f32(w); w += 1;
    intptr_t diff = *dmap++;
    float32x4_t vi0123 = vld1q_f32(input);
    float32x4_t vi4567 = vld1q_f32(input + 4);
    float32x4_t vi89AB = vld1q_f32(input + 8);
    float32x4_t viCDEF = vld1q_f32(input + 12);
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      float32x4_t vacc0123 = vw;
      float32x4_t vacc4567 = vw;
      float32x4_t vacc89AB = vw;
      float32x4_t vaccCDEF = vw;
      vw = vld1q_dup_f32(w); w += 1;
      if XNN_LIKELY(nnz != 0) {
        do {
          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
          vacc4567 = vmlaq_f32(vacc4567, vi4567, vw);
          vacc89AB = vmlaq_f32(vacc89AB, vi89AB, vw);
          vaccCDEF = vmlaq_f32(vaccCDEF, viCDEF, vw);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          __builtin_prefetch(input + 16);
          diff = *dmap++;
          vw = vld1q_dup_f32(w); w += 1;
          __builtin_prefetch(w + 32);
          vi0123 = vld1q_f32(input);
          vi4567 = vld1q_f32(input + 4);
          vi89AB = vld1q_f32(input + 8);
          viCDEF = vld1q_f32(input + 12);
        } while (--nnz != 0);
      }
      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
      float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
      float32x4_t vout89AB = vminq_f32(vacc89AB, vmax);
      float32x4_t voutCDEF = vminq_f32(vaccCDEF, vmax);
      vout0123 = vmaxq_f32(vout0123, vmin);
      vout4567 = vmaxq_f32(vout4567, vmin);
      vout89AB = vmaxq_f32(vout89AB, vmin);
      voutCDEF = vmaxq_f32(voutCDEF, vmin);
      vst1q_f32(output, vout0123);
      vst1q_f32(output + 4, vout4567);
      vst1q_f32(output + 8, vout89AB);
      vst1q_f32(output + 12, voutCDEF);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 16;
    mc -= 16 * sizeof(float);
  }
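  // Remainder path: handles a tail of fewer than 16 elements in progressively
  // smaller blocks of 8, 4, 2, and 1 floats. output_decrement grows before each
  // block so that, after the last output channel, the output pointer lands at
  // the start of the next unprocessed block.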
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 8 * sizeof(float);
    if (mc & (8 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        float32x4_t vacc4567 = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t vi0123 = vld1q_f32(input);
            const float32x4_t vi4567 = vld1q_f32(input + 4);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            __builtin_prefetch(input + 16);
            const float32x4_t vb = vld1q_dup_f32(w); w += 1;
            __builtin_prefetch(w + 32);
            vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
            vacc4567 = vmlaq_f32(vacc4567, vi4567, vb);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
        float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
        vout0123 = vmaxq_f32(vout0123, vmin);
        vout4567 = vmaxq_f32(vout4567, vmin);
        vst1q_f32(output, vout0123);
        vst1q_f32(output + 4, vout4567);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 8;
    }
    output_decrement += 4 * sizeof(float);
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x4_t vi0123 = vld1q_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            __builtin_prefetch(input + 16);
            const float32x4_t vb = vld1q_dup_f32(w); w += 1;
            __builtin_prefetch(w + 32);
            vacc0123 = vmlaq_f32(vacc0123, vi0123, vb);
          } while (--nnz != 0);
        }
        float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
        vout0123 = vmaxq_f32(vout0123, vmin);
        vst1q_f32(output, vout0123);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi01 = vld1_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            __builtin_prefetch(input + 16);
            const float32x2_t vb = vld1_dup_f32(w); w += 1;
            __builtin_prefetch(w + 32);
            vacc01 = vmla_f32(vacc01, vi01, vb);
          } while (--nnz != 0);
        }
        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
        vst1_f32(output, vout01);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi0 = vld1_dup_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            __builtin_prefetch(input + 16);
            const float32x2_t vb = vld1_dup_f32(w); w += 1;
            __builtin_prefetch(w + 32);
            vacc0 = vmla_f32(vacc0, vi0, vb);
          } while (--nnz != 0);
        }
        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
        vst1_lane_f32(output, vout0, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}