// xref: /aosp_15_r20/external/XNNPACK/src/f32-gemm/gen/4x4-scalar.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

xnn_f32_gemm_ukernel_4x4__scalar(size_t mr,size_t nc,size_t kc,const float * restrict a,size_t a_stride,const float * restrict w,float * restrict c,size_t cm_stride,size_t cn_stride,const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS (1)])16 void xnn_f32_gemm_ukernel_4x4__scalar(
17     size_t mr,
18     size_t nc,
19     size_t kc,
20     const float* restrict a,
21     size_t a_stride,
22     const float* restrict w,
23     float* restrict c,
24     size_t cm_stride,
25     size_t cn_stride,
26     const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
27 {
28   assert(mr != 0);
29   assert(mr <= 4);
30   assert(nc != 0);
31   assert(kc != 0);
32   assert(kc % sizeof(float) == 0);
33   assert(a != NULL);
34   assert(w != NULL);
35   assert(c != NULL);
36 
37   const float* a0 = a;
38   float* c0 = c;
39   const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
40   float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
41   if XNN_UNPREDICTABLE(mr < 2) {
42     a1 = a0;
43     c1 = c0;
44   }
45   const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
46   float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
47   if XNN_UNPREDICTABLE(mr <= 2) {
48     a2 = a1;
49     c2 = c1;
50   }
51   const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
52   float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
53   if XNN_UNPREDICTABLE(mr != 4) {
54     a3 = a2;
55     c3 = c2;
56   }
57 
58   do {
59     float vacc00 = w[0];
60     float vacc01 = w[1];
61     float vacc02 = w[2];
62     float vacc03 = w[3];
63     w += 4;
64     float vacc10 = vacc00;
65     float vacc11 = vacc01;
66     float vacc12 = vacc02;
67     float vacc13 = vacc03;
68     float vacc20 = vacc00;
69     float vacc21 = vacc01;
70     float vacc22 = vacc02;
71     float vacc23 = vacc03;
72     float vacc30 = vacc00;
73     float vacc31 = vacc01;
74     float vacc32 = vacc02;
75     float vacc33 = vacc03;
76 
77     size_t k = kc;
78     do {
79       const float va0 = *a0++;
80       const float va1 = *a1++;
81       const float va2 = *a2++;
82       const float va3 = *a3++;
83 
84       const float vb0 = w[0];
85       const float vb1 = w[1];
86       const float vb2 = w[2];
87       const float vb3 = w[3];
88       w += 4;
89 
90       vacc00 = math_muladd_f32(va0, vb0, vacc00);
91       vacc01 = math_muladd_f32(va0, vb1, vacc01);
92       vacc02 = math_muladd_f32(va0, vb2, vacc02);
93       vacc03 = math_muladd_f32(va0, vb3, vacc03);
94       vacc10 = math_muladd_f32(va1, vb0, vacc10);
95       vacc11 = math_muladd_f32(va1, vb1, vacc11);
96       vacc12 = math_muladd_f32(va1, vb2, vacc12);
97       vacc13 = math_muladd_f32(va1, vb3, vacc13);
98       vacc20 = math_muladd_f32(va2, vb0, vacc20);
99       vacc21 = math_muladd_f32(va2, vb1, vacc21);
100       vacc22 = math_muladd_f32(va2, vb2, vacc22);
101       vacc23 = math_muladd_f32(va2, vb3, vacc23);
102       vacc30 = math_muladd_f32(va3, vb0, vacc30);
103       vacc31 = math_muladd_f32(va3, vb1, vacc31);
104       vacc32 = math_muladd_f32(va3, vb2, vacc32);
105       vacc33 = math_muladd_f32(va3, vb3, vacc33);
106 
107       k -= sizeof(float);
108     } while (k != 0);
109 
110 
111     if XNN_LIKELY(nc >= 4) {
112       c3[0] = vacc30;
113       c3[1] = vacc31;
114       c3[2] = vacc32;
115       c3[3] = vacc33;
116       c3 = (float*) ((uintptr_t) c3 + cn_stride);
117       c2[0] = vacc20;
118       c2[1] = vacc21;
119       c2[2] = vacc22;
120       c2[3] = vacc23;
121       c2 = (float*) ((uintptr_t) c2 + cn_stride);
122       c1[0] = vacc10;
123       c1[1] = vacc11;
124       c1[2] = vacc12;
125       c1[3] = vacc13;
126       c1 = (float*) ((uintptr_t) c1 + cn_stride);
127       c0[0] = vacc00;
128       c0[1] = vacc01;
129       c0[2] = vacc02;
130       c0[3] = vacc03;
131       c0 = (float*) ((uintptr_t) c0 + cn_stride);
132 
133       a3 = (const void*) ((uintptr_t) a3 - kc);
134       a2 = (const void*) ((uintptr_t) a2 - kc);
135       a1 = (const void*) ((uintptr_t) a1 - kc);
136       a0 = (const void*) ((uintptr_t) a0 - kc);
137 
138       nc -= 4;
139     } else {
140       if (nc & 2) {
141         c3[0] = vacc30;
142         c3[1] = vacc31;
143         vacc30 = vacc32;
144         c3 += 2;
145         c2[0] = vacc20;
146         c2[1] = vacc21;
147         vacc20 = vacc22;
148         c2 += 2;
149         c1[0] = vacc10;
150         c1[1] = vacc11;
151         vacc10 = vacc12;
152         c1 += 2;
153         c0[0] = vacc00;
154         c0[1] = vacc01;
155         vacc00 = vacc02;
156         c0 += 2;
157       }
158       if (nc & 1) {
159         c3[0] = vacc30;
160         c2[0] = vacc20;
161         c1[0] = vacc10;
162         c0[0] = vacc00;
163       }
164 
165       nc = 0;
166     }
167   } while (nc != 0);
168 }
169