// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert PIXEL_TILE >= 1
$assert PIXEL_TILE % 4 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/ibilinear.h>

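// Bilinear interpolation micro-kernel template (SSE) for F32 data in CHW
// layout. Input pixels are addressed indirectly through an array of row
// pointers; output pixels are processed ${PIXEL_TILE} at a time where
// possible, then in groups of 4, 2, and finally 1.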
void xnn_f32_ibilinear_chw_ukernel__sse_p${PIXEL_TILE}(
    size_t output_pixels,
    size_t channels,
    const float**restrict input,
    size_t input_offset,
    const float*restrict weights,
    float*restrict output,
    size_t input_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);
  assert(input_increment % sizeof(float) == 0);

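  // Each iteration of this loop handles one channel: the pixel pointers and
  // weights are re-read from the start, while input_offset is advanced by
  // input_increment below so that the same pointers address the next
  // channel's data.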
  do {
    const float** i = input;
    const float* w = weights;
    size_t p = output_pixels;
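    // For each output pixel the indirection buffer holds two pointers: one to
    // the {top-left, top-right} pair and one to the {bottom-left, bottom-right}
    // pair. The weights are interleaved per pixel as {alpha_h, alpha_v}.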
    $if PIXEL_TILE > 4:
      for (; p >= ${PIXEL_TILE}; p -= ${PIXEL_TILE}) {
        $for P in range(PIXEL_TILE):
          const float* itl${ABC[P]} = (const float*) ((uintptr_t) i[${2 * P}] + input_offset);
          const float* ibl${ABC[P]} = (const float*) ((uintptr_t) i[${2 * P + 1}] + input_offset);
        i += 2 * ${PIXEL_TILE};

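        // Load the interleaved {alpha_h, alpha_v} weights for the tile; the
        // shuffles below de-interleave them into separate horizontal and
        // vertical weight vectors.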
        $for P in range(0, PIXEL_TILE, 4):
          const __m128 vw${ABC[P:P+4]}p0 = _mm_loadu_ps(w + ${2 * P});
          const __m128 vw${ABC[P:P+4]}p1 = _mm_loadu_ps(w + ${2 * P + 4});
        w += 2 * ${PIXEL_TILE};

        $for P in range(0, PIXEL_TILE, 2):
          const __m128 vtltr${ABC[P]} = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl${ABC[P]});
          const __m128 vblbr${ABC[P]} = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl${ABC[P]});

        $for P in range(0, PIXEL_TILE, 4):
          const __m128 valphah${ABC[P:P+4]} = _mm_shuffle_ps(vw${ABC[P:P+4]}p0, vw${ABC[P:P+4]}p1, _MM_SHUFFLE(2, 0, 2, 0));
          const __m128 valphav${ABC[P:P+4]} = _mm_shuffle_ps(vw${ABC[P:P+4]}p0, vw${ABC[P:P+4]}p1, _MM_SHUFFLE(3, 1, 3, 1));

        $for P in range(0, PIXEL_TILE, 2):
          const __m128 vtltr${ABC[P:P+2]} = _mm_loadh_pi(vtltr${ABC[P]}, (const __m64*) itl${ABC[P+1]});
          const __m128 vblbr${ABC[P:P+2]} = _mm_loadh_pi(vblbr${ABC[P]}, (const __m64*) ibl${ABC[P+1]});

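        // Bottom-minus-top differences, still interleaved as {left_diff,
        // right_diff} per pixel.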
        $for P in range(0, PIXEL_TILE, 2):
          const __m128 vldrd${ABC[P:P+2]} = _mm_sub_ps(vblbr${ABC[P:P+2]}, vtltr${ABC[P:P+2]});

        $for P in range(0, PIXEL_TILE, 4):
          const __m128 vld${ABC[P:P+4]} = _mm_shuffle_ps(vldrd${ABC[P:P+2]}, vldrd${ABC[P+2:P+4]}, _MM_SHUFFLE(2, 0, 2, 0));
          const __m128 vrd${ABC[P:P+4]} = _mm_shuffle_ps(vldrd${ABC[P:P+2]}, vldrd${ABC[P+2:P+4]}, _MM_SHUFFLE(3, 1, 3, 1));

        $for P in range(0, PIXEL_TILE, 4):
          const __m128 vtl${ABC[P:P+4]} = _mm_shuffle_ps(vtltr${ABC[P:P+2]}, vtltr${ABC[P+2:P+4]}, _MM_SHUFFLE(2, 0, 2, 0));
          const __m128 vtr${ABC[P:P+4]} = _mm_shuffle_ps(vtltr${ABC[P:P+2]}, vtltr${ABC[P+2:P+4]}, _MM_SHUFFLE(3, 1, 3, 1));

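        // Vertical interpolation: left = top_left + alpha_v * (bottom_left - top_left),
        // and likewise for the right column.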
        $for P in range(0, PIXEL_TILE, 4):
          const __m128 vl${ABC[P:P+4]} = _mm_add_ps(vtl${ABC[P:P+4]}, _mm_mul_ps(vld${ABC[P:P+4]}, valphav${ABC[P:P+4]}));
          const __m128 vr${ABC[P:P+4]} = _mm_add_ps(vtr${ABC[P:P+4]}, _mm_mul_ps(vrd${ABC[P:P+4]}, valphav${ABC[P:P+4]}));

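        // Horizontal interpolation: result = left + alpha_h * (right - left).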
        $for P in range(0, PIXEL_TILE, 4):
          const __m128 vd${ABC[P:P+4]} = _mm_sub_ps(vr${ABC[P:P+4]}, vl${ABC[P:P+4]});

        $for P in range(0, PIXEL_TILE, 4):
          const __m128 vo${ABC[P:P+4]} = _mm_add_ps(vl${ABC[P:P+4]}, _mm_mul_ps(vd${ABC[P:P+4]}, valphah${ABC[P:P+4]}));

        $for P in range(0, PIXEL_TILE, 4):
          _mm_storeu_ps(output + ${P}, vo${ABC[P:P+4]});
        output += ${PIXEL_TILE};
      }

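    // Process the remaining output pixels in groups of 4.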
    for (; p >= 4; p -= 4) {
      $for P in range(4):
        const float* itl${P} = (const float*) ((uintptr_t) i[${2 * P}] + input_offset);
        const float* ibl${P} = (const float*) ((uintptr_t) i[${2 * P + 1}] + input_offset);
      i += 8;

      const __m128 vw0 = _mm_loadu_ps(w);
      const __m128 vw1 = _mm_loadu_ps(w + 4);
      w += 8;

      $for P in range(0, 4, 2):
        const __m128 vtltr${ABC[P]} = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl${P});
        const __m128 vblbr${ABC[P]} = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl${P});

      const __m128 valphah = _mm_shuffle_ps(vw0, vw1, _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 valphav = _mm_shuffle_ps(vw0, vw1, _MM_SHUFFLE(3, 1, 3, 1));

      $for P in range(0, 4, 2):
        const __m128 vtltr${ABC[P:P+2]} = _mm_loadh_pi(vtltr${ABC[P]}, (const __m64*) itl${P+1});
        const __m128 vblbr${ABC[P:P+2]} = _mm_loadh_pi(vblbr${ABC[P]}, (const __m64*) ibl${P+1});

      $for P in range(0, 4, 2):
        const __m128 vldrd${ABC[P:P+2]} = _mm_sub_ps(vblbr${ABC[P:P+2]}, vtltr${ABC[P:P+2]});

      const __m128 vld = _mm_shuffle_ps(vldrd01, vldrd23, _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vrd = _mm_shuffle_ps(vldrd01, vldrd23, _MM_SHUFFLE(3, 1, 3, 1));

      const __m128 vtl = _mm_shuffle_ps(vtltr01, vtltr23, _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vtr = _mm_shuffle_ps(vtltr01, vtltr23, _MM_SHUFFLE(3, 1, 3, 1));

      const __m128 vl = _mm_add_ps(vtl, _mm_mul_ps(vld, valphav));
      const __m128 vr = _mm_add_ps(vtr, _mm_mul_ps(vrd, valphav));

      const __m128 vd = _mm_sub_ps(vr, vl);
      const __m128 vo = _mm_add_ps(vl, _mm_mul_ps(vd, valphah));

      _mm_storeu_ps(output, vo);
      output += 4;
    }

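    // Handle the last 1-3 output pixels with a 2-pixel step followed by a
    // scalar step.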
    if XNN_UNLIKELY(p != 0) {
      if (p & 2) {
        const __m128 vw = _mm_loadu_ps(w);
        w += 4;

        const __m128 valphah = _mm_shuffle_ps(vw, vw, _MM_SHUFFLE(2, 0, 2, 0));
        const __m128 valphav = _mm_shuffle_ps(vw, vw, _MM_SHUFFLE(3, 1, 3, 1));

        $for P in range(2):
          const float* itl${P} = (const float*) ((uintptr_t) i[${2 * P}] + input_offset);
          const float* ibl${P} = (const float*) ((uintptr_t) i[${2 * P + 1}] + input_offset);
        i += 4;

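        // Gather both pixels' {top-left, top-right} and {bottom-left,
        // bottom-right} pairs into single registers.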
        const __m128 vtltr = _mm_loadh_pi(_mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl0), (const __m64*) itl1);
        const __m128 vblbr = _mm_loadh_pi(_mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl0), (const __m64*) ibl1);

        const __m128 vldrd = _mm_sub_ps(vblbr, vtltr);
        const __m128 vld = _mm_shuffle_ps(vldrd, vldrd, _MM_SHUFFLE(2, 0, 2, 0));
        const __m128 vrd = _mm_shuffle_ps(vldrd, vldrd, _MM_SHUFFLE(3, 1, 3, 1));

        const __m128 vtl = _mm_shuffle_ps(vtltr, vtltr, _MM_SHUFFLE(2, 0, 2, 0));
        const __m128 vtr = _mm_shuffle_ps(vtltr, vtltr, _MM_SHUFFLE(3, 1, 3, 1));

        const __m128 vl = _mm_add_ps(vtl, _mm_mul_ps(vld, valphav));
        const __m128 vr = _mm_add_ps(vtr, _mm_mul_ps(vrd, valphav));

        const __m128 vd = _mm_sub_ps(vr, vl);
        const __m128 vo = _mm_add_ps(vl, _mm_mul_ps(vd, valphah));

        _mm_storel_pi((__m64*) output, vo);
        output += 2;
      }

      if (p & 1) {
        // We are computing the following formula:
        //   result = (1 - alpha_h) * (1 - alpha_v) * top_left +
        //                 alpha_h  * (1 - alpha_v) * top_right +
        //            (1 - alpha_h) *      alpha_v  * bottom_left +
        //                 alpha_h  *      alpha_v  * bottom_right.
        //
        // Rearranging gives
        //   result =    left + alpha_h * (right        - left),
        // where
        //   left =  top_left + alpha_v * (bottom_left  - top_left),
        //  right = top_right + alpha_v * (bottom_right - top_right).

        const float alphah = *w;
        const __m128 valphav = _mm_load_ps1(w + 1);
        w += 2;

        const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
        const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
        i += 2;

        const __m128 vtltr = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) itl);
        const __m128 vblbr = _mm_loadl_pi(_mm_undefined_ps(), (const __m64*) ibl);

        // Compute at once
        //    left_diff = bottom_left  - top_left
        //   right_diff = bottom_right - top_right
        const __m128 vldrd = _mm_sub_ps(vblbr, vtltr);
        const __m128 vlr = _mm_add_ps(vtltr, _mm_mul_ps(vldrd, valphav));

        // Extract them and compute the result.
        const float l = _mm_cvtss_f32(vlr);
        const float r = _mm_cvtss_f32(_mm_shuffle_ps(vlr, vlr, 1));

        *output++ = l + alphah * (r - l);
      }
    }

    input_offset += input_increment;
  } while (--channels != 0);
}