/*
 * Copyright (c) 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __aarch64__

#include <arm_neon.h>
#include <cstddef>

namespace arm_conv {
namespace winograd {
namespace input_transform {
void arm_fp32_6x6(
  unsigned int n_channels,
  const float* const input_base,
  const size_t input_row_stride,
  const size_t input_col_stride,
  float* outptr,
  const size_t matrix_stride
)
{
  constexpr int inner_tile_rows = 6;
  constexpr int inner_tile_cols = 6;

  // Get pointers into the input tile
  const float *x_ptrs[inner_tile_rows][inner_tile_cols];
  for (int i = 0, xi = 0; i < inner_tile_rows; i++, xi++)
  {
    // Get a pointer into the row
    const float* const row_ptr = input_base + xi*input_row_stride;

    for (int j = 0, xj = 0; j < inner_tile_cols; j++, xj++)
    {
      x_ptrs[i][j] = row_ptr + xj*input_col_stride;
    }
  }

  // Matrices used/computed in this kernel.
  float x[inner_tile_rows][inner_tile_cols];
  float XTx[inner_tile_rows][inner_tile_cols];
  float U[inner_tile_rows][inner_tile_cols];
  for (int i = 0; i < inner_tile_rows; i++)
  {
    for (int j = 0; j < inner_tile_cols; j++)
    {
      x[i][j] = XTx[i][j] = 0.0f;
    }
  }

  // Perform the Winograd input transformation for each channel in the input
  // tensor.
  int channels_remaining = n_channels;
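  // Process two channels at a time using 64-bit NEON vectors (float32x2_t).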
  for (; channels_remaining >= 2; channels_remaining -= 2)
  {
    // Matrices used/computed in this kernel
    float32x2_t x[inner_tile_rows][inner_tile_cols];
    float32x2_t XTx[inner_tile_rows][inner_tile_cols];
    float32x2_t U[inner_tile_rows][inner_tile_cols];
    for (int i = 0; i < inner_tile_rows; i++)
    {
      for (int j = 0; j < inner_tile_cols; j++)
      {
        x[i][j] = vdup_n_f32(0.0f);
        XTx[i][j] = vdup_n_f32(0.0f);
      }
    }

    // Read a 6x6 tile in the Winograd domain
    for (int i = 0; i < inner_tile_rows; i++)
    {
      for (int j = 0; j < inner_tile_cols; j++)
      {
        x[i][j] = vld1_f32(x_ptrs[i][j]);
        x_ptrs[i][j] += 2;
      }
    }

    // Compute XT . x
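    // vmla_n_f32(a, b, s) computes a + b*s and vmls_n_f32(a, b, s) computes
    // a - b*s, so each transformed row below is built from at most two such
    // multiply-accumulate operations on sums or differences of the inputs.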
    for (int j = 0; j < inner_tile_cols; j++)
    {
      // XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
      XTx[0][j] = vmls_n_f32(vmla_n_f32(x[4][j], x[0][j], 4.0f), x[2][j], 5.0f);

      // XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
      XTx[1][j] = vmls_n_f32(vadd_f32(x[3][j], x[4][j]), vadd_f32(x[1][j], x[2][j]), 4.0f);

      // XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
      XTx[2][j] = vmla_n_f32(vsub_f32(x[4][j], x[3][j]), vsub_f32(x[1][j], x[2][j]), 4.0f);

      // XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
      XTx[3][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[3][j], x[1][j]), 2.0f);

      // XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
      XTx[4][j] = vmla_n_f32(vsub_f32(x[4][j], x[2][j]), vsub_f32(x[1][j], x[3][j]), 2.0f);

      // XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
      XTx[5][j] = vmls_n_f32(vmla_n_f32(x[5][j], x[1][j], 4.0f), x[3][j], 5.0f);
    }

    // Compute U = XT . x . X
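    // The same six-point transform is now applied along the rows of XT.x to
    // complete the two-dimensional input transform.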
    for (int i = 0; i < inner_tile_rows; i++)
    {
      // U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
      U[i][0] = vmls_n_f32(vmla_n_f32(XTx[i][4], XTx[i][0], 4.0f), XTx[i][2], 5.0f);

      // U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
      U[i][1] = vmls_n_f32(vadd_f32(XTx[i][3], XTx[i][4]), vadd_f32(XTx[i][1], XTx[i][2]), 4.0f);

      // U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
      U[i][2] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][3]), vsub_f32(XTx[i][1], XTx[i][2]), 4.0f);

      // U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
      U[i][3] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][3], XTx[i][1]), 2.0f);

      // U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
      U[i][4] = vmla_n_f32(vsub_f32(XTx[i][4], XTx[i][2]), vsub_f32(XTx[i][1], XTx[i][3]), 2.0f);

      // U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
      U[i][5] = vmls_n_f32(vmla_n_f32(XTx[i][5], XTx[i][1], 4.0f), XTx[i][3], 5.0f);
    }

    // Store the transformed matrix
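    // Element (i, j) of U goes to output matrix m = i*6 + j; consecutive
    // matrices are `matrix_stride` floats apart and the two channel values
    // are stored contiguously within each matrix.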
    for (int i = 0, m = 0; i < inner_tile_rows; i++)
    {
      for (int j = 0; j < inner_tile_cols; j++, m++)
      {
        vst1_f32(outptr + m*matrix_stride, U[i][j]);
      }
    }
    outptr += 2;
  }
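  // Handle any remaining channel with plain scalar arithmetic.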
  for (; channels_remaining; channels_remaining--)
  {
    // Load x
    for (int i = 0; i < inner_tile_rows; i++)
    {
      for (int j = 0; j < inner_tile_cols; j++)
      {
        x[i][j] = *(x_ptrs[i][j]++);
      }
    }

    // Compute XT . x
    for (int j = 0; j < inner_tile_cols; j++)
    {
      XTx[0][j] = 4*x[0][j] + -5*x[2][j] + 1*x[4][j];
      XTx[1][j] = -4*x[1][j] + -4*x[2][j] + 1*x[3][j] + 1*x[4][j];
      XTx[2][j] = 4*x[1][j] + -4*x[2][j] + -1*x[3][j] + 1*x[4][j];
      XTx[3][j] = -2*x[1][j] + -1*x[2][j] + 2*x[3][j] + 1*x[4][j];
      XTx[4][j] = 2*x[1][j] + -1*x[2][j] + -2*x[3][j] + 1*x[4][j];
      XTx[5][j] = 4*x[1][j] + -5*x[3][j] + 1*x[5][j];
    }

    // Compute U = XT . x . X
    for (int i = 0; i < inner_tile_rows; i++)
    {
      U[i][0] = 4*XTx[i][0] + -5*XTx[i][2] + 1*XTx[i][4];
      U[i][1] = -4*XTx[i][1] + -4*XTx[i][2] + 1*XTx[i][3] + 1*XTx[i][4];
      U[i][2] = 4*XTx[i][1] + -4*XTx[i][2] + -1*XTx[i][3] + 1*XTx[i][4];
      U[i][3] = -2*XTx[i][1] + -1*XTx[i][2] + 2*XTx[i][3] + 1*XTx[i][4];
      U[i][4] = 2*XTx[i][1] + -1*XTx[i][2] + -2*XTx[i][3] + 1*XTx[i][4];
      U[i][5] = 4*XTx[i][1] + -5*XTx[i][3] + 1*XTx[i][5];
    }

    // Store the transformed matrix
    for (int i = 0, m = 0; i < inner_tile_rows; i++)
    {
      for (int j = 0; j < inner_tile_cols; j++, m++)
      {
        *(outptr + m*matrix_stride) = U[i][j];
      }
    }
    outptr++;
  }
}

} // namespace input_transform
} // namespace winograd
} // namespace arm_conv

#endif // ! __aarch64__