/*
 * Copyright © 2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file vc4_tiling_lt.c
 *
 * Helper functions from vc4_tiling.c that are compiled either with or
 * without NEON assembly.
 *
 * If V3D_BUILD_NEON is set, then the functions will be suffixed with _neon.
 * They will only use NEON assembly if __ARM_ARCH is also set, to keep the x86
 * sim build working.
 */

#include <string.h>
#include "pipe/p_state.h"
#include "vc4_tiling.h"
#include "broadcom/common/v3d_cpu_tiling.h"

#ifdef V3D_BUILD_NEON
#define NEON_TAG(x) x ## _neon
#else
#define NEON_TAG(x) x ## _base
#endif
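
/* For example, NEON_TAG(vc4_load_lt_image) expands to vc4_load_lt_image_neon
 * in the NEON build and to vc4_load_lt_image_base otherwise.
 */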

/** Returns the stride in bytes of a 64-byte microtile. */
static uint32_t
vc4_utile_stride(int cpp)
{
        switch (cpp) {
        case 1:
                return 8;
        case 2:
        case 4:
        case 8:
                return 16;
        default:
                unreachable("bad cpp");
        }
}

/**
 * Returns the X value swizzled into the address bits for LT tiling.
 *
 * The LT tile load/stores rely on the X bits not intersecting with the Y
 * bits.  Because of this, we have to choose to put the utile index within the
 * LT tile into one of the two values, and we do so in swizzle_lt_x() to make
 * NPOT handling easier.
 */
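/*
 * For example, with cpp == 4 (a 4x4-pixel utile, 16 bytes per utile row),
 * x == 5 is pixel 1 of utile column 1, so its X contribution to the address
 * is (1 << 2) | (1 << 6) == 68 bytes.
 */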
static uint32_t
swizzle_lt_x(int x, int cpp)
{
        switch (cpp) {
        case 1:
                /* 8x8 inside of 4x4 */
                return (((uint32_t)x & 0x7) << (0 - 0) |
                        ((uint32_t)x & ~0x7) << (6 - 3));
        case 2:
                /* 8x4 inside of 4x4 */
                return (((uint32_t)x & 0x7) << (1 - 0) |
                        ((uint32_t)x & ~0x7) << (6 - 3));
        case 4:
                /* 4x4 inside of 4x4 */
                return (((uint32_t)x & 0x3) << (2 - 0) |
                        ((uint32_t)x & ~0x3) << (6 - 2));
        case 8:
                /* 2x4 inside of 4x4 */
                return (((uint32_t)x & 0x1) << (3 - 0) |
                        ((uint32_t)x & ~0x1) << (6 - 1));
        default:
                unreachable("bad cpp");
        }
}

/**
 * Returns the Y value swizzled into the address bits for LT tiling.
 *
 * The LT tile load/stores rely on the X bits not intersecting with the Y
 * bits.
 */
static uint32_t
swizzle_lt_y(int y, int cpp)
{

        switch (cpp) {
        case 1:
                /* 8x8 inside of 4x4 */
                return ((y & 0x7) << 3);
        case 2:
                /* 8x4 inside of 4x4 */
                return ((y & 0x3) << 4);
        case 4:
                /* 4x4 inside of 4x4 */
                return ((y & 0x3) << 4);
        case 8:
                /* 2x4 inside of 4x4 */
                return ((y & 0x3) << 4);
        default:
                unreachable("bad cpp");
        }
}

/**
 * Helper for loading or storing to an LT image, where the box is aligned
 * to utiles.
 *
 * This just breaks the box down into calls to the fast
 * v3d_load_utile()/v3d_store_utile() helpers.
 */
static inline void
vc4_lt_image_aligned(void *gpu, uint32_t gpu_stride,
                     void *cpu, uint32_t cpu_stride,
                     int cpp, const struct pipe_box *box, bool to_cpu)
{
        uint32_t utile_w = vc4_utile_width(cpp);
        uint32_t utile_h = vc4_utile_height(cpp);
        uint32_t utile_stride = vc4_utile_stride(cpp);
        uint32_t xstart = box->x;
        uint32_t ystart = box->y;

        for (uint32_t y = 0; y < box->height; y += utile_h) {
                for (uint32_t x = 0; x < box->width; x += utile_w) {
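                        /* Each 64-byte utile covers utile_w pixels in X, so
                         * (xstart + x) * 64 / utile_w below is the byte
                         * offset of this utile column within its row of
                         * utiles.
                         */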
                        void *gpu_tile = gpu + ((ystart + y) * gpu_stride +
                                                (xstart + x) * 64 / utile_w);
                        if (to_cpu) {
                                v3d_load_utile(cpu + (cpu_stride * y +
                                                      x * cpp),
                                               cpu_stride,
                                               gpu_tile,
                                               utile_stride);
                        } else {
                                v3d_store_utile(gpu_tile,
                                                utile_stride,
                                                cpu + (cpu_stride * y +
                                                       x * cpp),
                                                cpu_stride);
                        }
                }
        }
}

/**
 * Helper for loading or storing to an LT image, where the box is not aligned
 * to utiles.
 *
 * This walks through the raster-order data, copying to/from the corresponding
 * tiled pixel.  This means we don't get write-combining on stores, but the
 * loop is very few CPU instructions since the memcpy will be inlined.
 */
static inline void
vc4_lt_image_unaligned(void *gpu, uint32_t gpu_stride,
                       void *cpu, uint32_t cpu_stride,
                       int cpp, const struct pipe_box *box, bool to_cpu)
{

        /* These are the address bits for the start of the box, split out into
         * x/y so that they can be incremented separately in their loops.
         */
        uint32_t offs_x0 = swizzle_lt_x(box->x, cpp);
        uint32_t offs_y = swizzle_lt_y(box->y, cpp);
        /* The *_mask values are "what bits of the address are from x or y" */
        uint32_t x_mask = swizzle_lt_x(~0, cpp);
        uint32_t y_mask = swizzle_lt_y(~0, cpp);
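        /* Byte step from one row of utiles to the next: swizzling a full
         * row's worth of pixels in X yields (row width in utiles) * 64.
         */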
        uint32_t incr_y = swizzle_lt_x(gpu_stride / cpp, cpp);

        assert(!(x_mask & y_mask));

        offs_x0 += incr_y * (box->y / vc4_utile_height(cpp));

        for (uint32_t y = 0; y < box->height; y++) {
                void *gpu_row = gpu + offs_y;

                uint32_t offs_x = offs_x0;

                for (uint32_t x = 0; x < box->width; x++) {
                        /* Use a memcpy here to move a pixel's worth of data.
                         * We're relying on this function to be inlined, so
                         * this will get expanded into the appropriate 1, 2,
                         * 4, or 8-byte move.
                         */
                        if (to_cpu) {
                                memcpy(cpu + x * cpp, gpu_row + offs_x, cpp);
                        } else {
                                memcpy(gpu_row + offs_x, cpu + x * cpp, cpp);
                        }

                        /* This math trick with x_mask increments offs_x by 1
                         * in x.
                         */
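                        /* For example, with cpp == 4, x_mask is ...11001100:
                         * from offs_x == 0xc (pixel 3 of utile 0),
                         * (0xc - x_mask) & x_mask == 0x40, the first pixel of
                         * the next utile over.
                         */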
                        offs_x = (offs_x - x_mask) & x_mask;
                }

                offs_y = (offs_y - y_mask) & y_mask;
                /* When offs_y wraps (we hit the end of the utile), we
                 * advance offs_x0 by incr_y, moving down to the next row of
                 * utiles.
                 */
                if (!offs_y)
                        offs_x0 += incr_y;

                cpu += cpu_stride;
        }
}

/**
 * General LT image load/store helper.
 */
static inline void
vc4_lt_image_helper(void *gpu, uint32_t gpu_stride,
                    void *cpu, uint32_t cpu_stride,
                    int cpp, const struct pipe_box *box, bool to_cpu)
{
        if (box->x & (vc4_utile_width(cpp) - 1) ||
            box->y & (vc4_utile_height(cpp) - 1) ||
            box->width & (vc4_utile_width(cpp) - 1) ||
            box->height & (vc4_utile_height(cpp) - 1)) {
                vc4_lt_image_unaligned(gpu, gpu_stride,
                                       cpu, cpu_stride,
                                       cpp, box, to_cpu);
        } else {
                vc4_lt_image_aligned(gpu, gpu_stride,
                                     cpu, cpu_stride,
                                     cpp, box, to_cpu);
        }
}

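/**
 * Dispatches to vc4_lt_image_helper() with a compile-time-constant cpp, so
 * that the swizzle math and per-pixel memcpy in the inlined helpers can be
 * specialized for each pixel size.
 */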
static inline void
vc4_lt_image_cpp_helper(void *gpu, uint32_t gpu_stride,
                        void *cpu, uint32_t cpu_stride,
                        int cpp, const struct pipe_box *box, bool to_cpu)
{
        switch (cpp) {
        case 1:
                vc4_lt_image_helper(gpu, gpu_stride, cpu, cpu_stride, 1, box,
                                    to_cpu);
                break;
        case 2:
                vc4_lt_image_helper(gpu, gpu_stride, cpu, cpu_stride, 2, box,
                                    to_cpu);
                break;
        case 4:
                vc4_lt_image_helper(gpu, gpu_stride, cpu, cpu_stride, 4, box,
                                    to_cpu);
                break;
        case 8:
                vc4_lt_image_helper(gpu, gpu_stride, cpu, cpu_stride, 8, box,
                                    to_cpu);
                break;
        default:
                unreachable("bad cpp");
        }
}

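/**
 * Loads a box of pixels from the LT-tiled source image into the raster-order
 * destination buffer.
 */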
void
NEON_TAG(vc4_load_lt_image)(void *dst, uint32_t dst_stride,
                            void *src, uint32_t src_stride,
                            int cpp, const struct pipe_box *box)
{
        vc4_lt_image_cpp_helper(src, src_stride, dst, dst_stride, cpp, box,
                                true);
}

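/**
 * Stores a box of pixels from the raster-order source buffer into the
 * LT-tiled destination image.
 */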
void
NEON_TAG(vc4_store_lt_image)(void *dst, uint32_t dst_stride,
                             void *src, uint32_t src_stride,
                             int cpp, const struct pipe_box *box)
{
        vc4_lt_image_cpp_helper(dst, dst_stride, src, src_stride, cpp, box,
                                false);
}
289