/*
 * Copyright © 2016 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_string.h"

#include "freedreno_draw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"

#include "fd5_context.h"
#include "fd5_draw.h"
#include "fd5_emit.h"
#include "fd5_format.h"
#include "fd5_gmem.h"
#include "fd5_program.h"
#include "fd5_zsa.h"

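/* emit_mrt() programs the RB_MRT[i]/SP_FS_MRT[i] state for each color
 * target.  When called with a gmem state object, the MRTs point at fixed
 * offsets inside on-chip GMEM (tiled, pitch = bin_w * cpp); with
 * gmem=NULL they point at the actual resource in system memory, using
 * its real pitch and tile mode.
 */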
static void
emit_mrt(struct fd_ringbuffer *ring, unsigned nr_bufs,
         struct pipe_surface **bufs, const struct fd_gmem_stateobj *gmem)
{
   enum a5xx_tile_mode tile_mode;
   unsigned i;

   for (i = 0; i < A5XX_MAX_RENDER_TARGETS; i++) {
      enum a5xx_color_fmt format = 0;
      enum a3xx_color_swap swap = WZYX;
      bool srgb = false, sint = false, uint = false;
      struct fd_resource *rsc = NULL;
      uint32_t stride = 0;
      uint32_t size = 0;
      uint32_t base = 0;
      uint32_t offset = 0;

      if (gmem) {
         tile_mode = TILE5_2;
      } else {
         tile_mode = TILE5_LINEAR;
      }

      if ((i < nr_bufs) && bufs[i]) {
         struct pipe_surface *psurf = bufs[i];
         enum pipe_format pformat = psurf->format;

         rsc = fd_resource(psurf->texture);

         format = fd5_pipe2color(pformat);
         swap = fd5_pipe2swap(pformat);
         srgb = util_format_is_srgb(pformat);
         sint = util_format_is_pure_sint(pformat);
         uint = util_format_is_pure_uint(pformat);

         assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

         offset = fd_resource_offset(rsc, psurf->u.tex.level,
                                     psurf->u.tex.first_layer);

         if (gmem) {
            stride = gmem->bin_w * gmem->cbuf_cpp[i];
            size = stride * gmem->bin_h;
            base = gmem->cbuf_base[i];
         } else {
            stride = fd_resource_pitch(rsc, psurf->u.tex.level);
            size = fd_resource_layer_stride(rsc, psurf->u.tex.level);

            tile_mode =
               fd_resource_tile_mode(psurf->texture, psurf->u.tex.level);
         }
      }

      OUT_PKT4(ring, REG_A5XX_RB_MRT_BUF_INFO(i), 5);
      OUT_RING(
         ring,
         A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
            A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile_mode) |
            A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap) |
            COND(gmem,
                 0x800) | /* XXX 0x1000 for RECTLIST clear, 0x0 for BLIT.. */
            COND(srgb, A5XX_RB_MRT_BUF_INFO_COLOR_SRGB));
      OUT_RING(ring, A5XX_RB_MRT_PITCH(stride));
      OUT_RING(ring, A5XX_RB_MRT_ARRAY_PITCH(size));
      if (gmem || (i >= nr_bufs) || !bufs[i]) {
         OUT_RING(ring, base);       /* RB_MRT[i].BASE_LO */
         OUT_RING(ring, 0x00000000); /* RB_MRT[i].BASE_HI */
      } else {
         OUT_RELOC(ring, rsc->bo, offset, 0, 0); /* BASE_LO/HI */
      }

      OUT_PKT4(ring, REG_A5XX_SP_FS_MRT_REG(i), 1);
      OUT_RING(ring, A5XX_SP_FS_MRT_REG_COLOR_FORMAT(format) |
                        COND(sint, A5XX_SP_FS_MRT_REG_COLOR_SINT) |
                        COND(uint, A5XX_SP_FS_MRT_REG_COLOR_UINT) |
                        COND(srgb, A5XX_SP_FS_MRT_REG_COLOR_SRGB));

      /* when we support UBWC, these would be the system memory
       * addr/pitch/etc:
       */
      OUT_PKT4(ring, REG_A5XX_RB_MRT_FLAG_BUFFER(i), 4);
      OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
      OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
      OUT_RING(ring, A5XX_RB_MRT_FLAG_BUFFER_PITCH(0));
      OUT_RING(ring, A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(0));
   }
}

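/* emit_zs() is the depth/stencil counterpart of emit_mrt(): with a gmem
 * state object the depth (and separate stencil, for Z32_S8) buffers live
 * at the zsbuf_base[] offsets inside GMEM; with gmem=NULL they point at
 * the resources in system memory.  It also points GRAS_LRZ at the LRZ
 * (low-resolution Z) buffer when the resource has one.
 */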
static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
        const struct fd_gmem_stateobj *gmem)
{
   if (zsbuf) {
      struct fd_resource *rsc = fd_resource(zsbuf->texture);
      enum a5xx_depth_format fmt = fd5_pipe2depth(zsbuf->format);
      uint32_t cpp = rsc->layout.cpp;
      uint32_t stride = 0;
      uint32_t size = 0;

      if (gmem) {
         stride = cpp * gmem->bin_w;
         size = stride * gmem->bin_h;
      } else {
         stride = fd_resource_pitch(rsc, zsbuf->u.tex.level);
         size = fd_resource_layer_stride(rsc, zsbuf->u.tex.level);
      }

      OUT_PKT4(ring, REG_A5XX_RB_DEPTH_BUFFER_INFO, 5);
      OUT_RING(ring, A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));
      if (gmem) {
         OUT_RING(ring, gmem->zsbuf_base[0]); /* RB_DEPTH_BUFFER_BASE_LO */
         OUT_RING(ring, 0x00000000);          /* RB_DEPTH_BUFFER_BASE_HI */
      } else {
         OUT_RELOC(ring, rsc->bo,
            fd_resource_offset(rsc, zsbuf->u.tex.level, zsbuf->u.tex.first_layer),
            0, 0); /* RB_DEPTH_BUFFER_BASE_LO/HI */
      }
      OUT_RING(ring, A5XX_RB_DEPTH_BUFFER_PITCH(stride));
      OUT_RING(ring, A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(size));

      OUT_PKT4(ring, REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
      OUT_RING(ring, A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(fmt));

      OUT_PKT4(ring, REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_PITCH */

      if (rsc->lrz) {
         OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO, 3);
         OUT_RELOC(ring, rsc->lrz, 0x1000, 0, 0);
         OUT_RING(ring, A5XX_GRAS_LRZ_BUFFER_PITCH(rsc->lrz_pitch));

         OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO, 2);
         OUT_RELOC(ring, rsc->lrz, 0, 0, 0);
      } else {
         OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO, 3);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_PITCH */

         OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO, 2);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
      }

      if (rsc->stencil) {
         if (gmem) {
            stride = 1 * gmem->bin_w;
            size = stride * gmem->bin_h;
         } else {
            stride = fd_resource_pitch(rsc->stencil, zsbuf->u.tex.level);
            size = fd_resource_layer_stride(rsc->stencil, zsbuf->u.tex.level);
         }

         OUT_PKT4(ring, REG_A5XX_RB_STENCIL_INFO, 5);
         OUT_RING(ring, A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL);
         if (gmem) {
            OUT_RING(ring, gmem->zsbuf_base[1]); /* RB_STENCIL_BASE_LO */
            OUT_RING(ring, 0x00000000);          /* RB_STENCIL_BASE_HI */
         } else {
            OUT_RELOC(ring, rsc->stencil->bo,
               fd_resource_offset(rsc->stencil, zsbuf->u.tex.level, zsbuf->u.tex.first_layer),
               0, 0); /* RB_STENCIL_BASE_LO/HI */
         }
         OUT_RING(ring, A5XX_RB_STENCIL_PITCH(stride));
         OUT_RING(ring, A5XX_RB_STENCIL_ARRAY_PITCH(size));
      } else {
         OUT_PKT4(ring, REG_A5XX_RB_STENCIL_INFO, 1);
         OUT_RING(ring, 0x00000000); /* RB_STENCIL_INFO */
      }
   } else {
      OUT_PKT4(ring, REG_A5XX_RB_DEPTH_BUFFER_INFO, 5);
      OUT_RING(ring, A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH5_NONE));
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_BASE_HI */
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_PITCH */
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_ARRAY_PITCH */

      OUT_PKT4(ring, REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO, 1);
      OUT_RING(ring, A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH5_NONE));

      OUT_PKT4(ring, REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
      OUT_RING(ring, 0x00000000); /* RB_DEPTH_FLAG_BUFFER_PITCH */

      OUT_PKT4(ring, REG_A5XX_RB_STENCIL_INFO, 1);
      OUT_RING(ring, 0x00000000); /* RB_STENCIL_INFO */
   }
}

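/* emit_msaa() mirrors the sample count into the TPL1, RB, and GRAS
 * copies of the MSAA state, setting the MSAA_DISABLE bit for the
 * single-sampled case.
 */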
static void
emit_msaa(struct fd_ringbuffer *ring, uint32_t nr_samples)
{
   enum a3xx_msaa_samples samples = fd_msaa_samples(nr_samples);

   OUT_PKT4(ring, REG_A5XX_TPL1_TP_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
                     COND(samples == MSAA_ONE,
                          A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A5XX_RB_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A5XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring,
            A5XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
               COND(samples == MSAA_ONE, A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A5XX_GRAS_SC_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(samples) |
                     COND(samples == MSAA_ONE,
                          A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE));
}

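/* Decide whether to use the hw binning (visibility stream) pass.  The
 * VSC limits how many bins a single pipe can cover: at most 32 bins
 * total and at most 15 in either dimension (the maxpw/maxph checks
 * below).  Binning is only a win with more than a trivial number of
 * bins and at least one draw.
 */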
static bool
use_hw_binning(struct fd_batch *batch)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

   /* workaround: Like on a3xx, hw binning and scissor optimization
    * don't play nice together.
    *
    * Disable binning if scissor optimization is used.
    */
   if (gmem->minx || gmem->miny)
      return false;

   if ((gmem->maxpw * gmem->maxph) > 32)
      return false;

   if ((gmem->maxpw > 15) || (gmem->maxph > 15))
      return false;

   return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2) &&
          (batch->num_draws > 0);
}

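/* Draw packets are recorded before we know whether the binning pass will
 * run, so the visibility-cull mode in each draw's header dword is left
 * to be patched in afterwards: USE_VISIBILITY when hw binning is used,
 * IGNORE_VISIBILITY otherwise.
 */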
static void
patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
{
   unsigned i;
   for (i = 0; i < fd_patch_num_elements(&batch->draw_patches); i++) {
      struct fd_cs_patch *patch = fd_patch_element(&batch->draw_patches, i);
      *patch->cs = patch->val | DRAW4(0, 0, 0, vismode);
   }
   util_dynarray_clear(&batch->draw_patches);
}

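/* Program the VSC (visibility stream compressor): overall bin size, the
 * screen rect (in bins) covered by each of the 16 pipes, and the buffers
 * the hw writes the per-pipe visibility streams into during the binning
 * pass.
 */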
static void
update_vsc_pipe(struct fd_batch *batch) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   struct fd5_context *fd5_ctx = fd5_context(ctx);
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_ringbuffer *ring = batch->gmem;
   int i;

   OUT_PKT4(ring, REG_A5XX_VSC_BIN_SIZE, 3);
   OUT_RING(ring, A5XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
                     A5XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
   OUT_RELOC(ring, fd5_ctx->vsc_size_mem, 0, 0, 0); /* VSC_SIZE_ADDRESS_LO/HI */

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_0BC5, 2);
   OUT_RING(ring, 0x00000000); /* UNKNOWN_0BC5 */
   OUT_RING(ring, 0x00000000); /* UNKNOWN_0BC6 */

   OUT_PKT4(ring, REG_A5XX_VSC_PIPE_CONFIG_REG(0), 16);
   for (i = 0; i < 16; i++) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[i];
      OUT_RING(ring, A5XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
                        A5XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
                        A5XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
                        A5XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
   }

   OUT_PKT4(ring, REG_A5XX_VSC_PIPE_DATA_ADDRESS_LO(0), 32);
   for (i = 0; i < 16; i++) {
      if (!ctx->vsc_pipe_bo[i]) {
         ctx->vsc_pipe_bo[i] = fd_bo_new(
            ctx->dev, 0x20000, 0, "vsc_pipe[%u]", i);
      }
      OUT_RELOC(ring, ctx->vsc_pipe_bo[i], 0, 0,
                0); /* VSC_PIPE_DATA_ADDRESS[i].LO/HI */
   }

   OUT_PKT4(ring, REG_A5XX_VSC_PIPE_DATA_LENGTH_REG(0), 16);
   for (i = 0; i < 16; i++) {
      OUT_RING(ring, fd_bo_size(ctx->vsc_pipe_bo[i]) -
                        32); /* VSC_PIPE_DATA_LENGTH[i] */
   }
}

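/* Run the binning pass: replay the draw commands in BINNING mode with a
 * full-gmem scissor so the hw fills the per-pipe visibility streams,
 * which the per-tile rendering pass later consumes via CP_SET_BIN_DATA5.
 */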
static void
emit_binning_pass(struct fd_batch *batch) assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

   uint32_t x1 = gmem->minx;
   uint32_t y1 = gmem->miny;
   uint32_t x2 = gmem->minx + gmem->width - 1;
   uint32_t y2 = gmem->miny + gmem->height - 1;

   fd5_set_render_mode(batch->ctx, ring, BINNING);

   OUT_PKT4(ring, REG_A5XX_RB_CNTL, 1);
   OUT_RING(ring,
            A5XX_RB_CNTL_WIDTH(gmem->bin_w) | A5XX_RB_CNTL_HEIGHT(gmem->bin_h));

   OUT_PKT4(ring, REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

   OUT_PKT4(ring, REG_A5XX_RB_RESOLVE_CNTL_1, 2);
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_1_X(x1) | A5XX_RB_RESOLVE_CNTL_1_Y(y1));
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_2_X(x2) | A5XX_RB_RESOLVE_CNTL_2_Y(y2));

   update_vsc_pipe(batch);

   OUT_PKT4(ring, REG_A5XX_VPC_MODE_CNTL, 1);
   OUT_RING(ring, A5XX_VPC_MODE_CNTL_BINNING_PASS);

   fd5_event_write(batch, ring, UNK_2C, false);

   OUT_PKT4(ring, REG_A5XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A5XX_RB_WINDOW_OFFSET_X(0) | A5XX_RB_WINDOW_OFFSET_Y(0));

   /* emit IB to binning drawcmds: */
   fd5_emit_ib(ring, batch->binning);

   fd_reset_wfi(batch);

   fd5_event_write(batch, ring, UNK_2D, false);

   fd5_event_write(batch, ring, CACHE_FLUSH_TS, true);

   // TODO CP_COND_WRITE's for all the vsc buffers (check for overflow??)

   fd_wfi(batch, ring);

   OUT_PKT4(ring, REG_A5XX_VPC_MODE_CNTL, 1);
   OUT_RING(ring, 0x0);
}

/* before first tile */
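/* Once-per-batch setup for GMEM rendering: restore invariant state,
 * switch the CCU into GMEM mode, program the MRT/ZS state with GMEM base
 * offsets, and (if enabled) run the binning pass.
 */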
static void
fd5_emit_tile_init(struct fd_batch *batch) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   fd5_emit_restore(batch, ring);

   if (batch->prologue)
      fd5_emit_ib(ring, batch->prologue);

   fd5_emit_lrz_flush(batch, ring);

   OUT_PKT4(ring, REG_A5XX_GRAS_CL_CNTL, 1);
   OUT_RING(ring, 0x00000080); /* GRAS_CL_CNTL */

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   OUT_PKT4(ring, REG_A5XX_PC_POWER_CNTL, 1);
   OUT_RING(ring, ctx->screen->info->num_sp_cores - 1); /* PC_POWER_CNTL */

   OUT_PKT4(ring, REG_A5XX_VFD_POWER_CNTL, 1);
   OUT_RING(ring, ctx->screen->info->num_sp_cores - 1); /* VFD_POWER_CNTL */

   /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
   fd_wfi(batch, ring);
   OUT_PKT4(ring, REG_A5XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, 0x7c13c080); /* RB_CCU_CNTL */

   emit_zs(ring, pfb->zsbuf, batch->gmem_state);
   emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, batch->gmem_state);

   /* Enable stream output for the first pass (likely the binning). */
   OUT_PKT4(ring, REG_A5XX_VPC_SO_OVERRIDE, 1);
   OUT_RING(ring, 0);

   if (use_hw_binning(batch)) {
      emit_binning_pass(batch);

      /* Disable stream output after binning, since each VS output should get
       * streamed out once.
       */
      OUT_PKT4(ring, REG_A5XX_VPC_SO_OVERRIDE, 1);
      OUT_RING(ring, A5XX_VPC_SO_OVERRIDE_SO_DISABLE);

      fd5_emit_lrz_flush(batch, ring);
      patch_draws(batch, USE_VISIBILITY);
   } else {
      patch_draws(batch, IGNORE_VISIBILITY);
   }

   fd5_set_render_mode(batch->ctx, ring, GMEM);

   /* XXX If we're in gmem mode but not doing HW binning, then after the first
    * tile we should disable stream output (fd6_gmem.c doesn't do that either).
    */
}

/* before mem2gmem */
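/* Per-tile setup: program the window scissor / resolve rect for this
 * tile, and either point the CP at this tile's slice of the visibility
 * stream (CP_SET_BIN_DATA5) or tell it to ignore visibility entirely.
 */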
static void
fd5_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd5_context *fd5_ctx = fd5_context(ctx);
   struct fd_ringbuffer *ring = batch->gmem;

   uint32_t x1 = tile->xoff;
   uint32_t y1 = tile->yoff;
   uint32_t x2 = tile->xoff + tile->bin_w - 1;
   uint32_t y2 = tile->yoff + tile->bin_h - 1;

   OUT_PKT4(ring, REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));

   OUT_PKT4(ring, REG_A5XX_RB_RESOLVE_CNTL_1, 2);
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_1_X(x1) | A5XX_RB_RESOLVE_CNTL_1_Y(y1));
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_2_X(x2) | A5XX_RB_RESOLVE_CNTL_2_Y(y2));

   if (use_hw_binning(batch)) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[tile->p];
      struct fd_bo *pipe_bo = ctx->vsc_pipe_bo[tile->p];

      OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

      OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT7(ring, CP_SET_BIN_DATA5, 5);
      OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
                        CP_SET_BIN_DATA5_0_VSC_N(tile->n));
      OUT_RELOC(ring, pipe_bo, 0, 0, 0);     /* VSC_PIPE[p].DATA_ADDRESS */
      OUT_RELOC(ring, fd5_ctx->vsc_size_mem, /* VSC_SIZE_ADDRESS + (p * 4) */
                (tile->p * 4), 0, 0);
   } else {
      OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
      OUT_RING(ring, 0x1);
   }

   OUT_PKT4(ring, REG_A5XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A5XX_RB_WINDOW_OFFSET_X(x1) | A5XX_RB_WINDOW_OFFSET_Y(y1));
}

/*
 * transfer from system memory to gmem
 */

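/* Restore one surface into GMEM: the blit destination is an offset
 * inside GMEM (base), while the source is the sysmem surface bound as
 * MRT state.  Depth/stencil is imported through BLIT_MRT0 as a color
 * format, see the hack comment below.
 */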
static void
emit_mem2gmem_surf(struct fd_batch *batch, uint32_t base,
                   struct pipe_surface *psurf, enum a5xx_blit_buf buf)
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_resource *rsc = fd_resource(psurf->texture);
   uint32_t stride, size;

   assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

   if (buf == BLIT_S)
      rsc = rsc->stencil;

   if ((buf == BLIT_ZS) || (buf == BLIT_S)) {
      // XXX hack import via BLIT_MRT0 instead of BLIT_ZS, since I don't
      // know otherwise how to go from linear in sysmem to tiled in gmem.
      // possibly we want to flip this around gmem2mem and keep depth
      // tiled in sysmem (and fixup sampler state to assume tiled).. this
      // might be required for doing depth/stencil in bypass mode?
      enum a5xx_color_fmt format =
         fd5_pipe2color(fd_gmem_restore_format(rsc->b.b.format));

      OUT_PKT4(ring, REG_A5XX_RB_MRT_BUF_INFO(0), 5);
      OUT_RING(ring,
               A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
                  A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(rsc->layout.tile_mode) |
                  A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(WZYX));
      OUT_RING(ring, A5XX_RB_MRT_PITCH(fd_resource_pitch(rsc, psurf->u.tex.level)));
      OUT_RING(ring, A5XX_RB_MRT_ARRAY_PITCH(fd_resource_layer_stride(rsc, psurf->u.tex.level)));
      OUT_RELOC(ring, rsc->bo,
         fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer),
         0, 0); /* BASE_LO/HI */

      buf = BLIT_MRT0;
   }

   stride = gmem->bin_w << fdl_cpp_shift(&rsc->layout);
   size = stride * gmem->bin_h;

   OUT_PKT4(ring, REG_A5XX_RB_BLIT_FLAG_DST_LO, 4);
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_LO */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_HI */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_PITCH */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_ARRAY_PITCH */

   OUT_PKT4(ring, REG_A5XX_RB_RESOLVE_CNTL_3, 5);
   OUT_RING(ring, 0x00000000); /* RB_RESOLVE_CNTL_3 */
   OUT_RING(ring, base);       /* RB_BLIT_DST_LO */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_DST_HI */
   OUT_RING(ring, A5XX_RB_BLIT_DST_PITCH(stride));
   OUT_RING(ring, A5XX_RB_BLIT_DST_ARRAY_PITCH(size));

   OUT_PKT4(ring, REG_A5XX_RB_BLIT_CNTL, 1);
   OUT_RING(ring, A5XX_RB_BLIT_CNTL_BUF(buf));

   fd5_emit_blit(batch, ring);
}

static void
fd5_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   /*
    * setup mrt and zs with system memory base addresses:
    */

   emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, NULL);
   //	emit_zs(ring, pfb->zsbuf, NULL);

   OUT_PKT4(ring, REG_A5XX_RB_CNTL, 1);
   OUT_RING(ring, A5XX_RB_CNTL_WIDTH(gmem->bin_w) |
                     A5XX_RB_CNTL_HEIGHT(gmem->bin_h) | A5XX_RB_CNTL_BYPASS);

   if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_COLOR)) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_mem2gmem_surf(batch, gmem->cbuf_base[i], pfb->cbufs[i],
                            BLIT_MRT0 + i);
      }
   }

   if (fd_gmem_needs_restore(batch, tile,
                             FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

      if (!rsc->stencil || fd_gmem_needs_restore(batch, tile, FD_BUFFER_DEPTH))
         emit_mem2gmem_surf(batch, gmem->zsbuf_base[0], pfb->zsbuf, BLIT_ZS);
      if (rsc->stencil && fd_gmem_needs_restore(batch, tile, FD_BUFFER_STENCIL))
         emit_mem2gmem_surf(batch, gmem->zsbuf_base[1], pfb->zsbuf, BLIT_S);
   }
}

/* before IB to rendering cmds: */
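/* Switch the MRT/ZS state back to GMEM base offsets (mem2gmem pointed it
 * at sysmem) before the tile's draw commands are replayed.
 */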
static void
fd5_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   OUT_PKT4(ring, REG_A5XX_RB_CNTL, 1);
   OUT_RING(ring,
            A5XX_RB_CNTL_WIDTH(gmem->bin_w) | A5XX_RB_CNTL_HEIGHT(gmem->bin_h));

   emit_zs(ring, pfb->zsbuf, gmem);
   emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, gmem);
   emit_msaa(ring, pfb->samples);
}

/*
 * transfer from gmem to system memory (i.e. normal RAM)
 */

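/* Resolve one surface from GMEM back out to the resource in system
 * memory: here the blit destination is the sysmem bo (with its real
 * pitch and tile mode), and the tile's source rect was programmed via
 * RB_RESOLVE_CNTL_1/2 in fd5_emit_tile_prep().
 */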
static void
emit_gmem2mem_surf(struct fd_batch *batch, uint32_t base,
                   struct pipe_surface *psurf, enum a5xx_blit_buf buf)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct fd_resource *rsc = fd_resource(psurf->texture);
   bool tiled;
   uint32_t offset, pitch;

   if (!rsc->valid)
      return;

   if (buf == BLIT_S)
      rsc = rsc->stencil;

   offset =
      fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);
   pitch = fd_resource_pitch(rsc, psurf->u.tex.level);

   assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

   OUT_PKT4(ring, REG_A5XX_RB_BLIT_FLAG_DST_LO, 4);
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_LO */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_HI */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_PITCH */
   OUT_RING(ring, 0x00000000); /* RB_BLIT_FLAG_DST_ARRAY_PITCH */

   tiled = fd_resource_tile_mode(psurf->texture, psurf->u.tex.level);

   OUT_PKT4(ring, REG_A5XX_RB_RESOLVE_CNTL_3, 5);
   OUT_RING(ring, 0x00000004 | /* XXX RB_RESOLVE_CNTL_3 */
                     COND(tiled, A5XX_RB_RESOLVE_CNTL_3_TILED));
   OUT_RELOC(ring, rsc->bo, offset, 0, 0); /* RB_BLIT_DST_LO/HI */
   OUT_RING(ring, A5XX_RB_BLIT_DST_PITCH(pitch));
   OUT_RING(ring, A5XX_RB_BLIT_DST_ARRAY_PITCH(fd_resource_layer_stride(rsc, psurf->u.tex.level)));

   OUT_PKT4(ring, REG_A5XX_RB_BLIT_CNTL, 1);
   OUT_RING(ring, A5XX_RB_BLIT_CNTL_BUF(buf));

   //	bool msaa_resolve = pfb->samples > 1;
   bool msaa_resolve = false;
   OUT_PKT4(ring, REG_A5XX_RB_CLEAR_CNTL, 1);
   OUT_RING(ring, COND(msaa_resolve, A5XX_RB_CLEAR_CNTL_MSAA_RESOLVE));

   fd5_emit_blit(batch, ring);
}

static void
fd5_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

      if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH))
         emit_gmem2mem_surf(batch, gmem->zsbuf_base[0], pfb->zsbuf, BLIT_ZS);
      if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL))
         emit_gmem2mem_surf(batch, gmem->zsbuf_base[1], pfb->zsbuf, BLIT_S);
   }

   if (batch->resolve & FD_BUFFER_COLOR) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_gmem2mem_surf(batch, gmem->cbuf_base[i], pfb->cbufs[i],
                            BLIT_MRT0 + i);
      }
   }
}

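/* After the last tile: flush LRZ and caches and drop back to BYPASS
 * (direct rendering) mode.
 */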
static void
fd5_emit_tile_fini(struct fd_batch *batch) assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd5_emit_lrz_flush(batch, ring);

   fd5_cache_flush(batch, ring);
   fd5_set_render_mode(batch->ctx, ring, BYPASS);
}

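/* Setup for rendering directly to system memory (bypass mode), skipping
 * the tiling machinery entirely: CCU in bypass mode, full-framebuffer
 * scissor, no binning, and MRT/ZS pointed straight at the sysmem
 * resources.
 */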
static void
fd5_emit_sysmem_prep(struct fd_batch *batch) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   struct fd_ringbuffer *ring = batch->gmem;

   fd5_emit_restore(batch, ring);

   fd5_emit_lrz_flush(batch, ring);

   if (batch->prologue)
      fd5_emit_ib(ring, batch->prologue);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd5_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);

   OUT_PKT4(ring, REG_A5XX_PC_POWER_CNTL, 1);
   OUT_RING(ring, ctx->screen->info->num_sp_cores - 1); /* PC_POWER_CNTL */

   OUT_PKT4(ring, REG_A5XX_VFD_POWER_CNTL, 1);
   OUT_RING(ring, ctx->screen->info->num_sp_cores - 1); /* VFD_POWER_CNTL */

   /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
   fd_wfi(batch, ring);
   OUT_PKT4(ring, REG_A5XX_RB_CCU_CNTL, 1);
   OUT_RING(ring, 0x10000000); /* RB_CCU_CNTL */

   OUT_PKT4(ring, REG_A5XX_RB_CNTL, 1);
   OUT_RING(ring, A5XX_RB_CNTL_WIDTH(0) | A5XX_RB_CNTL_HEIGHT(0) |
                     A5XX_RB_CNTL_BYPASS);

   /* remaining setup below here does not apply to blit/compute: */
   if (batch->nondraw)
      return;

   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   OUT_PKT4(ring, REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(0) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(0));
   OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(pfb->width - 1) |
                     A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(pfb->height - 1));

   OUT_PKT4(ring, REG_A5XX_RB_RESOLVE_CNTL_1, 2);
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_1_X(0) | A5XX_RB_RESOLVE_CNTL_1_Y(0));
   OUT_RING(ring, A5XX_RB_RESOLVE_CNTL_2_X(pfb->width - 1) |
                     A5XX_RB_RESOLVE_CNTL_2_Y(pfb->height - 1));

   OUT_PKT4(ring, REG_A5XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A5XX_RB_WINDOW_OFFSET_X(0) | A5XX_RB_WINDOW_OFFSET_Y(0));

   /* Enable stream output, since there's no binning pass to put it in. */
   OUT_PKT4(ring, REG_A5XX_VPC_SO_OVERRIDE, 1);
   OUT_RING(ring, 0);

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x1);

   patch_draws(batch, IGNORE_VISIBILITY);

   emit_zs(ring, pfb->zsbuf, NULL);
   emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, NULL);
   emit_msaa(ring, pfb->samples);
}

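/* After sysmem rendering: flush the color/depth CCU back out to memory. */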
static void
fd5_emit_sysmem_fini(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd5_emit_lrz_flush(batch, ring);

   fd5_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
   fd5_event_write(batch, ring, PC_CCU_FLUSH_DEPTH_TS, true);
}

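/* Hook the a5xx implementations into the core tiling code.  The generic
 * gmem logic (freedreno_gmem.c) drives these callbacks in roughly this
 * order (a sketch, not the literal loop):
 *
 *    emit_tile_init(batch);
 *    for each tile {
 *       emit_tile_prep(batch, tile);
 *       emit_tile_mem2gmem(batch, tile);   // restore sysmem -> gmem
 *       emit_tile_renderprep(batch, tile);
 *       ... IB to the tile's draw cmds ...
 *       emit_tile_gmem2mem(batch, tile);   // resolve gmem -> sysmem
 *    }
 *    emit_tile_fini(batch);
 *
 * or, for direct rendering, emit_sysmem_prep() / emit_sysmem_fini()
 * around the draw cmds.
 */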
void
fd5_gmem_init(struct pipe_context *pctx) disable_thread_safety_analysis
{
   struct fd_context *ctx = fd_context(pctx);

   ctx->emit_tile_init = fd5_emit_tile_init;
   ctx->emit_tile_prep = fd5_emit_tile_prep;
   ctx->emit_tile_mem2gmem = fd5_emit_tile_mem2gmem;
   ctx->emit_tile_renderprep = fd5_emit_tile_renderprep;
   ctx->emit_tile_gmem2mem = fd5_emit_tile_gmem2mem;
   ctx->emit_tile_fini = fd5_emit_tile_fini;
   ctx->emit_sysmem_prep = fd5_emit_sysmem_prep;
   ctx->emit_sysmem_fini = fd5_emit_sysmem_fini;
}
793