/*
 * Copyright © 2014 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_string.h"

#include "freedreno_draw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"

#include "fd4_context.h"
#include "fd4_draw.h"
#include "fd4_emit.h"
#include "fd4_format.h"
#include "fd4_gmem.h"
#include "fd4_program.h"
#include "fd4_zsa.h"

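/* Editor note: select one of the internal (solid/blit) programs for a gmem
 * blit and invalidate the cached shader-variant pointers on the emit state,
 * so the next fd4_emit_get_*p() lookup resolves against the new program key.
 */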
static void
fd4_gmem_emit_set_prog(struct fd_context *ctx, struct fd4_emit *emit,
                       struct fd_program_stateobj *prog)
{
   emit->skip_consts = true;
   emit->key.vs = prog->vs;
   emit->key.fs = prog->fs;
   emit->prog = fd4_program_state(
      ir3_cache_lookup(ctx->shader_cache, &emit->key, &ctx->debug));
   /* reset the fd4_emit_get_*p cache */
   emit->vs = NULL;
   emit->fs = NULL;
}

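/* Editor note: emit per-MRT buffer info.  With a nonzero bin_w the color
 * buffers live in gmem (tiled layout, bin-relative base offsets taken from
 * 'bases'); otherwise they are emitted linear with a reloc to the backing
 * BO (sysmem/bypass rendering).
 */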
static void
emit_mrt(struct fd_ringbuffer *ring, unsigned nr_bufs,
         struct pipe_surface **bufs, const uint32_t *bases, uint32_t bin_w,
         bool decode_srgb)
{
   enum a4xx_tile_mode tile_mode;
   unsigned i;

   if (bin_w) {
      tile_mode = 2;
   } else {
      tile_mode = TILE4_LINEAR;
   }

   for (i = 0; i < A4XX_MAX_RENDER_TARGETS; i++) {
      enum a4xx_color_fmt format = 0;
      enum a3xx_color_swap swap = WZYX;
      bool srgb = false;
      struct fd_resource *rsc = NULL;
      uint32_t stride = 0;
      uint32_t base = 0;
      uint32_t offset = 0;

      if ((i < nr_bufs) && bufs[i]) {
         struct pipe_surface *psurf = bufs[i];
         enum pipe_format pformat = psurf->format;

         rsc = fd_resource(psurf->texture);

         /* In case we're drawing to Z32F_S8, the "color" actually goes to
          * the stencil
          */
         if (rsc->stencil) {
            rsc = rsc->stencil;
            pformat = rsc->b.b.format;
            if (bases)
               bases++;
         }

         format = fd4_pipe2color(pformat);
         swap = fd4_pipe2swap(pformat);

         if (decode_srgb)
            srgb = util_format_is_srgb(pformat);
         else
            pformat = util_format_linear(pformat);

         assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

         offset = fd_resource_offset(rsc, psurf->u.tex.level,
                                     psurf->u.tex.first_layer);

         if (bin_w) {
            stride = bin_w << fdl_cpp_shift(&rsc->layout);

            if (bases) {
               base = bases[i];
            }
         } else {
            stride = fd_resource_pitch(rsc, psurf->u.tex.level);
         }
      } else if ((i < nr_bufs) && bases) {
         base = bases[i];
      }

      OUT_PKT0(ring, REG_A4XX_RB_MRT_BUF_INFO(i), 3);
      OUT_RING(ring, A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(format) |
                        A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile_mode) |
                        A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(stride) |
                        A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(swap) |
                        COND(srgb, A4XX_RB_MRT_BUF_INFO_COLOR_SRGB));
      if (bin_w || (i >= nr_bufs) || !bufs[i]) {
         OUT_RING(ring, base);
         OUT_RING(ring, A4XX_RB_MRT_CONTROL3_STRIDE(stride));
      } else {
         OUT_RELOC(ring, rsc->bo, offset, 0, 0);
         /* RB_MRT[i].CONTROL3.STRIDE not emitted by c2d..
          * not sure if we need to skip it for bypass or
          * not.
          */
         OUT_RING(ring, A4XX_RB_MRT_CONTROL3_STRIDE(0));
      }
   }
}

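/* Editor note: decide whether the hw binning pass can be used for this
 * batch; bail out for scissor-optimized gmem layouts and (presumably) for
 * configurations where the visibility pipes would cover too many bins.
 */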
static bool
use_hw_binning(struct fd_batch *batch)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

   /* workaround: Like on a3xx, hw binning and scissor optimization
    * don't play nice together.
    *
    * Disable binning if scissor optimization is used.
    */
   if (gmem->minx || gmem->miny)
      return false;

   if ((gmem->maxpw * gmem->maxph) > 32)
      return false;

   if ((gmem->maxpw > 15) || (gmem->maxph > 15))
      return false;

   return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2);
}

/* transfer from gmem to system memory (ie. normal RAM) */

static void
emit_gmem2mem_surf(struct fd_batch *batch, bool stencil, uint32_t base,
                   struct pipe_surface *psurf)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct fd_resource *rsc = fd_resource(psurf->texture);
   enum pipe_format pformat = psurf->format;
   uint32_t offset, pitch;

   if (!rsc->valid)
      return;

   if (stencil) {
      assert(rsc->stencil);
      rsc = rsc->stencil;
      pformat = rsc->b.b.format;
   }

   offset =
      fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);
   pitch = fd_resource_pitch(rsc, psurf->u.tex.level);

   assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

   OUT_PKT0(ring, REG_A4XX_RB_COPY_CONTROL, 4);
   OUT_RING(ring, A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(MSAA_ONE) |
                     A4XX_RB_COPY_CONTROL_MODE(RB_COPY_RESOLVE) |
                     A4XX_RB_COPY_CONTROL_GMEM_BASE(base));
   OUT_RELOC(ring, rsc->bo, offset, 0, 0); /* RB_COPY_DEST_BASE */
   OUT_RING(ring, A4XX_RB_COPY_DEST_PITCH_PITCH(pitch));
   OUT_RING(ring, A4XX_RB_COPY_DEST_INFO_TILE(TILE4_LINEAR) |
                     A4XX_RB_COPY_DEST_INFO_FORMAT(fd4_pipe2color(pformat)) |
                     A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(0xf) |
                     A4XX_RB_COPY_DEST_INFO_ENDIAN(ENDIAN_NONE) |
                     A4XX_RB_COPY_DEST_INFO_SWAP(fd4_pipe2swap(pformat)));

   fd4_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
            DI_SRC_SEL_AUTO_INDEX, 2, 1, INDEX4_SIZE_8_BIT, 0, 0, NULL);
}

static void
fd4_emit_tile_gmem2mem(struct fd_batch *batch,
                       const struct fd_tile *tile) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd4_emit emit = {
      .debug = &ctx->debug,
      .vtx = &ctx->solid_vbuf_state,
   };
   fd4_gmem_emit_set_prog(ctx, &emit, &ctx->solid_prog);

   OUT_PKT0(ring, REG_A4XX_RB_DEPTH_CONTROL, 1);
   OUT_RING(ring, A4XX_RB_DEPTH_CONTROL_ZFUNC(FUNC_NEVER));

   OUT_PKT0(ring, REG_A4XX_RB_STENCIL_CONTROL, 2);
   OUT_RING(ring, A4XX_RB_STENCIL_CONTROL_FUNC(FUNC_NEVER) |
                     A4XX_RB_STENCIL_CONTROL_FAIL(STENCIL_KEEP) |
                     A4XX_RB_STENCIL_CONTROL_ZPASS(STENCIL_KEEP) |
                     A4XX_RB_STENCIL_CONTROL_ZFAIL(STENCIL_KEEP) |
                     A4XX_RB_STENCIL_CONTROL_FUNC_BF(FUNC_NEVER) |
                     A4XX_RB_STENCIL_CONTROL_FAIL_BF(STENCIL_KEEP) |
                     A4XX_RB_STENCIL_CONTROL_ZPASS_BF(STENCIL_KEEP) |
                     A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(STENCIL_KEEP));
   OUT_RING(ring, 0x00000000); /* RB_STENCIL_CONTROL2 */

   OUT_PKT0(ring, REG_A4XX_RB_STENCILREFMASK, 2);
   OUT_RING(ring, 0xff000000 | A4XX_RB_STENCILREFMASK_STENCILREF(0) |
                     A4XX_RB_STENCILREFMASK_STENCILMASK(0) |
                     A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(0xff));
   OUT_RING(ring, 0xff000000 | A4XX_RB_STENCILREFMASK_BF_STENCILREF(0) |
                     A4XX_RB_STENCILREFMASK_BF_STENCILMASK(0) |
                     A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(0xff));

   OUT_PKT0(ring, REG_A4XX_GRAS_SU_MODE_CONTROL, 1);
   OUT_RING(ring, A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(0));

   fd_wfi(batch, ring);

   OUT_PKT0(ring, REG_A4XX_GRAS_CL_CLIP_CNTL, 1);
   OUT_RING(ring, 0x80000); /* GRAS_CL_CLIP_CNTL */

   OUT_PKT0(ring, REG_A4XX_GRAS_CL_VPORT_XOFFSET_0, 6);
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_XOFFSET_0((float)pfb->width / 2.0f));
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_XSCALE_0((float)pfb->width / 2.0f));
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_YOFFSET_0((float)pfb->height / 2.0f));
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_YSCALE_0(-(float)pfb->height / 2.0f));
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_ZOFFSET_0(0.0f));
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_ZSCALE_0(1.0f));

   OUT_PKT0(ring, REG_A4XX_RB_RENDER_CONTROL, 1);
   OUT_RING(ring, A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE | 0xa); /* XXX */

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_CONTROL, 1);
   OUT_RING(ring, A4XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RESOLVE_PASS) |
                     A4XX_GRAS_SC_CONTROL_MSAA_DISABLE |
                     A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
                     A4XX_GRAS_SC_CONTROL_RASTER_MODE(1));

   OUT_PKT0(ring, REG_A4XX_PC_PRIM_VTX_CNTL, 1);
   OUT_RING(ring, A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST);

   OUT_PKT0(ring, REG_A4XX_GRAS_ALPHA_CONTROL, 1);
   OUT_RING(ring, 0x00000002);

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR, 2);
   OUT_RING(ring, A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(pfb->width - 1) |
                     A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(pfb->height - 1));
   OUT_RING(ring, A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(0) |
                     A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(0));

   OUT_PKT0(ring, REG_A4XX_VFD_INDEX_OFFSET, 2);
   OUT_RING(ring, 0); /* VFD_INDEX_OFFSET */
   OUT_RING(ring, 0); /* ??? UNKNOWN_2209 */

   fd4_program_emit(ring, &emit, 0, NULL);
   fd4_emit_vertex_bufs(ring, &emit);

   if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
      if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH))
         emit_gmem2mem_surf(batch, false, gmem->zsbuf_base[0], pfb->zsbuf);
      if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL))
         emit_gmem2mem_surf(batch, true, gmem->zsbuf_base[1], pfb->zsbuf);
   }

   if (batch->resolve & FD_BUFFER_COLOR) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_gmem2mem_surf(batch, false, gmem->cbuf_base[i], pfb->cbufs[i]);
      }
   }

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_CONTROL, 1);
   OUT_RING(ring, A4XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RENDERING_PASS) |
                     A4XX_GRAS_SC_CONTROL_MSAA_DISABLE |
                     A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
                     A4XX_GRAS_SC_CONTROL_RASTER_MODE(0));
}

/* transfer from system memory to gmem */

static void
emit_mem2gmem_surf(struct fd_batch *batch, const uint32_t *bases,
                   struct pipe_surface **bufs, uint32_t nr_bufs, uint32_t bin_w)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_surface *zsbufs[2];

   emit_mrt(ring, nr_bufs, bufs, bases, bin_w, false);

   if (bufs[0] && (bufs[0]->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT)) {
      /* The gmem_restore_tex logic will put the first buffer's stencil
       * as color. Supply it with the proper information to make that
       * happen.
       */
      zsbufs[0] = zsbufs[1] = bufs[0];
      bufs = zsbufs;
      nr_bufs = 2;
   }

   fd4_emit_gmem_restore_tex(ring, nr_bufs, bufs);

   fd4_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
            DI_SRC_SEL_AUTO_INDEX, 2, 1, INDEX4_SIZE_8_BIT, 0, 0, NULL);
}

static void
fd4_emit_tile_mem2gmem(struct fd_batch *batch,
                       const struct fd_tile *tile) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd4_emit emit = {
      .debug = &ctx->debug,
      .vtx = &ctx->blit_vbuf_state,
      .sprite_coord_enable = 1,
      .no_decode_srgb = true,
   };
   /* NOTE: They all use the same VP, this is for vtx bufs. */
   fd4_gmem_emit_set_prog(ctx, &emit, &ctx->blit_prog[0]);

   unsigned char mrt_comp[A4XX_MAX_RENDER_TARGETS] = {0};
   float x0, y0, x1, y1;
   unsigned bin_w = tile->bin_w;
   unsigned bin_h = tile->bin_h;
   unsigned i;

   /* write texture coordinates to vertexbuf: */
   x0 = ((float)tile->xoff) / ((float)pfb->width);
   x1 = ((float)tile->xoff + bin_w) / ((float)pfb->width);
   y0 = ((float)tile->yoff) / ((float)pfb->height);
   y1 = ((float)tile->yoff + bin_h) / ((float)pfb->height);

   OUT_PKT3(ring, CP_MEM_WRITE, 5);
   OUT_RELOC(ring, fd_resource(ctx->blit_texcoord_vbuf)->bo, 0, 0, 0);
   OUT_RING(ring, fui(x0));
   OUT_RING(ring, fui(y0));
   OUT_RING(ring, fui(x1));
   OUT_RING(ring, fui(y1));

   for (i = 0; i < A4XX_MAX_RENDER_TARGETS; i++) {
      mrt_comp[i] = ((i < pfb->nr_cbufs) && pfb->cbufs[i]) ? 0xf : 0;

      OUT_PKT0(ring, REG_A4XX_RB_MRT_CONTROL(i), 1);
      OUT_RING(ring, A4XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY) |
                        A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(0xf));

      OUT_PKT0(ring, REG_A4XX_RB_MRT_BLEND_CONTROL(i), 1);
      OUT_RING(
         ring,
         A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(FACTOR_ONE) |
            A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
            A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(FACTOR_ZERO) |
            A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(FACTOR_ONE) |
            A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
            A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(FACTOR_ZERO));
   }

   OUT_PKT0(ring, REG_A4XX_RB_RENDER_COMPONENTS, 1);
   OUT_RING(ring, A4XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
                     A4XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
                     A4XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
                     A4XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
                     A4XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
                     A4XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
                     A4XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
                     A4XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));

   OUT_PKT0(ring, REG_A4XX_RB_RENDER_CONTROL, 1);
   OUT_RING(ring, 0x8); /* XXX RB_RENDER_CONTROL */

   OUT_PKT0(ring, REG_A4XX_RB_DEPTH_CONTROL, 1);
   OUT_RING(ring, A4XX_RB_DEPTH_CONTROL_ZFUNC(FUNC_LESS));

   OUT_PKT0(ring, REG_A4XX_GRAS_CL_CLIP_CNTL, 1);
   OUT_RING(ring, 0x280000); /* XXX GRAS_CL_CLIP_CNTL */

   OUT_PKT0(ring, REG_A4XX_GRAS_SU_MODE_CONTROL, 1);
   OUT_RING(ring, A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(0) |
                     A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS);

   OUT_PKT0(ring, REG_A4XX_GRAS_CL_VPORT_XOFFSET_0, 6);
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_XOFFSET_0((float)bin_w / 2.0f));
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_XSCALE_0((float)bin_w / 2.0f));
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_YOFFSET_0((float)bin_h / 2.0f));
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_YSCALE_0(-(float)bin_h / 2.0f));
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_ZOFFSET_0(0.0f));
   OUT_RING(ring, A4XX_GRAS_CL_VPORT_ZSCALE_0(1.0f));

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR, 2);
   OUT_RING(ring, A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(bin_w - 1) |
                     A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(bin_h - 1));
   OUT_RING(ring, A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(0) |
                     A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(0));

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL, 2);
   OUT_RING(ring, A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(0) |
                     A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(0));
   OUT_RING(ring, A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(bin_w - 1) |
                     A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(bin_h - 1));

   OUT_PKT0(ring, REG_A4XX_RB_MODE_CONTROL, 1);
   OUT_RING(ring, A4XX_RB_MODE_CONTROL_WIDTH(gmem->bin_w) |
                     A4XX_RB_MODE_CONTROL_HEIGHT(gmem->bin_h));

   OUT_PKT0(ring, REG_A4XX_RB_STENCIL_CONTROL, 2);
   OUT_RING(ring, A4XX_RB_STENCIL_CONTROL_FUNC(FUNC_ALWAYS) |
                     A4XX_RB_STENCIL_CONTROL_FAIL(STENCIL_KEEP) |
                     A4XX_RB_STENCIL_CONTROL_ZPASS(STENCIL_KEEP) |
                     A4XX_RB_STENCIL_CONTROL_ZFAIL(STENCIL_KEEP) |
                     A4XX_RB_STENCIL_CONTROL_FUNC_BF(FUNC_ALWAYS) |
                     A4XX_RB_STENCIL_CONTROL_FAIL_BF(STENCIL_KEEP) |
                     A4XX_RB_STENCIL_CONTROL_ZPASS_BF(STENCIL_KEEP) |
                     A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(STENCIL_KEEP));
   OUT_RING(ring, 0x00000000); /* RB_STENCIL_CONTROL2 */

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_CONTROL, 1);
   OUT_RING(ring, A4XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RENDERING_PASS) |
                     A4XX_GRAS_SC_CONTROL_MSAA_DISABLE |
                     A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
                     A4XX_GRAS_SC_CONTROL_RASTER_MODE(1));

   OUT_PKT0(ring, REG_A4XX_PC_PRIM_VTX_CNTL, 1);
   OUT_RING(ring, A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST |
                     A4XX_PC_PRIM_VTX_CNTL_VAROUT(1));

   OUT_PKT0(ring, REG_A4XX_VFD_INDEX_OFFSET, 2);
   OUT_RING(ring, 0); /* VFD_INDEX_OFFSET */
   OUT_RING(ring, 0); /* ??? UNKNOWN_2209 */

   fd4_emit_vertex_bufs(ring, &emit);

   /* for gmem pitch/base calculations, we need to use the non-
    * truncated tile sizes:
    */
   bin_w = gmem->bin_w;
   bin_h = gmem->bin_h;

   if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_COLOR)) {
      fd4_gmem_emit_set_prog(ctx, &emit, &ctx->blit_prog[pfb->nr_cbufs - 1]);
      fd4_program_emit(ring, &emit, pfb->nr_cbufs, pfb->cbufs);
      emit_mem2gmem_surf(batch, gmem->cbuf_base, pfb->cbufs, pfb->nr_cbufs,
                         bin_w);
   }

   if (fd_gmem_needs_restore(batch, tile,
                             FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      switch (pfb->zsbuf->format) {
      case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
      case PIPE_FORMAT_Z32_FLOAT:
         if (pfb->zsbuf->format == PIPE_FORMAT_Z32_FLOAT)
            fd4_gmem_emit_set_prog(ctx, &emit, &ctx->blit_z);
         else
            fd4_gmem_emit_set_prog(ctx, &emit, &ctx->blit_zs);

         OUT_PKT0(ring, REG_A4XX_RB_DEPTH_CONTROL, 1);
         OUT_RING(ring, A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE |
                           A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE |
                           A4XX_RB_DEPTH_CONTROL_ZFUNC(FUNC_ALWAYS) |
                           A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE);

         OUT_PKT0(ring, REG_A4XX_GRAS_ALPHA_CONTROL, 1);
         OUT_RING(ring, A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE);

         OUT_PKT0(ring, REG_A4XX_GRAS_CL_CLIP_CNTL, 1);
         OUT_RING(ring, 0x80000); /* GRAS_CL_CLIP_CNTL */

         break;
      default:
         /* Non-float can use a regular color write. It's split over 8-bit
          * components, so half precision is always sufficient.
          */
         fd4_gmem_emit_set_prog(ctx, &emit, &ctx->blit_prog[0]);
         break;
      }
      fd4_program_emit(ring, &emit, 1, &pfb->zsbuf);
      emit_mem2gmem_surf(batch, gmem->zsbuf_base, &pfb->zsbuf, 1, bin_w);
   }

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_CONTROL, 1);
   OUT_RING(ring, A4XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RENDERING_PASS) |
                     A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
                     A4XX_GRAS_SC_CONTROL_RASTER_MODE(0));

   OUT_PKT0(ring, REG_A4XX_RB_MODE_CONTROL, 1);
   OUT_RING(ring, A4XX_RB_MODE_CONTROL_WIDTH(gmem->bin_w) |
                     A4XX_RB_MODE_CONTROL_HEIGHT(gmem->bin_h) |
                     0x00010000); /* XXX */
}

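/* Editor note: rewrite the vis_cull_mode field of all draw commands recorded
 * for this batch, once it is known whether hw binning (and thus visibility
 * stream data) will be used.
 */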
static void
patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
{
   unsigned i;
   for (i = 0; i < fd_patch_num_elements(&batch->draw_patches); i++) {
      struct fd_cs_patch *patch = fd_patch_element(&batch->draw_patches, i);
      *patch->cs = patch->val | DRAW4(0, 0, 0, vismode);
   }
   util_dynarray_clear(&batch->draw_patches);
}

/* for rendering directly to system memory: */
static void
fd4_emit_sysmem_prep(struct fd_batch *batch) assert_dt
{
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_ringbuffer *ring = batch->gmem;

   fd4_emit_restore(batch, ring);

   OUT_PKT0(ring, REG_A4XX_RB_FRAME_BUFFER_DIMENSION, 1);
   OUT_RING(ring, A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(pfb->width) |
                     A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(pfb->height));

   emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, NULL, 0, true);

   /* setup scissor/offset for current tile: */
   OUT_PKT0(ring, REG_A4XX_RB_BIN_OFFSET, 1);
   OUT_RING(ring, A4XX_RB_BIN_OFFSET_X(0) | A4XX_RB_BIN_OFFSET_Y(0));

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL, 2);
   OUT_RING(ring, A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(0) |
                     A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(0));
   OUT_RING(ring, A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(pfb->width - 1) |
                     A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(pfb->height - 1));

   OUT_PKT0(ring, REG_A4XX_RB_MODE_CONTROL, 1);
   OUT_RING(ring, A4XX_RB_MODE_CONTROL_WIDTH(0) |
                     A4XX_RB_MODE_CONTROL_HEIGHT(0) | 0x00c00000); /* XXX */

   OUT_PKT0(ring, REG_A4XX_RB_RENDER_CONTROL, 1);
   OUT_RING(ring, 0x8);

   patch_draws(batch, IGNORE_VISIBILITY);
}

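/* Editor note: program the visibility stream (VSC) pipes: the per-pipe bin
 * layout plus the BOs that the binning pass will write visibility data into,
 * allocating those BOs on first use.
 */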
static void
update_vsc_pipe(struct fd_batch *batch) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd4_context *fd4_ctx = fd4_context(ctx);
   struct fd_ringbuffer *ring = batch->gmem;
   int i;

   OUT_PKT0(ring, REG_A4XX_VSC_SIZE_ADDRESS, 1);
   OUT_RELOC(ring, fd4_ctx->vsc_size_mem, 0, 0, 0); /* VSC_SIZE_ADDRESS */

   OUT_PKT0(ring, REG_A4XX_VSC_PIPE_CONFIG_REG(0), 8);
   for (i = 0; i < 8; i++) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[i];
      OUT_RING(ring, A4XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
                        A4XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
                        A4XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
                        A4XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
   }

   OUT_PKT0(ring, REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(0), 8);
   for (i = 0; i < 8; i++) {
      if (!ctx->vsc_pipe_bo[i]) {
         ctx->vsc_pipe_bo[i] = fd_bo_new(
            ctx->dev, 0x40000, 0, "vsc_pipe[%u]", i);
      }
      OUT_RELOC(ring, ctx->vsc_pipe_bo[i], 0, 0,
                0); /* VSC_PIPE_DATA_ADDRESS[i] */
   }

   OUT_PKT0(ring, REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(0), 8);
   for (i = 0; i < 8; i++) {
      OUT_RING(ring, fd_bo_size(ctx->vsc_pipe_bo[i]) -
                        32); /* VSC_PIPE_DATA_LENGTH[i] */
   }
}

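/* Editor note: run the hw binning pass: replay the batch's binning IB with
 * BINNING enabled and a whole-screen scissor, so the hw fills the VSC pipe
 * buffers with per-bin visibility before the per-tile rendering passes.
 */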
static void
emit_binning_pass(struct fd_batch *batch) assert_dt
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_ringbuffer *ring = batch->gmem;
   int i;

   uint32_t x1 = gmem->minx;
   uint32_t y1 = gmem->miny;
   uint32_t x2 = gmem->minx + gmem->width - 1;
   uint32_t y2 = gmem->miny + gmem->height - 1;

   OUT_PKT0(ring, REG_A4XX_PC_BINNING_COMMAND, 1);
   OUT_RING(ring, A4XX_PC_BINNING_COMMAND_BINNING_ENABLE);

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_CONTROL, 1);
   OUT_RING(ring, A4XX_GRAS_SC_CONTROL_RENDER_MODE(RB_TILING_PASS) |
                     A4XX_GRAS_SC_CONTROL_MSAA_DISABLE |
                     A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
                     A4XX_GRAS_SC_CONTROL_RASTER_MODE(0));

   OUT_PKT0(ring, REG_A4XX_RB_FRAME_BUFFER_DIMENSION, 1);
   OUT_RING(ring, A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(pfb->width) |
                     A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(pfb->height));

   /* setup scissor/offset for whole screen: */
   OUT_PKT0(ring, REG_A4XX_RB_BIN_OFFSET, 1);
   OUT_RING(ring, A4XX_RB_BIN_OFFSET_X(x1) | A4XX_RB_BIN_OFFSET_Y(y1));

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL, 2);
   OUT_RING(ring, A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(x1) |
                     A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(y1));
   OUT_RING(ring, A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(x2) |
                     A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(y2));

   for (i = 0; i < A4XX_MAX_RENDER_TARGETS; i++) {
      OUT_PKT0(ring, REG_A4XX_RB_MRT_CONTROL(i), 1);
      OUT_RING(ring, A4XX_RB_MRT_CONTROL_ROP_CODE(ROP_CLEAR) |
                        A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(0xf));
   }

   /* emit IB to binning drawcmds: */
   fd4_emit_ib(ring, batch->binning);

   fd_reset_wfi(batch);
   fd_wfi(batch, ring);

   /* and then put stuff back the way it was: */

   OUT_PKT0(ring, REG_A4XX_PC_BINNING_COMMAND, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_CONTROL, 1);
   OUT_RING(ring, A4XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RENDERING_PASS) |
                     A4XX_GRAS_SC_CONTROL_MSAA_DISABLE |
                     A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
                     A4XX_GRAS_SC_CONTROL_RASTER_MODE(0));

   fd_event_write(batch, ring, CACHE_FLUSH);
   fd_wfi(batch, ring);
}

/* before first tile */
static void
fd4_emit_tile_init(struct fd_batch *batch) assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

   fd4_emit_restore(batch, ring);

   OUT_PKT0(ring, REG_A4XX_VSC_BIN_SIZE, 1);
   OUT_RING(ring, A4XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
                     A4XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));

   update_vsc_pipe(batch);

   fd_wfi(batch, ring);
   OUT_PKT0(ring, REG_A4XX_RB_FRAME_BUFFER_DIMENSION, 1);
   OUT_RING(ring, A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(pfb->width) |
                     A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(pfb->height));

   if (use_hw_binning(batch)) {
      OUT_PKT0(ring, REG_A4XX_RB_MODE_CONTROL, 1);
      OUT_RING(ring, A4XX_RB_MODE_CONTROL_WIDTH(gmem->bin_w) |
                        A4XX_RB_MODE_CONTROL_HEIGHT(gmem->bin_h));

      OUT_PKT0(ring, REG_A4XX_RB_RENDER_CONTROL, 1);
      OUT_RING(ring, A4XX_RB_RENDER_CONTROL_BINNING_PASS |
                        A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE | 0x8);

      /* emit hw binning pass: */
      emit_binning_pass(batch);

      patch_draws(batch, USE_VISIBILITY);
   } else {
      patch_draws(batch, IGNORE_VISIBILITY);
   }

   OUT_PKT0(ring, REG_A4XX_RB_MODE_CONTROL, 1);
   OUT_RING(ring, A4XX_RB_MODE_CONTROL_WIDTH(gmem->bin_w) |
                     A4XX_RB_MODE_CONTROL_HEIGHT(gmem->bin_h) |
                     A4XX_RB_MODE_CONTROL_ENABLE_GMEM);
}

/* before mem2gmem */
static void
fd4_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

   if (pfb->zsbuf) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
      uint32_t cpp = rsc->layout.cpp;

      OUT_PKT0(ring, REG_A4XX_RB_DEPTH_INFO, 3);
      OUT_RING(ring, A4XX_RB_DEPTH_INFO_DEPTH_BASE(gmem->zsbuf_base[0]) |
                        A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(
                           fd4_pipe2depth(pfb->zsbuf->format)));
      OUT_RING(ring, A4XX_RB_DEPTH_PITCH(cpp * gmem->bin_w));
      OUT_RING(ring, A4XX_RB_DEPTH_PITCH2(cpp * gmem->bin_w));

      OUT_PKT0(ring, REG_A4XX_RB_STENCIL_INFO, 2);
      if (rsc->stencil) {
         OUT_RING(ring,
                  A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL |
                     A4XX_RB_STENCIL_INFO_STENCIL_BASE(gmem->zsbuf_base[1]));
         OUT_RING(ring, A4XX_RB_STENCIL_PITCH(rsc->stencil->layout.cpp *
                                              gmem->bin_w));
      } else {
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
      }
   } else {
      OUT_PKT0(ring, REG_A4XX_RB_DEPTH_INFO, 3);
      OUT_RING(ring, 0x00000000);
      OUT_RING(ring, 0x00000000);
      OUT_RING(ring, 0x00000000);

      OUT_PKT0(ring, REG_A4XX_RB_STENCIL_INFO, 2);
      OUT_RING(ring, 0); /* RB_STENCIL_INFO */
      OUT_RING(ring, 0); /* RB_STENCIL_PITCH */
   }

   OUT_PKT0(ring, REG_A4XX_GRAS_DEPTH_CONTROL, 1);
   if (pfb->zsbuf) {
      OUT_RING(ring, A4XX_GRAS_DEPTH_CONTROL_FORMAT(
                        fd4_pipe2depth(pfb->zsbuf->format)));
   } else {
      OUT_RING(ring, A4XX_GRAS_DEPTH_CONTROL_FORMAT(DEPTH4_NONE));
   }
}

/* before IB to rendering cmds: */
static void
fd4_emit_tile_renderprep(struct fd_batch *batch,
                         const struct fd_tile *tile) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   struct fd4_context *fd4_ctx = fd4_context(ctx);
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   uint32_t x1 = tile->xoff;
   uint32_t y1 = tile->yoff;
   uint32_t x2 = tile->xoff + tile->bin_w - 1;
   uint32_t y2 = tile->yoff + tile->bin_h - 1;

   if (use_hw_binning(batch)) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[tile->p];
      struct fd_bo *pipe_bo = ctx->vsc_pipe_bo[tile->p];

      assert(pipe->w && pipe->h);

      fd_event_write(batch, ring, HLSQ_FLUSH);
      fd_wfi(batch, ring);

      OUT_PKT0(ring, REG_A4XX_PC_VSTREAM_CONTROL, 1);
      OUT_RING(ring, A4XX_PC_VSTREAM_CONTROL_SIZE(pipe->w * pipe->h) |
                        A4XX_PC_VSTREAM_CONTROL_N(tile->n));

      OUT_PKT3(ring, CP_SET_BIN_DATA, 2);
      OUT_RELOC(ring, pipe_bo, 0, 0,
                0); /* BIN_DATA_ADDR <- VSC_PIPE[p].DATA_ADDRESS */
      OUT_RELOC(ring, fd4_ctx->vsc_size_mem, /* BIN_SIZE_ADDR <-
                                                VSC_SIZE_ADDRESS + (p * 4) */
                (tile->p * 4), 0, 0);
   } else {
      OUT_PKT0(ring, REG_A4XX_PC_VSTREAM_CONTROL, 1);
      OUT_RING(ring, 0x00000000);
   }

   OUT_PKT3(ring, CP_SET_BIN, 3);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, CP_SET_BIN_1_X1(x1) | CP_SET_BIN_1_Y1(y1));
   OUT_RING(ring, CP_SET_BIN_2_X2(x2) | CP_SET_BIN_2_Y2(y2));

   emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, gmem->cbuf_base, gmem->bin_w,
            true);

   /* setup scissor/offset for current tile: */
   OUT_PKT0(ring, REG_A4XX_RB_BIN_OFFSET, 1);
   OUT_RING(ring, A4XX_RB_BIN_OFFSET_X(tile->xoff) |
                     A4XX_RB_BIN_OFFSET_Y(tile->yoff));

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL, 2);
   OUT_RING(ring, A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(x1) |
                     A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(y1));
   OUT_RING(ring, A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(x2) |
                     A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(y2));

   OUT_PKT0(ring, REG_A4XX_RB_RENDER_CONTROL, 1);
   OUT_RING(ring, 0x8);
}

void
fd4_gmem_init(struct pipe_context *pctx) disable_thread_safety_analysis
{
   struct fd_context *ctx = fd_context(pctx);

   ctx->emit_sysmem_prep = fd4_emit_sysmem_prep;
   ctx->emit_tile_init = fd4_emit_tile_init;
   ctx->emit_tile_prep = fd4_emit_tile_prep;
   ctx->emit_tile_mem2gmem = fd4_emit_tile_mem2gmem;
   ctx->emit_tile_renderprep = fd4_emit_tile_renderprep;
   ctx->emit_tile_gmem2mem = fd4_emit_tile_gmem2mem;
}