/*
 * Copyright © 2013 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/u_helpers.h"
#include "util/u_memory.h"
#include "util/u_string.h"
#include "util/u_viewport.h"

#include "freedreno_query_hw.h"
#include "freedreno_resource.h"

#include "fd3_blend.h"
#include "fd3_context.h"
#include "fd3_emit.h"
#include "fd3_format.h"
#include "fd3_program.h"
#include "fd3_rasterizer.h"
#include "fd3_texture.h"
#include "fd3_zsa.h"

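/* ir3_const.h carries the generation-independent const-emit logic; it
 * expects the per-generation emit helpers to be plugged in via these
 * defines before it is included:
 */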
#define emit_const_user fd3_emit_const_user
#define emit_const_bo fd3_emit_const_bo
#include "ir3_const.h"

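/* maps gallium shader stage to the CP_LOAD_STATE state-block used for that
 * stage's constants:
 */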
static const enum adreno_state_block sb[] = {
   [MESA_SHADER_VERTEX] = SB_VERT_SHADER,
   [MESA_SHADER_FRAGMENT] = SB_FRAG_SHADER,
};

/* regid:          base const register
 * prsc or dwords: buffer containing constant values
 * sizedwords:     size of const value buffer
 */
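/* Both helpers below pack a CP_LOAD_STATE packet; note that DST_OFF and
 * NUM_UNIT appear to be expressed in units of 2 dwords, hence the regid / 2
 * and sizedwords / 2 below.
 */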
static void
fd3_emit_const_user(struct fd_ringbuffer *ring,
                    const struct ir3_shader_variant *v, uint32_t regid,
                    uint32_t sizedwords, const uint32_t *dwords)
{
   emit_const_asserts(ring, v, regid, sizedwords);

   OUT_PKT3(ring, CP_LOAD_STATE, 2 + sizedwords);
   OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid / 2) |
                     CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
                     CP_LOAD_STATE_0_STATE_BLOCK(sb[v->type]) |
                     CP_LOAD_STATE_0_NUM_UNIT(sizedwords / 2));
   OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
                     CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
   for (int i = 0; i < sizedwords; i++)
      OUT_RING(ring, dwords[i]);
}

static void
fd3_emit_const_bo(struct fd_ringbuffer *ring,
                  const struct ir3_shader_variant *v, uint32_t regid,
                  uint32_t offset, uint32_t sizedwords, struct fd_bo *bo)
{
   uint32_t dst_off = regid / 2;
   /* The blob driver aligns all const uploads dst_off to 64.  We've been
    * successfully aligning to 8 vec4s as const_upload_unit so far with no
    * ill effects.
    */
   assert(dst_off % 16 == 0);
   uint32_t num_unit = sizedwords / 2;
   assert(num_unit % 2 == 0);

   emit_const_asserts(ring, v, regid, sizedwords);

   OUT_PKT3(ring, CP_LOAD_STATE, 2);
   OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(dst_off) |
                     CP_LOAD_STATE_0_STATE_SRC(SS_INDIRECT) |
                     CP_LOAD_STATE_0_STATE_BLOCK(sb[v->type]) |
                     CP_LOAD_STATE_0_NUM_UNIT(num_unit));
   OUT_RELOC(ring, bo, offset, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS), 0);
}

static void
fd3_emit_const_ptrs(struct fd_ringbuffer *ring, gl_shader_stage type,
                    uint32_t regid, uint32_t num, struct fd_bo **bos,
                    uint32_t *offsets)
{
   uint32_t anum = align(num, 4);
   uint32_t i;

   assert((regid % 4) == 0);

   OUT_PKT3(ring, CP_LOAD_STATE, 2 + anum);
   OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid / 2) |
                     CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
                     CP_LOAD_STATE_0_STATE_BLOCK(sb[type]) |
                     CP_LOAD_STATE_0_NUM_UNIT(anum / 2));
   OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
                     CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));

   for (i = 0; i < num; i++) {
      if (bos[i]) {
         OUT_RELOC(ring, bos[i], offsets[i], 0, 0);
      } else {
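         /* poison value (with the buffer index in bits 16..19) so a read
          * through an unbound const buffer is recognizable in a dump:
          */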
         OUT_RING(ring, 0xbad00000 | (i << 16));
      }
   }

   for (; i < anum; i++)
      OUT_RING(ring, 0xffffffff);
}

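/* hook used by the shared code in ir3_const.h; on a3xx consts are always
 * emitted directly into the draw ring, never into a separate state-object
 * ring:
 */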
static bool
is_stateobj(struct fd_ringbuffer *ring)
{
   return false;
}

static void
emit_const_ptrs(struct fd_ringbuffer *ring, const struct ir3_shader_variant *v,
                uint32_t dst_offset, uint32_t num, struct fd_bo **bos,
                uint32_t *offsets)
{
   /* TODO inline this */
   assert(dst_offset + num <= v->constlen * 4);
   fd3_emit_const_ptrs(ring, v->type, dst_offset, num, bos, offsets);
}

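/* sampler/memobj offsets of the VS and FS within the shared texture state,
 * plus the size of the per-texture table of mipmap base addresses (one
 * entry per miplevel):
 */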
#define VERT_TEX_OFF 0
#define FRAG_TEX_OFF 16
#define BASETABLE_SZ A3XX_MAX_MIP_LEVELS

static void
emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
              enum adreno_state_block sb, struct fd_texture_stateobj *tex)
{
   static const unsigned tex_off[] = {
      [SB_VERT_TEX] = VERT_TEX_OFF,
      [SB_FRAG_TEX] = FRAG_TEX_OFF,
   };
   static const enum adreno_state_block mipaddr[] = {
      [SB_VERT_TEX] = SB_VERT_MIPADDR,
      [SB_FRAG_TEX] = SB_FRAG_MIPADDR,
   };
   static const uint32_t bcolor_reg[] = {
      [SB_VERT_TEX] = REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR,
      [SB_FRAG_TEX] = REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
   };
   struct fd3_context *fd3_ctx = fd3_context(ctx);
   bool needs_border = false;
   unsigned i, j;

   if (tex->num_samplers > 0) {
      /* output sampler state: */
      OUT_PKT3(ring, CP_LOAD_STATE, 2 + (2 * tex->num_samplers));
      OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(tex_off[sb]) |
                        CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
                        CP_LOAD_STATE_0_STATE_BLOCK(sb) |
                        CP_LOAD_STATE_0_NUM_UNIT(tex->num_samplers));
      OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
                        CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
      for (i = 0; i < tex->num_samplers; i++) {
         static const struct fd3_sampler_stateobj dummy_sampler = {};
         const struct fd3_sampler_stateobj *sampler =
            tex->samplers[i] ? fd3_sampler_stateobj(tex->samplers[i])
                             : &dummy_sampler;

         OUT_RING(ring, sampler->texsamp0);
         OUT_RING(ring, sampler->texsamp1);

         needs_border |= sampler->needs_border;
      }
   }

   if (tex->num_textures > 0) {
      /* emit texture state: */
      OUT_PKT3(ring, CP_LOAD_STATE, 2 + (4 * tex->num_textures));
      OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(tex_off[sb]) |
                        CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
                        CP_LOAD_STATE_0_STATE_BLOCK(sb) |
                        CP_LOAD_STATE_0_NUM_UNIT(tex->num_textures));
      OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
                        CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
      for (i = 0; i < tex->num_textures; i++) {
         static const struct fd3_pipe_sampler_view dummy_view = {};
         const struct fd3_pipe_sampler_view *view =
            tex->textures[i] ? fd3_pipe_sampler_view(tex->textures[i])
                             : &dummy_view;
         OUT_RING(ring, view->texconst0);
         OUT_RING(ring, view->texconst1);
         OUT_RING(ring,
                  view->texconst2 | A3XX_TEX_CONST_2_INDX(BASETABLE_SZ * i));
         OUT_RING(ring, view->texconst3);
      }

      /* emit mipaddrs: */
      OUT_PKT3(ring, CP_LOAD_STATE, 2 + (BASETABLE_SZ * tex->num_textures));
      OUT_RING(ring,
               CP_LOAD_STATE_0_DST_OFF(BASETABLE_SZ * tex_off[sb]) |
                  CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
                  CP_LOAD_STATE_0_STATE_BLOCK(mipaddr[sb]) |
                  CP_LOAD_STATE_0_NUM_UNIT(BASETABLE_SZ * tex->num_textures));
      OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
                        CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
      for (i = 0; i < tex->num_textures; i++) {
         static const struct fd3_pipe_sampler_view dummy_view = {
            .base.target = PIPE_TEXTURE_1D, /* anything !PIPE_BUFFER */
            .base.u.tex.first_level = 1,
         };
         const struct fd3_pipe_sampler_view *view =
            tex->textures[i] ? fd3_pipe_sampler_view(tex->textures[i])
                             : &dummy_view;
         struct fd_resource *rsc = fd_resource(view->base.texture);
         if (rsc && rsc->b.b.target == PIPE_BUFFER) {
            OUT_RELOC(ring, rsc->bo, view->base.u.buf.offset, 0, 0);
            j = 1;
         } else {
            unsigned start = fd_sampler_first_level(&view->base);
            unsigned end = fd_sampler_last_level(&view->base);

            for (j = 0; j < (end - start + 1); j++) {
               struct fdl_slice *slice = fd_resource_slice(rsc, j + start);
               OUT_RELOC(ring, rsc->bo, slice->offset, 0, 0);
            }
         }

         /* pad the remaining entries w/ null: */
         for (; j < BASETABLE_SZ; j++) {
            OUT_RING(ring, 0x00000000);
         }
      }
   }

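   /* border colors don't fit in the sampler state itself, so they are
    * uploaded to a buffer and the TPL1 border-color base-address register
    * is pointed at it:
    */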
   if (needs_border) {
      unsigned off;
      void *ptr;

      u_upload_alloc(fd3_ctx->border_color_uploader, 0,
                     BORDER_COLOR_UPLOAD_SIZE, BORDER_COLOR_UPLOAD_SIZE, &off,
                     &fd3_ctx->border_color_buf, &ptr);

      fd_setup_border_colors(tex, ptr, tex_off[sb]);

      OUT_PKT0(ring, bcolor_reg[sb], 1);
      OUT_RELOC(ring, fd_resource(fd3_ctx->border_color_buf)->bo, off, 0, 0);

      u_upload_unmap(fd3_ctx->border_color_uploader);
   }
}

/* emit texture state for mem->gmem restore operation.. eventually it would
 * be good to get rid of this and use normal CSO/etc state for more of these
 * special cases, but for now the compiler is not sufficient..
 *
 * Also, for using normal state, not quite sure how to handle the special
 * case format (fd3_gmem_restore_format()) stuff for restoring depth/stencil.
 */
void
fd3_emit_gmem_restore_tex(struct fd_ringbuffer *ring,
                          struct pipe_surface **psurf, int bufs)
{
   int i, j;

   /* output sampler state: */
   OUT_PKT3(ring, CP_LOAD_STATE, 2 + 2 * bufs);
   OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(FRAG_TEX_OFF) |
                     CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
                     CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
                     CP_LOAD_STATE_0_NUM_UNIT(bufs));
   OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
                     CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
   for (i = 0; i < bufs; i++) {
      OUT_RING(ring, A3XX_TEX_SAMP_0_XY_MAG(A3XX_TEX_NEAREST) |
                        A3XX_TEX_SAMP_0_XY_MIN(A3XX_TEX_NEAREST) |
                        A3XX_TEX_SAMP_0_WRAP_S(A3XX_TEX_CLAMP_TO_EDGE) |
                        A3XX_TEX_SAMP_0_WRAP_T(A3XX_TEX_CLAMP_TO_EDGE) |
                        A3XX_TEX_SAMP_0_WRAP_R(A3XX_TEX_REPEAT));
      OUT_RING(ring, 0x00000000);
   }

   /* emit texture state: */
   OUT_PKT3(ring, CP_LOAD_STATE, 2 + 4 * bufs);
   OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(FRAG_TEX_OFF) |
                     CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
                     CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
                     CP_LOAD_STATE_0_NUM_UNIT(bufs));
   OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
                     CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
   for (i = 0; i < bufs; i++) {
      if (!psurf[i]) {
         OUT_RING(ring, A3XX_TEX_CONST_0_TYPE(A3XX_TEX_2D) |
                           A3XX_TEX_CONST_0_SWIZ_X(A3XX_TEX_ONE) |
                           A3XX_TEX_CONST_0_SWIZ_Y(A3XX_TEX_ONE) |
                           A3XX_TEX_CONST_0_SWIZ_Z(A3XX_TEX_ONE) |
                           A3XX_TEX_CONST_0_SWIZ_W(A3XX_TEX_ONE));
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, A3XX_TEX_CONST_2_INDX(BASETABLE_SZ * i));
         OUT_RING(ring, 0x00000000);
         continue;
      }

      struct fd_resource *rsc = fd_resource(psurf[i]->texture);
      enum pipe_format format = fd_gmem_restore_format(psurf[i]->format);
      /* The restore blit_zs shader expects stencil in sampler 0, and depth
       * in sampler 1
       */
      if (rsc->stencil && i == 0) {
         rsc = rsc->stencil;
         format = fd_gmem_restore_format(rsc->b.b.format);
      }

      /* note: PIPE_BUFFER disallowed for surfaces */
      unsigned lvl = psurf[i]->u.tex.level;

      assert(psurf[i]->u.tex.first_layer == psurf[i]->u.tex.last_layer);

      OUT_RING(ring, A3XX_TEX_CONST_0_TILE_MODE(rsc->layout.tile_mode) |
                        A3XX_TEX_CONST_0_FMT(fd3_pipe2tex(format)) |
                        A3XX_TEX_CONST_0_TYPE(A3XX_TEX_2D) |
                        fd3_tex_swiz(format, PIPE_SWIZZLE_X, PIPE_SWIZZLE_Y,
                                     PIPE_SWIZZLE_Z, PIPE_SWIZZLE_W));
      OUT_RING(ring, A3XX_TEX_CONST_1_WIDTH(psurf[i]->width) |
                        A3XX_TEX_CONST_1_HEIGHT(psurf[i]->height));
      OUT_RING(ring, A3XX_TEX_CONST_2_PITCH(fd_resource_pitch(rsc, lvl)) |
                        A3XX_TEX_CONST_2_INDX(BASETABLE_SZ * i));
      OUT_RING(ring, 0x00000000);
   }

   /* emit mipaddrs: */
   OUT_PKT3(ring, CP_LOAD_STATE, 2 + BASETABLE_SZ * bufs);
   OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(BASETABLE_SZ * FRAG_TEX_OFF) |
                     CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
                     CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_MIPADDR) |
                     CP_LOAD_STATE_0_NUM_UNIT(BASETABLE_SZ * bufs));
   OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
                     CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
   for (i = 0; i < bufs; i++) {
      if (psurf[i]) {
         struct fd_resource *rsc = fd_resource(psurf[i]->texture);
         /* Matches above logic for blit_zs shader */
         if (rsc->stencil && i == 0)
            rsc = rsc->stencil;
         unsigned lvl = psurf[i]->u.tex.level;
         uint32_t offset =
            fd_resource_offset(rsc, lvl, psurf[i]->u.tex.first_layer);
         OUT_RELOC(ring, rsc->bo, offset, 0, 0);
      } else {
         OUT_RING(ring, 0x00000000);
      }

      /* pad the remaining entries w/ null: */
      for (j = 1; j < BASETABLE_SZ; j++) {
         OUT_RING(ring, 0x00000000);
      }
   }
}

void
fd3_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd3_emit *emit)
{
   int32_t i, j, last = -1;
   uint32_t total_in = 0;
   const struct fd_vertex_state *vtx = emit->vtx;
   const struct ir3_shader_variant *vp = fd3_emit_get_vp(emit);
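   /* regid(63, 0) (r63.x) is the ir3 "unused" sentinel; these only get a
    * real register if the VS actually reads the sysval:
    */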
   unsigned vertex_regid = regid(63, 0);
   unsigned instance_regid = regid(63, 0);
   unsigned vtxcnt_regid = regid(63, 0);

   /* Note that sysvals come *after* normal inputs: */
   for (i = 0; i < vp->inputs_count; i++) {
      if (!vp->inputs[i].compmask)
         continue;
      if (vp->inputs[i].sysval) {
         switch (vp->inputs[i].slot) {
         case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
            vertex_regid = vp->inputs[i].regid;
            break;
         case SYSTEM_VALUE_INSTANCE_ID:
            instance_regid = vp->inputs[i].regid;
            break;
         case SYSTEM_VALUE_VERTEX_CNT:
            vtxcnt_regid = vp->inputs[i].regid;
            break;
         default:
            unreachable("invalid system value");
            break;
         }
      } else if (i < vtx->vtx->num_elements) {
         last = i;
      }
   }

   for (i = 0, j = 0; i <= last; i++) {
      assert(!vp->inputs[i].sysval);
      if (vp->inputs[i].compmask) {
         struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
         const struct pipe_vertex_buffer *vb =
            &vtx->vertexbuf.vb[elem->vertex_buffer_index];
         struct fd_resource *rsc = fd_resource(vb->buffer.resource);
         enum pipe_format pfmt = elem->src_format;
         enum a3xx_vtx_fmt fmt = fd3_pipe2vtx(pfmt);
         bool switchnext = (i != last) || (vertex_regid != regid(63, 0)) ||
                           (instance_regid != regid(63, 0)) ||
                           (vtxcnt_regid != regid(63, 0));
         bool isint = util_format_is_pure_integer(pfmt);
         uint32_t off = vb->buffer_offset + elem->src_offset;
         uint32_t fs = util_format_get_blocksize(pfmt);

         assert(fmt != VFMT_NONE);

         OUT_PKT0(ring, REG_A3XX_VFD_FETCH(j), 2);
         OUT_RING(ring, A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(fs - 1) |
                           A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(elem->src_stride) |
                           COND(switchnext, A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT) |
                           A3XX_VFD_FETCH_INSTR_0_INDEXCODE(j) |
                           COND(elem->instance_divisor,
                                A3XX_VFD_FETCH_INSTR_0_INSTANCED) |
                           A3XX_VFD_FETCH_INSTR_0_STEPRATE(
                              MAX2(1, elem->instance_divisor)));
         OUT_RELOC(ring, rsc->bo, off, 0, 0);

         OUT_PKT0(ring, REG_A3XX_VFD_DECODE_INSTR(j), 1);
         OUT_RING(ring,
                  A3XX_VFD_DECODE_INSTR_CONSTFILL |
                     A3XX_VFD_DECODE_INSTR_WRITEMASK(vp->inputs[i].compmask) |
                     A3XX_VFD_DECODE_INSTR_FORMAT(fmt) |
                     A3XX_VFD_DECODE_INSTR_SWAP(fd3_pipe2swap(pfmt)) |
                     A3XX_VFD_DECODE_INSTR_REGID(vp->inputs[i].regid) |
                     A3XX_VFD_DECODE_INSTR_SHIFTCNT(fs) |
                     A3XX_VFD_DECODE_INSTR_LASTCOMPVALID |
                     COND(isint, A3XX_VFD_DECODE_INSTR_INT) |
                     COND(switchnext, A3XX_VFD_DECODE_INSTR_SWITCHNEXT));

         total_in += util_bitcount(vp->inputs[i].compmask);
         j++;
      }
   }

   /* hw doesn't like to be configured for zero vbo's, it seems: */
   if (last < 0) {
      /* just recycle the shader bo, we just need to point to *something*
       * valid:
       */
      struct fd_bo *dummy_vbo = vp->bo;
      bool switchnext = (vertex_regid != regid(63, 0)) ||
                        (instance_regid != regid(63, 0)) ||
                        (vtxcnt_regid != regid(63, 0));

      OUT_PKT0(ring, REG_A3XX_VFD_FETCH(0), 2);
      OUT_RING(ring, A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(0) |
                        A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(0) |
                        COND(switchnext, A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT) |
                        A3XX_VFD_FETCH_INSTR_0_INDEXCODE(0) |
                        A3XX_VFD_FETCH_INSTR_0_STEPRATE(1));
      OUT_RELOC(ring, dummy_vbo, 0, 0, 0);

      OUT_PKT0(ring, REG_A3XX_VFD_DECODE_INSTR(0), 1);
      OUT_RING(ring, A3XX_VFD_DECODE_INSTR_CONSTFILL |
                        A3XX_VFD_DECODE_INSTR_WRITEMASK(0x1) |
                        A3XX_VFD_DECODE_INSTR_FORMAT(VFMT_8_UNORM) |
                        A3XX_VFD_DECODE_INSTR_SWAP(XYZW) |
                        A3XX_VFD_DECODE_INSTR_REGID(regid(0, 0)) |
                        A3XX_VFD_DECODE_INSTR_SHIFTCNT(1) |
                        A3XX_VFD_DECODE_INSTR_LASTCOMPVALID |
                        COND(switchnext, A3XX_VFD_DECODE_INSTR_SWITCHNEXT));

      total_in = 1;
      j = 1;
   }

   OUT_PKT0(ring, REG_A3XX_VFD_CONTROL_0, 2);
   OUT_RING(ring, A3XX_VFD_CONTROL_0_TOTALATTRTOVS(total_in) |
                     A3XX_VFD_CONTROL_0_PACKETSIZE(2) |
                     A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(j) |
                     A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(j));
   OUT_RING(ring, A3XX_VFD_CONTROL_1_MAXSTORAGE(1) | // XXX
                     A3XX_VFD_CONTROL_1_REGID4VTX(vertex_regid) |
                     A3XX_VFD_CONTROL_1_REGID4INST(instance_regid));

   OUT_PKT0(ring, REG_A3XX_VFD_VS_THREADING_THRESHOLD, 1);
   OUT_RING(ring,
            A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(15) |
               A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(vtxcnt_regid));
}

void
fd3_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
               struct fd3_emit *emit)
{
   const struct ir3_shader_variant *vp = fd3_emit_get_vp(emit);
   const struct ir3_shader_variant *fp = fd3_emit_get_fp(emit);
   const enum fd_dirty_3d_state dirty = emit->dirty;

   emit_marker(ring, 5);

   if (dirty & FD_DIRTY_SAMPLE_MASK) {
      OUT_PKT0(ring, REG_A3XX_RB_MSAA_CONTROL, 1);
      OUT_RING(ring, A3XX_RB_MSAA_CONTROL_DISABLE |
                        A3XX_RB_MSAA_CONTROL_SAMPLES(MSAA_ONE) |
                        A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(ctx->sample_mask));
   }

   if ((dirty & (FD_DIRTY_ZSA | FD_DIRTY_RASTERIZER | FD_DIRTY_PROG |
                 FD_DIRTY_BLEND_DUAL)) &&
       !emit->binning_pass) {
      uint32_t val = fd3_zsa_stateobj(ctx->zsa)->rb_render_control |
                     fd3_blend_stateobj(ctx->blend)->rb_render_control;

      val |= COND(fp->frag_face, A3XX_RB_RENDER_CONTROL_FACENESS);
      val |= COND(fp->fragcoord_compmask != 0,
                  A3XX_RB_RENDER_CONTROL_COORD_MASK(fp->fragcoord_compmask));
      val |= COND(ctx->rasterizer->rasterizer_discard,
                  A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE);

      /* I suppose if we needed to (which I don't *think* we need
       * to), we could emit this for binning pass too.  But we
       * would need to keep a different patch-list for binning
       * vs render pass.
       */

      OUT_PKT0(ring, REG_A3XX_RB_RENDER_CONTROL, 1);
      OUT_RINGP(ring, val, &ctx->batch->rbrc_patches);
   }

   if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_STENCIL_REF)) {
      struct fd3_zsa_stateobj *zsa = fd3_zsa_stateobj(ctx->zsa);
      struct pipe_stencil_ref *sr = &ctx->stencil_ref;

      OUT_PKT0(ring, REG_A3XX_RB_ALPHA_REF, 1);
      OUT_RING(ring, zsa->rb_alpha_ref);

      OUT_PKT0(ring, REG_A3XX_RB_STENCIL_CONTROL, 1);
      OUT_RING(ring, zsa->rb_stencil_control);

      OUT_PKT0(ring, REG_A3XX_RB_STENCILREFMASK, 2);
      OUT_RING(ring, zsa->rb_stencilrefmask |
                        A3XX_RB_STENCILREFMASK_STENCILREF(sr->ref_value[0]));
      OUT_RING(ring, zsa->rb_stencilrefmask_bf |
                        A3XX_RB_STENCILREFMASK_BF_STENCILREF(sr->ref_value[1]));
   }

   if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
      uint32_t val = fd3_zsa_stateobj(ctx->zsa)->rb_depth_control;
      if (fp->writes_pos) {
         val |= A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z;
         val |= A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE;
      }
      if (fp->no_earlyz || fp->has_kill) {
         val |= A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE;
      }
      if (!ctx->rasterizer->depth_clip_near) {
         val |= A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE;
      }
      OUT_PKT0(ring, REG_A3XX_RB_DEPTH_CONTROL, 1);
      OUT_RING(ring, val);
   }

   if (dirty & FD_DIRTY_RASTERIZER) {
      struct fd3_rasterizer_stateobj *rasterizer =
         fd3_rasterizer_stateobj(ctx->rasterizer);

      OUT_PKT0(ring, REG_A3XX_GRAS_SU_MODE_CONTROL, 1);
      OUT_RING(ring, rasterizer->gras_su_mode_control);

      OUT_PKT0(ring, REG_A3XX_GRAS_SU_POINT_MINMAX, 2);
      OUT_RING(ring, rasterizer->gras_su_point_minmax);
      OUT_RING(ring, rasterizer->gras_su_point_size);

      OUT_PKT0(ring, REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE, 2);
      OUT_RING(ring, rasterizer->gras_su_poly_offset_scale);
      OUT_RING(ring, rasterizer->gras_su_poly_offset_offset);
   }

   if (dirty & (FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
      uint32_t val =
         fd3_rasterizer_stateobj(ctx->rasterizer)->gras_cl_clip_cntl;
      uint8_t planes = ctx->rasterizer->clip_plane_enable;
      val |= CONDREG(
         ir3_find_sysval_regid(fp, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL),
         A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER);
      val |= CONDREG(
         ir3_find_sysval_regid(fp, SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL),
         A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTER);
      val |= CONDREG(
         ir3_find_sysval_regid(fp, SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID),
         A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTROID);
      val |= CONDREG(
         ir3_find_sysval_regid(fp, SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID),
         A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTROID);
      /* docs say enable at least one of IJ_PERSP_CENTER/CENTROID when
       * fragcoord is used */
      val |= CONDREG(ir3_find_sysval_regid(fp, SYSTEM_VALUE_FRAG_COORD),
                     A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER);
      val |= COND(fp->writes_pos, A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE);
      val |=
         COND(fp->fragcoord_compmask != 0,
              A3XX_GRAS_CL_CLIP_CNTL_ZCOORD | A3XX_GRAS_CL_CLIP_CNTL_WCOORD);
      if (!emit->key.key.ucp_enables)
         val |= A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES(
            MIN2(util_bitcount(planes), 6));
      OUT_PKT0(ring, REG_A3XX_GRAS_CL_CLIP_CNTL, 1);
      OUT_RING(ring, val);
   }

   if (dirty & (FD_DIRTY_RASTERIZER | FD_DIRTY_PROG | FD_DIRTY_UCP)) {
      uint32_t planes = ctx->rasterizer->clip_plane_enable;
      int count = 0;

      if (emit->key.key.ucp_enables)
         planes = 0;

      while (planes && count < 6) {
         int i = ffs(planes) - 1;

         planes &= ~(1U << i);
         fd_wfi(ctx->batch, ring);
         OUT_PKT0(ring, REG_A3XX_GRAS_CL_USER_PLANE(count++), 4);
         OUT_RING(ring, fui(ctx->ucp.ucp[i][0]));
         OUT_RING(ring, fui(ctx->ucp.ucp[i][1]));
         OUT_RING(ring, fui(ctx->ucp.ucp[i][2]));
         OUT_RING(ring, fui(ctx->ucp.ucp[i][3]));
      }
   }

   /* NOTE: since primitive_restart is not actually part of any
    * state object, we need to make sure that we always emit
    * PRIM_VTX_CNTL.. either that or be more clever and detect
    * when it changes.
    */
   if (emit->info) {
      const struct pipe_draw_info *info = emit->info;
      uint32_t val = fd3_rasterizer_stateobj(ctx->rasterizer)->pc_prim_vtx_cntl;

      if (!emit->binning_pass) {
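         /* stride is in units of vec4; the hw seems to want a minimum of
          * 2 whenever any varyings are in play, hence the MAX2:
          */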
         uint32_t stride_in_vpc = align(fp->total_in, 4) / 4;
         if (stride_in_vpc > 0)
            stride_in_vpc = MAX2(stride_in_vpc, 2);
         val |= A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(stride_in_vpc);
      }

      if (info->index_size && info->primitive_restart) {
         val |= A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART;
      }

      val |= COND(vp->writes_psize, A3XX_PC_PRIM_VTX_CNTL_PSIZE);

      OUT_PKT0(ring, REG_A3XX_PC_PRIM_VTX_CNTL, 1);
      OUT_RING(ring, val);
   }

   if (dirty & (FD_DIRTY_SCISSOR | FD_DIRTY_RASTERIZER | FD_DIRTY_VIEWPORT)) {
      struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
      int minx = scissor->minx;
      int miny = scissor->miny;
      int maxx = scissor->maxx;
      int maxy = scissor->maxy;

      /* Unfortunately there is no separate depth clip disable, only an all
       * or nothing deal.  So when we disable clipping, we must handle the
       * viewport clip via scissors.
       */
      if (!ctx->rasterizer->depth_clip_near) {
         struct pipe_viewport_state *vp = &ctx->viewport[0];

         minx = MAX2(minx, (int)floorf(vp->translate[0] - fabsf(vp->scale[0])));
         miny = MAX2(miny, (int)floorf(vp->translate[1] - fabsf(vp->scale[1])));
         maxx = MIN2(maxx + 1,
                     (int)ceilf(vp->translate[0] + fabsf(vp->scale[0]))) - 1;
         maxy = MIN2(maxy + 1,
                     (int)ceilf(vp->translate[1] + fabsf(vp->scale[1]))) - 1;
      }

      OUT_PKT0(ring, REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
      OUT_RING(ring, A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(minx) |
                        A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(miny));
      OUT_RING(ring, A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(maxx) |
                        A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(maxy));

      ctx->batch->max_scissor.minx = MIN2(ctx->batch->max_scissor.minx, minx);
      ctx->batch->max_scissor.miny = MIN2(ctx->batch->max_scissor.miny, miny);
      ctx->batch->max_scissor.maxx = MAX2(ctx->batch->max_scissor.maxx, maxx);
      ctx->batch->max_scissor.maxy = MAX2(ctx->batch->max_scissor.maxy, maxy);
   }

   if (dirty & FD_DIRTY_VIEWPORT) {
      struct pipe_viewport_state *vp = &ctx->viewport[0];

      fd_wfi(ctx->batch, ring);

      OUT_PKT0(ring, REG_A3XX_GRAS_CL_VPORT_XOFFSET, 6);
      OUT_RING(ring, A3XX_GRAS_CL_VPORT_XOFFSET(vp->translate[0] - 0.5f));
      OUT_RING(ring, A3XX_GRAS_CL_VPORT_XSCALE(vp->scale[0]));
      OUT_RING(ring, A3XX_GRAS_CL_VPORT_YOFFSET(vp->translate[1] - 0.5f));
      OUT_RING(ring, A3XX_GRAS_CL_VPORT_YSCALE(vp->scale[1]));
      OUT_RING(ring, A3XX_GRAS_CL_VPORT_ZOFFSET(vp->translate[2]));
      OUT_RING(ring, A3XX_GRAS_CL_VPORT_ZSCALE(vp->scale[2]));
   }

   if (dirty &
       (FD_DIRTY_VIEWPORT | FD_DIRTY_RASTERIZER | FD_DIRTY_FRAMEBUFFER)) {
      float zmin, zmax;
      int depth = 24;
      if (ctx->batch->framebuffer.zsbuf) {
         depth = util_format_get_component_bits(
            pipe_surface_format(ctx->batch->framebuffer.zsbuf),
            UTIL_FORMAT_COLORSPACE_ZS, 0);
      }
      util_viewport_zmin_zmax(&ctx->viewport[0], ctx->rasterizer->clip_halfz,
                              &zmin, &zmax);

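      /* convert to unsigned fixed point matching the bit depth of the
       * depth buffer (defaulting to 24b when there is no zsbuf):
       */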
      OUT_PKT0(ring, REG_A3XX_RB_Z_CLAMP_MIN, 2);
      if (depth == 32) {
         OUT_RING(ring, (uint32_t)(zmin * (float)0xffffffff));
         OUT_RING(ring, (uint32_t)(zmax * (float)0xffffffff));
      } else if (depth == 16) {
         OUT_RING(ring, (uint32_t)(zmin * 0xffff));
         OUT_RING(ring, (uint32_t)(zmax * 0xffff));
      } else {
         OUT_RING(ring, (uint32_t)(zmin * 0xffffff));
         OUT_RING(ring, (uint32_t)(zmax * 0xffffff));
      }
   }

   if (dirty & (FD_DIRTY_PROG | FD_DIRTY_FRAMEBUFFER | FD_DIRTY_BLEND_DUAL)) {
      struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
      int nr_cbufs = pfb->nr_cbufs;
      if (fd3_blend_stateobj(ctx->blend)->rb_render_control &
          A3XX_RB_RENDER_CONTROL_DUAL_COLOR_IN_ENABLE)
         nr_cbufs++;
      fd3_program_emit(ring, emit, nr_cbufs, pfb->cbufs);
   }

   /* TODO we should not need this or fd_wfi() before emit_constants():
    */
   OUT_PKT3(ring, CP_EVENT_WRITE, 1);
   OUT_RING(ring, HLSQ_FLUSH);

   if (!emit->skip_consts) {
      ir3_emit_vs_consts(vp, ring, ctx, emit->info, emit->indirect, emit->draw);
      if (!emit->binning_pass)
         ir3_emit_fs_consts(fp, ring, ctx);
   }

   if (dirty & (FD_DIRTY_BLEND | FD_DIRTY_FRAMEBUFFER)) {
      struct fd3_blend_stateobj *blend = fd3_blend_stateobj(ctx->blend);
      uint32_t i;

      for (i = 0; i < ARRAY_SIZE(blend->rb_mrt); i++) {
         enum pipe_format format =
            pipe_surface_format(ctx->batch->framebuffer.cbufs[i]);
         const struct util_format_description *desc =
            util_format_description(format);
         bool is_float = util_format_is_float(format);
         bool is_int = util_format_is_pure_integer(format);
         bool has_alpha = util_format_has_alpha(format);
         uint32_t control = blend->rb_mrt[i].control;

         if (is_int) {
            control &= (A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK |
                        A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK);
            control |= A3XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
         }

         if (format == PIPE_FORMAT_NONE)
            control &= ~A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;

         if (!has_alpha) {
            control &= ~A3XX_RB_MRT_CONTROL_BLEND2;
         }

         if (format && util_format_get_component_bits(
                          format, UTIL_FORMAT_COLORSPACE_RGB, 0) < 8) {
            const struct pipe_rt_blend_state *rt;
            if (ctx->blend->independent_blend_enable)
               rt = &ctx->blend->rt[i];
            else
               rt = &ctx->blend->rt[0];

            if (!util_format_colormask_full(desc, rt->colormask))
               control |= A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE;
         }

         OUT_PKT0(ring, REG_A3XX_RB_MRT_CONTROL(i), 1);
         OUT_RING(ring, control);

         OUT_PKT0(ring, REG_A3XX_RB_MRT_BLEND_CONTROL(i), 1);
         OUT_RING(ring,
                  blend->rb_mrt[i].blend_control |
                     COND(!is_float, A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE));
      }
   }

   if (dirty & FD_DIRTY_BLEND_COLOR) {
      struct pipe_blend_color *bcolor = &ctx->blend_color;
      OUT_PKT0(ring, REG_A3XX_RB_BLEND_RED, 4);
      OUT_RING(ring,
               A3XX_RB_BLEND_RED_UINT(CLAMP(bcolor->color[0], 0.f, 1.f) * 0xff) |
                  A3XX_RB_BLEND_RED_FLOAT(bcolor->color[0]));
      OUT_RING(ring,
               A3XX_RB_BLEND_GREEN_UINT(CLAMP(bcolor->color[1], 0.f, 1.f) * 0xff) |
                  A3XX_RB_BLEND_GREEN_FLOAT(bcolor->color[1]));
      OUT_RING(ring,
               A3XX_RB_BLEND_BLUE_UINT(CLAMP(bcolor->color[2], 0.f, 1.f) * 0xff) |
                  A3XX_RB_BLEND_BLUE_FLOAT(bcolor->color[2]));
      OUT_RING(ring,
               A3XX_RB_BLEND_ALPHA_UINT(CLAMP(bcolor->color[3], 0.f, 1.f) * 0xff) |
                  A3XX_RB_BLEND_ALPHA_FLOAT(bcolor->color[3]));
   }

   if (dirty & FD_DIRTY_TEX)
      fd_wfi(ctx->batch, ring);

   if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX)
      emit_textures(ctx, ring, SB_VERT_TEX, &ctx->tex[PIPE_SHADER_VERTEX]);

   if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX)
      emit_textures(ctx, ring, SB_FRAG_TEX, &ctx->tex[PIPE_SHADER_FRAGMENT]);
}

/* emit setup at begin of new cmdstream buffer (don't rely on previous
 * state, there could have been a context switch between ioctls):
 */
void
fd3_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct fd_context *ctx = batch->ctx;
   struct fd3_context *fd3_ctx = fd3_context(ctx);
   int i;

   if (ctx->screen->gpu_id == 320) {
      OUT_PKT3(ring, CP_REG_RMW, 3);
      OUT_RING(ring, REG_A3XX_RBBM_CLOCK_CTL);
      OUT_RING(ring, 0xfffcffff);
      OUT_RING(ring, 0x00000000);
   }

   fd_wfi(batch, ring);
   OUT_PKT3(ring, CP_INVALIDATE_STATE, 1);
   OUT_RING(ring, 0x00007fff);

   OUT_PKT0(ring, REG_A3XX_SP_VS_PVT_MEM_PARAM_REG, 3);
   OUT_RING(ring, 0x08000001);                    /* SP_VS_PVT_MEM_CTRL_REG */
   OUT_RELOC(ring, fd3_ctx->vs_pvt_mem, 0, 0, 0); /* SP_VS_PVT_MEM_ADDR_REG */
   OUT_RING(ring, 0x00000000);                    /* SP_VS_PVT_MEM_SIZE_REG */

   OUT_PKT0(ring, REG_A3XX_SP_FS_PVT_MEM_PARAM_REG, 3);
   OUT_RING(ring, 0x08000001);                    /* SP_FS_PVT_MEM_CTRL_REG */
   OUT_RELOC(ring, fd3_ctx->fs_pvt_mem, 0, 0, 0); /* SP_FS_PVT_MEM_ADDR_REG */
   OUT_RING(ring, 0x00000000);                    /* SP_FS_PVT_MEM_SIZE_REG */

   OUT_PKT0(ring, REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL, 1);
   OUT_RING(ring, 0x0000000b); /* PC_VERTEX_REUSE_BLOCK_CNTL */

   OUT_PKT0(ring, REG_A3XX_GRAS_SC_CONTROL, 1);
   OUT_RING(ring, A3XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RENDERING_PASS) |
                     A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
                     A3XX_GRAS_SC_CONTROL_RASTER_MODE(0));

   OUT_PKT0(ring, REG_A3XX_RB_MSAA_CONTROL, 2);
   OUT_RING(ring, A3XX_RB_MSAA_CONTROL_DISABLE |
                     A3XX_RB_MSAA_CONTROL_SAMPLES(MSAA_ONE) |
                     A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(0xffff));
   OUT_RING(ring, 0x00000000); /* RB_ALPHA_REF */

   OUT_PKT0(ring, REG_A3XX_GRAS_CL_GB_CLIP_ADJ, 1);
   OUT_RING(ring, A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(0) |
                     A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(0));

   OUT_PKT0(ring, REG_A3XX_GRAS_TSE_DEBUG_ECO, 1);
   OUT_RING(ring, 0x00000001); /* GRAS_TSE_DEBUG_ECO */

   OUT_PKT0(ring, REG_A3XX_TPL1_TP_VS_TEX_OFFSET, 1);
   OUT_RING(ring, A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(VERT_TEX_OFF) |
                     A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(VERT_TEX_OFF) |
                     A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(BASETABLE_SZ *
                                                             VERT_TEX_OFF));

   OUT_PKT0(ring, REG_A3XX_TPL1_TP_FS_TEX_OFFSET, 1);
   OUT_RING(ring, A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(FRAG_TEX_OFF) |
                     A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(FRAG_TEX_OFF) |
                     A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(BASETABLE_SZ *
                                                             FRAG_TEX_OFF));

   OUT_PKT0(ring, REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0, 2);
   OUT_RING(ring, 0x00000000); /* VPC_VARY_CYLWRAP_ENABLE_0 */
   OUT_RING(ring, 0x00000000); /* VPC_VARY_CYLWRAP_ENABLE_1 */

   OUT_PKT0(ring, REG_A3XX_UNKNOWN_0E43, 1);
   OUT_RING(ring, 0x00000001); /* UNKNOWN_0E43 */

   OUT_PKT0(ring, REG_A3XX_UNKNOWN_0F03, 1);
   OUT_RING(ring, 0x00000001); /* UNKNOWN_0F03 */

   OUT_PKT0(ring, REG_A3XX_UNKNOWN_0EE0, 1);
   OUT_RING(ring, 0x00000003); /* UNKNOWN_0EE0 */

   OUT_PKT0(ring, REG_A3XX_UNKNOWN_0C3D, 1);
   OUT_RING(ring, 0x00000001); /* UNKNOWN_0C3D */

   OUT_PKT0(ring, REG_A3XX_HLSQ_PERFCOUNTER0_SELECT, 1);
   OUT_RING(ring, 0x00000000); /* HLSQ_PERFCOUNTER0_SELECT */

   OUT_PKT0(ring, REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG, 2);
   OUT_RING(ring, A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(0) |
                     A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(0));
   OUT_RING(ring, A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(0) |
                     A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(0));

   fd3_emit_cache_flush(batch, ring);

   OUT_PKT0(ring, REG_A3XX_GRAS_CL_CLIP_CNTL, 1);
   OUT_RING(ring, 0x00000000); /* GRAS_CL_CLIP_CNTL */

   OUT_PKT0(ring, REG_A3XX_GRAS_SU_POINT_MINMAX, 2);
   OUT_RING(ring, 0xffc00010); /* GRAS_SU_POINT_MINMAX */
   OUT_RING(ring, 0x00000008); /* GRAS_SU_POINT_SIZE */

   OUT_PKT0(ring, REG_A3XX_PC_RESTART_INDEX, 1);
   OUT_RING(ring, 0xffffffff); /* PC_RESTART_INDEX */

   OUT_PKT0(ring, REG_A3XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A3XX_RB_WINDOW_OFFSET_X(0) | A3XX_RB_WINDOW_OFFSET_Y(0));

   OUT_PKT0(ring, REG_A3XX_RB_BLEND_RED, 4);
   OUT_RING(ring, A3XX_RB_BLEND_RED_UINT(0) | A3XX_RB_BLEND_RED_FLOAT(0.0f));
   OUT_RING(ring, A3XX_RB_BLEND_GREEN_UINT(0) | A3XX_RB_BLEND_GREEN_FLOAT(0.0f));
   OUT_RING(ring, A3XX_RB_BLEND_BLUE_UINT(0) | A3XX_RB_BLEND_BLUE_FLOAT(0.0f));
   OUT_RING(ring,
            A3XX_RB_BLEND_ALPHA_UINT(0xff) | A3XX_RB_BLEND_ALPHA_FLOAT(1.0f));

   for (i = 0; i < 6; i++) {
      OUT_PKT0(ring, REG_A3XX_GRAS_CL_USER_PLANE(i), 4);
      OUT_RING(ring, 0x00000000); /* GRAS_CL_USER_PLANE[i].X */
      OUT_RING(ring, 0x00000000); /* GRAS_CL_USER_PLANE[i].Y */
      OUT_RING(ring, 0x00000000); /* GRAS_CL_USER_PLANE[i].Z */
      OUT_RING(ring, 0x00000000); /* GRAS_CL_USER_PLANE[i].W */
   }

   OUT_PKT0(ring, REG_A3XX_PC_VSTREAM_CONTROL, 1);
   OUT_RING(ring, 0x00000000);

   fd_event_write(batch, ring, CACHE_FLUSH);

   if (is_a3xx_p0(ctx->screen)) {
      OUT_PKT3(ring, CP_DRAW_INDX, 3);
      OUT_RING(ring, 0x00000000);
      OUT_RING(ring, DRAW(1, DI_SRC_SEL_AUTO_INDEX, INDEX_SIZE_IGN,
                          IGNORE_VISIBILITY, 0));
      OUT_RING(ring, 0); /* NumIndices */
   }

   OUT_PKT3(ring, CP_NOP, 4);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   fd_wfi(batch, ring);

   fd_hw_query_enable(batch, ring);
}

void
fd3_emit_init_screen(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = fd_screen(pscreen);
   screen->emit_ib = fd3_emit_ib;
}

void
fd3_emit_init(struct pipe_context *pctx)
{
}
970