xref: /aosp_15_r20/external/mesa3d/src/gallium/drivers/radeonsi/si_gfx_cs.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright 2010 Jerome Glisse <[email protected]>
3  * Copyright 2018 Advanced Micro Devices, Inc.
4  *
5  * SPDX-License-Identifier: MIT
6  */
7 
8 #include "si_build_pm4.h"
9 #include "si_pipe.h"
10 #include "sid.h"
11 #include "util/os_time.h"
12 #include "util/u_log.h"
13 #include "util/u_upload_mgr.h"
14 #include "ac_debug.h"
15 #include "si_utrace.h"
16 
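/* Shader debug log layout, as used by the two functions below: dword 0 holds
 * the number of entries appended by shaders, followed by up to 256 entries of
 * 4 dwords each used as a ring buffer (hence the 256 * 16 + 4 byte size).
 */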
17 void si_reset_debug_log_buffer(struct si_context *sctx)
18 {
19 #if SHADER_DEBUG_LOG
20    /* Create and bind the debug log buffer. */
21    unsigned size = 256 * 16 + 4;
22    struct pipe_resource *buf = &si_aligned_buffer_create(sctx->b.screen, SI_RESOURCE_FLAG_CLEAR,
23                                                          PIPE_USAGE_STAGING, size, 256)->b.b;
24    si_set_internal_shader_buffer(sctx, SI_RING_SHADER_LOG,
25                                  &(struct pipe_shader_buffer){
26                                     .buffer = buf,
27                                     .buffer_size = size});
28    pipe_resource_reference(&buf, NULL);
29 #endif
30 }
31 
32 static void si_dump_debug_log(struct si_context *sctx, bool sync)
33 {
34    struct pipe_resource *buf = sctx->internal_bindings.buffers[SI_RING_SHADER_LOG];
35    if (!buf)
36       return;
37 
38    struct pipe_transfer *transfer = NULL;
39    unsigned size = sctx->descriptors[SI_DESCS_INTERNAL].list[SI_RING_SHADER_LOG * 4 + 2];
40    unsigned max_entries = (size - 4) / 16;
41 
42    /* If not syncing (e.g. expecting a GPU hang), wait some time and then just print
43     * the log buffer.
44     */
45    if (!sync)
46       usleep(1000000);
47 
48    fprintf(stderr, "Reading shader log...\n");
49 
50    uint32_t *map = pipe_buffer_map(&sctx->b, buf,
51                                    PIPE_MAP_READ | (sync ? 0 : PIPE_MAP_UNSYNCHRONIZED),
52                                    &transfer);
53    unsigned num = map[0];
54    fprintf(stderr, "Shader log items: %u\n", num);
55 
56    if (!num) {
57       pipe_buffer_unmap(&sctx->b, transfer);
58       return;
59    }
60 
61 
62    unsigned first = num > max_entries ? num - max_entries : 0;
63    map++;
64 
65    for (unsigned i = first; i < num; i++) {
66       unsigned idx = i % max_entries;
67 
68       fprintf(stderr, "   [%u(%u)] = {%u, %u, %u, %u}\n", i, idx,
69               map[idx * 4], map[idx * 4 + 1], map[idx * 4 + 2], map[idx * 4 + 3]);
70    }
71    pipe_buffer_unmap(&sctx->b, transfer);
72 
73    si_reset_debug_log_buffer(sctx);
74 }
75 
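/* Flush the current graphics command stream: emit the end-of-IB barriers and
 * workarounds, submit the IB to the winsys, and then start a new one via
 * si_begin_new_gfx_cs(). "fence", if non-NULL, receives the submission fence.
 */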
76 void si_flush_gfx_cs(struct si_context *ctx, unsigned flags, struct pipe_fence_handle **fence)
77 {
78    struct radeon_cmdbuf *cs = &ctx->gfx_cs;
79    struct radeon_winsys *ws = ctx->ws;
80    struct si_screen *sscreen = ctx->screen;
81    const unsigned wait_ps_cs = SI_BARRIER_SYNC_PS | SI_BARRIER_SYNC_CS;
82    unsigned wait_flags = 0;
83 
84    if (ctx->gfx_flush_in_progress)
85       return;
86 
87    /* The amdgpu kernel driver synchronizes execution for shared DMABUFs between
88     * processes on DRM >= 3.39.0, so we don't have to wait at the end of IBs to
89     * make sure everything is idle.
90     *
91     * The amdgpu winsys synchronizes execution for buffers shared by different
92     * contexts within the same process.
93     *
94     * Interop with AMDVLK, RADV, or OpenCL within the same process requires
95     * explicit fences or glFinish.
96     */
97    if (sscreen->info.is_amdgpu && sscreen->info.drm_minor >= 39)
98       flags |= RADEON_FLUSH_START_NEXT_GFX_IB_NOW;
99 
100    if (ctx->gfx_level == GFX6) {
101       /* The kernel flushes L2 before shaders are finished. */
102       wait_flags |= wait_ps_cs;
103    } else if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW) ||
104               ((flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION) &&
105                 !ws->cs_is_secure(cs))) {
106       /* TODO: this workaround fixes subtitle rendering with mpv -vo=vaapi and
107        * TMZ, but it shouldn't be necessary.
108        */
109       wait_flags |= wait_ps_cs;
110    }
111 
112    /* Drop this flush if it's a no-op. */
113    if (!radeon_emitted(cs, ctx->initial_gfx_cs_size) &&
114        (!wait_flags || !ctx->gfx_last_ib_is_busy) &&
115        !(flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION)) {
116       tc_driver_internal_flush_notify(ctx->tc);
117       return;
118    }
119 
120    /* Non-aux contexts must set up no-op API dispatch on GPU resets. This is
121     * similar to si_get_reset_status but here we can ignore soft-recoveries,
122     * while si_get_reset_status can't. */
123    if (!(ctx->context_flags & SI_CONTEXT_FLAG_AUX) &&
124        ctx->device_reset_callback.reset) {
125       enum pipe_reset_status status = ctx->ws->ctx_query_reset_status(ctx->ctx, true, NULL, NULL);
126       if (status != PIPE_NO_RESET)
127          ctx->device_reset_callback.reset(ctx->device_reset_callback.data, status);
128    }
129 
130    if (sscreen->debug_flags & DBG(CHECK_VM))
131       flags &= ~PIPE_FLUSH_ASYNC;
132 
133    ctx->gfx_flush_in_progress = true;
134 
135    if (ctx->has_graphics) {
136       if (!list_is_empty(&ctx->active_queries))
137          si_suspend_queries(ctx);
138 
139       ctx->streamout.suspended = false;
140       if (ctx->streamout.begin_emitted) {
141          si_emit_streamout_end(ctx);
142          ctx->streamout.suspended = true;
143 
144          /* Make sure streamout is idle because the next process might change
145           * GE_GS_ORDERED_ID_BASE (which must not be changed when streamout is busy)
146           * and make this process appear guilty of the hang.
147           */
148          if (ctx->gfx_level >= GFX12)
149             wait_flags |= SI_BARRIER_SYNC_VS;
150       }
151    }
152 
153    /* Make sure CP DMA is idle at the end of IBs after L2 prefetches
154     * because the kernel doesn't wait for it. */
155    if (ctx->gfx_level >= GFX7 && ctx->screen->info.has_cp_dma)
156       si_cp_dma_wait_for_idle(ctx, &ctx->gfx_cs);
157 
158    /* If we use s_sendmsg to set tess factors to all 0 or all 1 instead of writing to the tess
159     * factor buffer, we need this at the end of command buffers:
160     */
161    if ((ctx->gfx_level == GFX11 || ctx->gfx_level == GFX11_5) && ctx->has_tessellation) {
162       radeon_begin(cs);
163       radeon_event_write(V_028A90_SQ_NON_EVENT);
164       radeon_end();
165    }
166 
167    /* Wait for draw calls to finish if needed. */
168    if (wait_flags) {
169       ctx->barrier_flags |= wait_flags;
170       si_emit_barrier_direct(ctx);
171    }
172    ctx->gfx_last_ib_is_busy = (wait_flags & wait_ps_cs) != wait_ps_cs;
173 
174    if (ctx->current_saved_cs) {
175       si_trace_emit(ctx);
176 
177       /* Save the IB for debug contexts. */
178       si_save_cs(ws, cs, &ctx->current_saved_cs->gfx, true);
179       ctx->current_saved_cs->flushed = true;
180       ctx->current_saved_cs->time_flush = os_time_get_nano();
181 
182       si_log_hw_flush(ctx);
183    }
184 
185    if (sscreen->debug_flags & DBG(IB))
186       si_print_current_ib(ctx, stderr);
187 
188    if (sscreen->context_roll_log_filename)
189       si_gather_context_rolls(ctx);
190 
191    if (ctx->is_noop)
192       flags |= RADEON_FLUSH_NOOP;
193 
194    uint64_t start_ts = 0, submission_id = 0;
195    if (u_trace_perfetto_active(&ctx->ds.trace_context)) {
196       start_ts = si_ds_begin_submit(&ctx->ds_queue);
197       submission_id = ctx->ds_queue.submission_id;
198    }
199 
200    /* Flush the CS. */
201    ws->cs_flush(cs, flags, &ctx->last_gfx_fence);
202 
203    if (u_trace_perfetto_active(&ctx->ds.trace_context) && start_ts > 0) {
204       si_ds_end_submit(&ctx->ds_queue, start_ts);
205    }
206 
207    tc_driver_internal_flush_notify(ctx->tc);
208    if (fence)
209       ws->fence_reference(ws, fence, ctx->last_gfx_fence);
210 
211    ctx->num_gfx_cs_flushes++;
212 
213    /* Check VM faults if needed. */
214    if (sscreen->debug_flags & DBG(CHECK_VM)) {
215       /* Use a conservative timeout of 800 ms, after which we stop waiting
216        * and assume the GPU is hung.
217        */
218       ctx->ws->fence_wait(ctx->ws, ctx->last_gfx_fence, 800 * 1000 * 1000);
219 
220       si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx);
221    }
222 
223    if (unlikely(ctx->sqtt && (flags & PIPE_FLUSH_END_OF_FRAME))) {
224       si_handle_sqtt(ctx, &ctx->gfx_cs);
225    }
226 
227    if (ctx->current_saved_cs)
228       si_saved_cs_reference(&ctx->current_saved_cs, NULL);
229 
230    if (u_trace_perfetto_active(&ctx->ds.trace_context))
231       si_utrace_flush(ctx, submission_id);
232 
233    si_begin_new_gfx_cs(ctx, false);
234    ctx->gfx_flush_in_progress = false;
235 
236 #if SHADER_DEBUG_LOG
237    if (debug_get_bool_option("shaderlog", false))
238       si_dump_debug_log(ctx, false);
239 #endif
240 }
241 
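/* For debug contexts: create the saved-CS tracking struct and a 4-byte trace
 * buffer; si_trace_emit() has the CP write an increasing trace ID into it, so
 * on a hang the last ID that reached memory shows how far the IB executed.
 */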
242 static void si_begin_gfx_cs_debug(struct si_context *ctx)
243 {
244    static const uint32_t zeros[1];
245    assert(!ctx->current_saved_cs);
246 
247    ctx->current_saved_cs = calloc(1, sizeof(*ctx->current_saved_cs));
248    if (!ctx->current_saved_cs)
249       return;
250 
251    pipe_reference_init(&ctx->current_saved_cs->reference, 1);
252 
253    ctx->current_saved_cs->trace_buf =
254       si_resource(pipe_buffer_create(ctx->b.screen, 0, PIPE_USAGE_STAGING, 4));
255    if (!ctx->current_saved_cs->trace_buf) {
256       free(ctx->current_saved_cs);
257       ctx->current_saved_cs = NULL;
258       return;
259    }
260 
261    pipe_buffer_write_nooverlap(&ctx->b, &ctx->current_saved_cs->trace_buf->b.b, 0, sizeof(zeros),
262                                zeros);
263    ctx->current_saved_cs->trace_id = 0;
264 
265    si_trace_emit(ctx);
266 
267    radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->current_saved_cs->trace_buf,
268                              RADEON_USAGE_READWRITE | RADEON_PRIO_FENCE_TRACE);
269 }
270 
271 static void si_add_gds_to_buffer_list(struct si_context *sctx)
272 {
273 }
274 
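/* Initialize the tracked register cache to the values programmed by the
 * CLEAR_STATE packet, so that redundant context register writes can be
 * skipped until a state change overwrites them.
 */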
275 void si_set_tracked_regs_to_clear_state(struct si_context *ctx)
276 {
277    assert(ctx->gfx_level < GFX12);
278    STATIC_ASSERT(SI_NUM_ALL_TRACKED_REGS <= sizeof(ctx->tracked_regs.reg_saved_mask) * 8);
279 
280    ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_CONTROL] = 0;
281    ctx->tracked_regs.reg_value[SI_TRACKED_DB_COUNT_CONTROL] = 0;
282 
283    ctx->tracked_regs.reg_value[SI_TRACKED_DB_DEPTH_CONTROL] = 0;
284    ctx->tracked_regs.reg_value[SI_TRACKED_DB_STENCIL_CONTROL] = 0;
285    ctx->tracked_regs.reg_value[SI_TRACKED_DB_DEPTH_BOUNDS_MIN] = 0;
286    ctx->tracked_regs.reg_value[SI_TRACKED_DB_DEPTH_BOUNDS_MAX] = 0;
287 
288    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_INTERP_CONTROL_0] = 0;
289    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POINT_SIZE] = 0;
290    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POINT_MINMAX] = 0;
291    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_LINE_CNTL] = 0;
292    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_MODE_CNTL_0] = 0;
293    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_SC_MODE_CNTL] = 0x4;
294    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_EDGERULE] = 0xaa99aaaa;
295 
296    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_DB_FMT_CNTL] = 0;
297    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_CLAMP] = 0;
298    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_FRONT_SCALE] = 0;
299    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_FRONT_OFFSET] = 0;
300    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_BACK_SCALE] = 0;
301    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_BACK_OFFSET] = 0;
302 
303    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_LINE_CNTL] = 0x1000;
304    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_AA_CONFIG] = 0;
305 
306    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_VTX_CNTL] = 0x5;
307    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ] = 0x3f800000;
308    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ] = 0x3f800000;
309    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ] = 0x3f800000;
310    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ] = 0x3f800000;
311 
312    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_POS_FORMAT] = 0;
313 
314    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_Z_FORMAT] = 0;
315    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_COL_FORMAT] = 0;
316    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_BARYC_CNTL] = 0;
317    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ENA] = 0;
318    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ADDR] = 0;
319 
320    ctx->tracked_regs.reg_value[SI_TRACKED_DB_EQAA] = 0;
321    ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_OVERRIDE2] = 0;
322    ctx->tracked_regs.reg_value[SI_TRACKED_DB_SHADER_CONTROL] = 0;
323    ctx->tracked_regs.reg_value[SI_TRACKED_CB_SHADER_MASK] = 0xffffffff;
324    ctx->tracked_regs.reg_value[SI_TRACKED_CB_TARGET_MASK] = 0xffffffff;
325    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_CLIP_CNTL] = 0x90000;
326    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VS_OUT_CNTL] = 0;
327    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VTE_CNTL] = 0;
328    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_CLIPRECT_RULE] = 0xffff;
329    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_LINE_STIPPLE] = 0;
330    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_MODE_CNTL_1] = 0;
331    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET] = 0;
332    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_IN_CONTROL] = 0x2;
333    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_INSTANCE_CNT] = 0;
334    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_VERT_OUT] = 0;
335    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_SHADER_STAGES_EN] = 0;
336    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_LS_HS_CONFIG] = 0;
337    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_TF_PARAM] = 0;
338    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL] = 0;
339    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_BINNER_CNTL_0] = 0x3;
340    ctx->tracked_regs.reg_value[SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP] = 0;
341    ctx->tracked_regs.reg_value[SI_TRACKED_GE_NGG_SUBGRP_CNTL] = 0;
342    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_NGG_CNTL] = 0;
343    ctx->tracked_regs.reg_value[SI_TRACKED_DB_PA_SC_VRS_OVERRIDE_CNTL] = 0;
344 
345    ctx->tracked_regs.reg_value[SI_TRACKED_SX_PS_DOWNCONVERT] = 0;
346    ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_EPSILON] = 0;
347    ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_CONTROL] = 0;
348 
349    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_ESGS_RING_ITEMSIZE] = 0;
350    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_REUSE_OFF] = 0;
351    ctx->tracked_regs.reg_value[SI_TRACKED_IA_MULTI_VGT_PARAM] = 0xff;
352 
353    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP] = 0;
354    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_ONCHIP_CNTL] = 0;
355 
356    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_ITEMSIZE] = 0;
357    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MODE] = 0;
358    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL] = 0x1e;
359    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_OUT_PRIM_TYPE] = 0;
360 
361    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_1] = 0;
362    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_2] = 0;
363    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_3] = 0;
364 
365    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE] = 0;
366    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1] = 0;
367    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2] = 0;
368    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3] = 0;
369 
370    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_VS_OUT_CONFIG] = 0;
371    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_PRIMITIVEID_EN] = 0;
372    ctx->tracked_regs.reg_value[SI_TRACKED_CB_DCC_CONTROL] = 0;
373 
374    /* Set all cleared context registers to saved. */
375    BITSET_SET_RANGE(ctx->tracked_regs.reg_saved_mask, 0, SI_NUM_TRACKED_CONTEXT_REGS - 1);
376 }
377 
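/* Install a wrapper around the draw callbacks (used below for TMZ). A non-NULL
 * wrapper saves the current draw_vbo/draw_vertex_state and replaces them;
 * NULL removes the wrapper and re-selects the regular draw functions.
 */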
378 void si_install_draw_wrapper(struct si_context *sctx, pipe_draw_func wrapper,
379                              pipe_draw_vertex_state_func vstate_wrapper)
380 {
381    if (wrapper) {
382       if (wrapper != sctx->b.draw_vbo) {
383          assert(!sctx->real_draw_vbo);
384          assert(!sctx->real_draw_vertex_state);
385          sctx->real_draw_vbo = sctx->b.draw_vbo;
386          sctx->real_draw_vertex_state = sctx->b.draw_vertex_state;
387          sctx->b.draw_vbo = wrapper;
388          sctx->b.draw_vertex_state = vstate_wrapper;
389       }
390    } else if (sctx->real_draw_vbo) {
391       sctx->real_draw_vbo = NULL;
392       sctx->real_draw_vertex_state = NULL;
393       si_select_draw_vbo(sctx);
394    }
395 }
396 
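/* If the resources used by the next draw require a different TMZ (secure)
 * state than the current command stream, flush so that the following IB is
 * submitted with the toggled security state.
 */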
397 static void si_tmz_preamble(struct si_context *sctx)
398 {
399    bool secure = si_gfx_resources_check_encrypted(sctx);
400    if (secure != sctx->ws->cs_is_secure(&sctx->gfx_cs)) {
401       si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
402                             RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION, NULL);
403    }
404 }
405 
406 static void si_draw_vbo_tmz_preamble(struct pipe_context *ctx,
407                                      const struct pipe_draw_info *info,
408                                      unsigned drawid_offset,
409                                      const struct pipe_draw_indirect_info *indirect,
410                                      const struct pipe_draw_start_count_bias *draws,
411                                      unsigned num_draws) {
412    struct si_context *sctx = (struct si_context *)ctx;
413 
414    si_tmz_preamble(sctx);
415    sctx->real_draw_vbo(ctx, info, drawid_offset, indirect, draws, num_draws);
416 }
417 
418 static void si_draw_vstate_tmz_preamble(struct pipe_context *ctx,
419                                         struct pipe_vertex_state *state,
420                                         uint32_t partial_velem_mask,
421                                         struct pipe_draw_vertex_state_info info,
422                                         const struct pipe_draw_start_count_bias *draws,
423                                         unsigned num_draws) {
424    struct si_context *sctx = (struct si_context *)ctx;
425 
426    si_tmz_preamble(sctx);
427    sctx->real_draw_vertex_state(ctx, state, partial_velem_mask, info, draws, num_draws);
428 }
429 
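/* Set up a newly started gfx command stream: re-add long-lived buffers to the
 * buffer list, emit the CS preamble, and mark state atoms and tracked
 * registers dirty so the required state is re-emitted before the first draw.
 */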
430 void si_begin_new_gfx_cs(struct si_context *ctx, bool first_cs)
431 {
432    bool is_secure = false;
433 
434    if (!first_cs)
435       u_trace_fini(&ctx->trace);
436 
437    u_trace_init(&ctx->trace, &ctx->ds.trace_context);
438 
439    if (unlikely(radeon_uses_secure_bos(ctx->ws))) {
440       is_secure = ctx->ws->cs_is_secure(&ctx->gfx_cs);
441 
442       si_install_draw_wrapper(ctx, si_draw_vbo_tmz_preamble,
443                               si_draw_vstate_tmz_preamble);
444    }
445 
446    if (ctx->is_debug)
447       si_begin_gfx_cs_debug(ctx);
448 
449    if (ctx->screen->gds_oa)
450       ctx->ws->cs_add_buffer(&ctx->gfx_cs, ctx->screen->gds_oa, RADEON_USAGE_READWRITE, 0);
451 
452    /* Always invalidate caches at the beginning of IBs, because external
453     * users (e.g. BO evictions and SDMA/UVD/VCE IBs) can modify our
454     * buffers.
455     *
456     * Gfx10+ automatically invalidates I$, SMEM$, VMEM$, and GL1$ at the beginning of IBs,
457     * so we only need to flush the GL2 cache.
458     *
459     * Note that the cache flush done by the kernel at the end of GFX IBs
460     * isn't useful here, because that flush can finish after the following
461     * IB starts drawing.
462     *
463     * TODO: Do we also need to invalidate CB & DB caches?
464     */
465    ctx->barrier_flags |= SI_BARRIER_INV_L2;
466    if (ctx->gfx_level < GFX10)
467       ctx->barrier_flags |= SI_BARRIER_INV_ICACHE | SI_BARRIER_INV_SMEM | SI_BARRIER_INV_VMEM;
468 
469    /* Disable pipeline stats if there are no active queries. */
470    ctx->barrier_flags &= ~SI_BARRIER_EVENT_PIPELINESTAT_START & ~SI_BARRIER_EVENT_PIPELINESTAT_STOP;
471    if (ctx->num_hw_pipestat_streamout_queries)
472       ctx->barrier_flags |= SI_BARRIER_EVENT_PIPELINESTAT_START;
473    else
474       ctx->barrier_flags |= SI_BARRIER_EVENT_PIPELINESTAT_STOP;
475 
476    ctx->pipeline_stats_enabled = -1; /* indicate that the current hw state is unknown */
477 
478    /* We don't know if the last draw used NGG because it can be a different process.
479     * When switching NGG->legacy, we need to flush VGT for certain hw generations.
480     */
481    if (ctx->screen->info.has_vgt_flush_ngg_legacy_bug && !ctx->ngg)
482       ctx->barrier_flags |= SI_BARRIER_EVENT_VGT_FLUSH;
483 
484    si_mark_atom_dirty(ctx, &ctx->atoms.s.barrier);
485    si_mark_atom_dirty(ctx, &ctx->atoms.s.spi_ge_ring_state);
486 
487    if (ctx->screen->attribute_pos_prim_ring) {
488       radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->screen->attribute_pos_prim_ring,
489                                 RADEON_USAGE_READWRITE | RADEON_PRIO_SHADER_RINGS);
490    }
491    if (ctx->border_color_buffer) {
492       radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->border_color_buffer,
493                                 RADEON_USAGE_READ | RADEON_PRIO_BORDER_COLORS);
494    }
495    if (ctx->shadowing.registers) {
496       radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->shadowing.registers,
497                                 RADEON_USAGE_READWRITE | RADEON_PRIO_DESCRIPTORS);
498 
499       if (ctx->shadowing.csa)
500          radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->shadowing.csa,
501                                    RADEON_USAGE_READWRITE | RADEON_PRIO_DESCRIPTORS);
502    }
503 
504    si_add_all_descriptors_to_bo_list(ctx);
505    si_shader_pointers_mark_dirty(ctx);
506    ctx->cs_shader_state.emitted_program = NULL;
507 
508    /* The CS initialization should be emitted before everything else. */
509    if (ctx->cs_preamble_state) {
510       struct si_pm4_state *preamble = is_secure ? ctx->cs_preamble_state_tmz :
511                                                   ctx->cs_preamble_state;
512       radeon_begin(&ctx->gfx_cs);
513       radeon_emit_array(preamble->base.pm4, preamble->base.ndw);
514       radeon_end();
515    }
516 
517    if (!ctx->has_graphics) {
518       ctx->initial_gfx_cs_size = ctx->gfx_cs.current.cdw;
519       return;
520    }
521 
522    if (ctx->has_tessellation) {
523       radeon_add_to_buffer_list(ctx, &ctx->gfx_cs,
524                                 unlikely(is_secure) ? si_resource(ctx->screen->tess_rings_tmz)
525                                                     : si_resource(ctx->screen->tess_rings),
526                                 RADEON_USAGE_READWRITE | RADEON_PRIO_SHADER_RINGS);
527    }
528 
529    /* Set all valid state groups as dirty so they get re-emitted on
530     * the next draw command.
531     */
532    si_pm4_reset_emitted(ctx);
533 
534    if (ctx->queued.named.ls)
535       ctx->prefetch_L2_mask |= SI_PREFETCH_LS;
536    if (ctx->queued.named.hs)
537       ctx->prefetch_L2_mask |= SI_PREFETCH_HS;
538    if (ctx->queued.named.es)
539       ctx->prefetch_L2_mask |= SI_PREFETCH_ES;
540    if (ctx->queued.named.gs)
541       ctx->prefetch_L2_mask |= SI_PREFETCH_GS;
542    if (ctx->queued.named.vs)
543       ctx->prefetch_L2_mask |= SI_PREFETCH_VS;
544    if (ctx->queued.named.ps)
545       ctx->prefetch_L2_mask |= SI_PREFETCH_PS;
546 
547    /* CLEAR_STATE disables all colorbuffers, so only enable bound ones. */
548    bool has_clear_state = ctx->screen->info.has_clear_state;
549    if (has_clear_state) {
550       ctx->framebuffer.dirty_cbufs =
551             u_bit_consecutive(0, ctx->framebuffer.state.nr_cbufs);
552       /* CLEAR_STATE disables the zbuffer, so only enable it if it's bound. */
553       ctx->framebuffer.dirty_zsbuf = ctx->framebuffer.state.zsbuf != NULL;
554    } else {
555       ctx->framebuffer.dirty_cbufs = u_bit_consecutive(0, 8);
556       ctx->framebuffer.dirty_zsbuf = true;
557    }
558 
559    /* RB+ depth-only rendering needs to set CB_COLOR0_INFO differently from CLEAR_STATE. */
560    if (ctx->screen->info.rbplus_allowed)
561       ctx->framebuffer.dirty_cbufs |= 0x1;
562 
563    /* GFX11+ needs to set NUM_SAMPLES differently from CLEAR_STATE. */
564    if (ctx->gfx_level >= GFX11)
565       ctx->framebuffer.dirty_zsbuf = true;
566 
567    /* Even with shadowed registers, we have to add buffers to the buffer list.
568     * These atoms are the only ones that add buffers.
569     *
570     * The framebuffer state also needs to set PA_SC_WINDOW_SCISSOR_BR differently from CLEAR_STATE.
571     */
572    si_mark_atom_dirty(ctx, &ctx->atoms.s.framebuffer);
573    si_mark_atom_dirty(ctx, &ctx->atoms.s.render_cond);
574    if (ctx->screen->use_ngg_culling)
575       si_mark_atom_dirty(ctx, &ctx->atoms.s.ngg_cull_state);
576 
577    if (first_cs || !ctx->shadowing.registers) {
578       /* These don't add any buffers, so skip them with shadowing. */
579       si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_regs);
580       /* CLEAR_STATE sets zeros. */
581       if (!has_clear_state || ctx->clip_state_any_nonzeros)
582          si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_state);
583       ctx->sample_locs_num_samples = 0;
584       si_mark_atom_dirty(ctx, &ctx->atoms.s.sample_locations);
585       si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_config);
586       /* CLEAR_STATE sets 0xffff. */
587       if (!has_clear_state || ctx->sample_mask != 0xffff)
588          si_mark_atom_dirty(ctx, &ctx->atoms.s.sample_mask);
589       si_mark_atom_dirty(ctx, &ctx->atoms.s.cb_render_state);
590       /* CLEAR_STATE sets zeros. */
591       if (!has_clear_state || ctx->blend_color_any_nonzeros)
592          si_mark_atom_dirty(ctx, &ctx->atoms.s.blend_color);
593       si_mark_atom_dirty(ctx, &ctx->atoms.s.db_render_state);
594       if (ctx->gfx_level >= GFX9)
595          si_mark_atom_dirty(ctx, &ctx->atoms.s.dpbb_state);
596       si_mark_atom_dirty(ctx, &ctx->atoms.s.stencil_ref);
597       si_mark_atom_dirty(ctx, &ctx->atoms.s.spi_map);
598       if (ctx->gfx_level < GFX11)
599          si_mark_atom_dirty(ctx, &ctx->atoms.s.streamout_enable);
600       /* CLEAR_STATE disables all window rectangles. */
601       if (!has_clear_state || ctx->num_window_rectangles > 0)
602          si_mark_atom_dirty(ctx, &ctx->atoms.s.window_rectangles);
603       si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
604       si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
605       si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
606       si_mark_atom_dirty(ctx, &ctx->atoms.s.vgt_pipeline_state);
607       si_mark_atom_dirty(ctx, &ctx->atoms.s.tess_io_layout);
608 
609       /* Set all register values to unknown. */
610       BITSET_ZERO(ctx->tracked_regs.reg_saved_mask);
611 
612       if (has_clear_state)
613          si_set_tracked_regs_to_clear_state(ctx);
614 
615       /* 0xffffffff is an impossible value for SPI_PS_INPUT_CNTL_n registers */
616       memset(ctx->tracked_regs.spi_ps_input_cntl, 0xff, sizeof(uint32_t) * 32);
617    }
618 
619    /* Invalidate various draw states so that they are emitted before
620     * the first draw call. */
621    ctx->last_instance_count = SI_INSTANCE_COUNT_UNKNOWN;
622    ctx->last_index_size = -1;
623    /* Primitive restart is set to false by the gfx preamble on GFX11+. */
624    ctx->last_primitive_restart_en = ctx->gfx_level >= GFX11 ? false : -1;
625    ctx->last_restart_index = SI_RESTART_INDEX_UNKNOWN;
626    ctx->last_prim = -1;
627    ctx->last_vs_state = ~0;
628    ctx->last_gs_state = ~0;
629    ctx->last_ls = NULL;
630    ctx->last_tcs = NULL;
631    ctx->last_tes_sh_base = -1;
632    ctx->last_num_tcs_input_cp = -1;
633 
634    assert(ctx->num_buffered_gfx_sh_regs == 0);
635    assert(ctx->num_buffered_compute_sh_regs == 0);
636    ctx->num_buffered_gfx_sh_regs = 0;
637    ctx->num_buffered_compute_sh_regs = 0;
638 
639    if (ctx->scratch_buffer)
640       si_mark_atom_dirty(ctx, &ctx->atoms.s.scratch_state);
641 
642    if (ctx->streamout.suspended) {
643       ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
644       si_streamout_buffers_dirty(ctx);
645    }
646 
647    if (!list_is_empty(&ctx->active_queries))
648       si_resume_queries(ctx);
649 
650    assert(!ctx->gfx_cs.prev_dw);
651    ctx->initial_gfx_cs_size = ctx->gfx_cs.current.cdw;
652 
653    /* All buffer references are removed on a flush, so si_check_needs_implicit_sync
654     * cannot determine if si_make_CB_shader_coherent() needs to be called.
655     * ctx->force_shader_coherency.with_cb will be cleared by the first call to
656     * si_make_CB_shader_coherent.
657     */
658    ctx->force_shader_coherency.with_cb = true;
659    ctx->force_shader_coherency.with_db = true;
660 }
661 
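/* Write the next trace ID to the trace buffer with the CP and also encode it
 * as a NOP trace point in the IB, so IB dumps can be correlated with how far
 * the CP progressed.
 */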
662 void si_trace_emit(struct si_context *sctx)
663 {
664    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
665    uint32_t trace_id = ++sctx->current_saved_cs->trace_id;
666 
667    si_cp_write_data(sctx, sctx->current_saved_cs->trace_buf, 0, 4, V_370_MEM, V_370_ME, &trace_id);
668 
669    radeon_begin(cs);
670    radeon_emit(PKT3(PKT3_NOP, 0, 0));
671    radeon_emit(AC_ENCODE_TRACE_POINT(trace_id));
672    radeon_end();
673 
674    if (sctx->log)
675       u_log_flush(sctx->log);
676 }
677 
678 /* timestamp logging for u_trace: */
679 void si_emit_ts(struct si_context *sctx, struct si_resource* buffer, unsigned int offset)
680 {
681    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
682    uint64_t va = buffer->gpu_address + offset;
683    si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0, EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
684                         EOP_DATA_SEL_TIMESTAMP, buffer, va, 0, PIPE_QUERY_TIMESTAMP);
685 }
686