/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>

#include "util/macros.h"

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"
#include "common/intel_compute_slm.h"
#include "common/intel_genX_state_brw.h"
static void
genX(emit_simpler_shader_init_fragment)(struct anv_simple_shader *state)
{
   assert(state->cmd_buffer && state->cmd_buffer->state.current_pipeline == _3D);

   struct anv_batch *batch = state->batch;
   struct anv_device *device = state->device;
   const struct brw_wm_prog_data *prog_data =
      brw_wm_prog_data_const(state->kernel->prog_data);

   uint32_t *dw = anv_batch_emitn(batch,
                                  1 + 2 * GENX(VERTEX_ELEMENT_STATE_length),
                                  GENX(3DSTATE_VERTEX_ELEMENTS));
   /* You might think there is some shady stuff going on here and you would
    * be right. We set up two VERTEX_ELEMENT_STATE entries yet only provide
    * one (positions) VERTEX_BUFFER_STATE later.
    *
    * See blorp_emit_vertex_elements() in blorp_genX_exec_brw.h for more on
    * setting up a 3D pipeline with a fragment shader but without a vertex
    * shader.
    */
   GENX(VERTEX_ELEMENT_STATE_pack)(
      batch, dw + 1, &(struct GENX(VERTEX_ELEMENT_STATE)) {
         .VertexBufferIndex = 1,
         .Valid = true,
         .SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT,
         .SourceElementOffset = 0,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
      });
   GENX(VERTEX_ELEMENT_STATE_pack)(
      batch, dw + 3, &(struct GENX(VERTEX_ELEMENT_STATE)) {
         .VertexBufferIndex   = 0,
         .Valid               = true,
         .SourceElementFormat = ISL_FORMAT_R32G32B32_FLOAT,
         .SourceElementOffset = 0,
         .Component0Control   = VFCOMP_STORE_SRC,
         .Component1Control   = VFCOMP_STORE_SRC,
         .Component2Control   = VFCOMP_STORE_SRC,
         .Component3Control   = VFCOMP_STORE_1_FP,
      });

   anv_batch_emit(batch, GENX(3DSTATE_VF_STATISTICS), vf);
   anv_batch_emit(batch, GENX(3DSTATE_VF_SGVS), sgvs) {
      sgvs.InstanceIDEnable = true;
      sgvs.InstanceIDComponentNumber = COMP_1;
      sgvs.InstanceIDElementOffset = 0;
   }
#if GFX_VER >= 11
   anv_batch_emit(batch, GENX(3DSTATE_VF_SGVS_2), sgvs);
#endif
   anv_batch_emit(batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
      vfi.InstancingEnable   = false;
      vfi.VertexElementIndex = 0;
   }
   anv_batch_emit(batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
      vfi.InstancingEnable   = false;
      vfi.VertexElementIndex = 1;
   }

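   /* The draw emitted later is a single RECTLIST primitive (3 vertices
    * spanning the region of interest), the same trick blorp uses to run a
    * fragment shader over a region without a real vertex shader.
    */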
   anv_batch_emit(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
      topo.PrimitiveTopologyType = _3DPRIM_RECTLIST;
   }

   /* Emit URB setup.  We tell it that the VS is active because we want it to
    * allocate space for the VS.  Even though one isn't run, we need VUEs to
    * store the data that VF is going to pass to SOL.
    */
   struct intel_urb_config urb_cfg_out = {
      .size = { DIV_ROUND_UP(32, 64), 1, 1, 1 },
   };

   genX(emit_l3_config)(batch, device, state->l3_config);
   state->cmd_buffer->state.current_l3_config = state->l3_config;

   enum intel_urb_deref_block_size deref_block_size;
   genX(emit_urb_setup)(device, batch, state->l3_config,
                        VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
                        state->urb_cfg, &urb_cfg_out, &deref_block_size);

   anv_batch_emit(batch, GENX(3DSTATE_PS_BLEND), ps_blend) {
      ps_blend.HasWriteableRT = true;
   }

   anv_batch_emit(batch, GENX(3DSTATE_WM_DEPTH_STENCIL), wm);

#if GFX_VER >= 12
   anv_batch_emit(batch, GENX(3DSTATE_DEPTH_BOUNDS), db) {
      db.DepthBoundsTestEnable = false;
      db.DepthBoundsTestMinValue = 0.0;
      db.DepthBoundsTestMaxValue = 1.0;
   }
#endif

   anv_batch_emit(batch, GENX(3DSTATE_MULTISAMPLE), ms);
   anv_batch_emit(batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
      sm.SampleMask = 0x1;
   }

   anv_batch_emit(batch, GENX(3DSTATE_VS), vs);
   anv_batch_emit(batch, GENX(3DSTATE_HS), hs);
   anv_batch_emit(batch, GENX(3DSTATE_TE), te);
   anv_batch_emit(batch, GENX(3DSTATE_DS), ds);

#if GFX_VERx10 >= 125
   if (device->vk.enabled_extensions.EXT_mesh_shader) {
      anv_batch_emit(batch, GENX(3DSTATE_MESH_CONTROL), mesh);
      anv_batch_emit(batch, GENX(3DSTATE_TASK_CONTROL), task);
   }
#endif

   anv_batch_emit(batch, GENX(3DSTATE_STREAMOUT), so);

   anv_batch_emit(batch, GENX(3DSTATE_GS), gs);

   anv_batch_emit(batch, GENX(3DSTATE_CLIP), clip) {
      clip.PerspectiveDivideDisable = true;
   }

   anv_batch_emit(batch, GENX(3DSTATE_SF), sf) {
#if GFX_VER >= 12
      sf.DerefBlockSize = deref_block_size;
#endif
   }

   anv_batch_emit(batch, GENX(3DSTATE_RASTER), raster) {
      raster.CullMode = CULLMODE_NONE;
   }

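   /* SBE reads varyings from the URB in 256-bit units, i.e. two vec4
    * attributes at a time, hence the divide-by-two read length below.
    */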
   anv_batch_emit(batch, GENX(3DSTATE_SBE), sbe) {
      sbe.VertexURBEntryReadOffset = 1;
      sbe.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
      sbe.VertexURBEntryReadLength = MAX2((prog_data->num_varying_inputs + 1) / 2, 1);
      sbe.ConstantInterpolationEnable = prog_data->flat_inputs;
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ForceVertexURBEntryReadOffset = true;
      for (unsigned i = 0; i < 32; i++)
         sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
   }

   anv_batch_emit(batch, GENX(3DSTATE_WM), wm);

   anv_batch_emit(batch, GENX(3DSTATE_PS), ps) {
      intel_set_ps_dispatch_state(&ps, device->info, prog_data,
                                  1 /* rasterization_samples */,
                                  0 /* msaa_flags */);

      ps.VectorMaskEnable       = prog_data->uses_vmask;

      ps.BindingTableEntryCount = GFX_VER == 9 ? 1 : 0;
#if GFX_VER < 20
      ps.PushConstantEnable     = prog_data->base.nr_params > 0 ||
                                  prog_data->base.ubo_ranges[0].length;
#endif

      ps.DispatchGRFStartRegisterForConstantSetupData0 =
         brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 0);
      ps.DispatchGRFStartRegisterForConstantSetupData1 =
         brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 1);
#if GFX_VER < 20
      ps.DispatchGRFStartRegisterForConstantSetupData2 =
         brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 2);
#endif

      ps.KernelStartPointer0 = state->kernel->kernel.offset +
         brw_wm_prog_data_prog_offset(prog_data, ps, 0);
      ps.KernelStartPointer1 = state->kernel->kernel.offset +
         brw_wm_prog_data_prog_offset(prog_data, ps, 1);
#if GFX_VER < 20
      ps.KernelStartPointer2 = state->kernel->kernel.offset +
         brw_wm_prog_data_prog_offset(prog_data, ps, 2);
#endif

      ps.MaximumNumberofThreadsPerPSD = device->info->max_threads_per_psd - 1;
   }

#if INTEL_WA_18038825448_GFX_VER
   const bool needs_ps_dependency =
      genX(cmd_buffer_set_coarse_pixel_active)
         (state->cmd_buffer, ANV_COARSE_PIXEL_STATE_DISABLED);
#endif

   anv_batch_emit(batch, GENX(3DSTATE_PS_EXTRA), psx) {
      psx.PixelShaderValid = true;
#if GFX_VER < 20
      psx.AttributeEnable = prog_data->num_varying_inputs > 0;
#endif
      psx.PixelShaderIsPerSample = prog_data->persample_dispatch;
      psx.PixelShaderComputedDepthMode = prog_data->computed_depth_mode;
      psx.PixelShaderComputesStencil = prog_data->computed_stencil;

#if INTEL_WA_18038825448_GFX_VER
      psx.EnablePSDependencyOnCPsizeChange = needs_ps_dependency;
#endif
   }

   anv_batch_emit(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), cc) {
      struct anv_state cc_state =
         anv_state_stream_alloc(state->dynamic_state_stream,
                                4 * GENX(CC_VIEWPORT_length), 32);
      if (cc_state.map == NULL)
         return;

      struct GENX(CC_VIEWPORT) cc_viewport = {
         .MinimumDepth = 0.0f,
         .MaximumDepth = 1.0f,
      };
      GENX(CC_VIEWPORT_pack)(NULL, cc_state.map, &cc_viewport);
      cc.CCViewportPointer = cc_state.offset;
   }

#if GFX_VER >= 12
   /* Disable Primitive Replication. */
   anv_batch_emit(batch, GENX(3DSTATE_PRIMITIVE_REPLICATION), pr);
#endif

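   /* Give the entire push constant URB space to the fragment shader; the
    * other stages are programmed with empty (zero-sized) allocations.
    */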
   anv_batch_emit(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc);
   anv_batch_emit(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_HS), alloc);
   anv_batch_emit(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_DS), alloc);
   anv_batch_emit(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_GS), alloc);
   anv_batch_emit(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
      alloc.ConstantBufferOffset = 0;
      alloc.ConstantBufferSize   = device->info->max_constant_urb_size_kb;
   }

#if GFX_VERx10 == 125
   /* DG2: Wa_22011440098
    * MTL: Wa_18022330953
    *
    * In 3D mode, after programming the push constant alloc command,
    * immediately program a push constant command (zero length) without any
    * commit between them.
    *
    * Note that Wa_16011448509 isn't needed here as all address bits are zero.
    */
   anv_batch_emit(batch, GENX(3DSTATE_CONSTANT_ALL), c) {
      /* Update empty push constants for all stages (bitmask = 11111b) */
      c.ShaderUpdateEnable = 0x1f;
      c.MOCS = anv_mocs(device, NULL, 0);
   }
#endif

#if GFX_VER == 9
   /* Allocate a binding table on Gfx9 for two reasons:
    *
    *   1. We need to emit a 3DSTATE_BINDING_TABLE_POINTERS_PS to make the
    *      HW apply the preceding 3DSTATE_CONSTANT_PS.
    *
    *   2. Emitting an empty 3DSTATE_BINDING_TABLE_POINTERS_PS would cause RT
    *      writes (even though they're empty) to disturb later writes
    *      (probably due to the RT cache).
    *
    * Our binding table only has one entry, pointing to the null surface.
    */
   uint32_t bt_offset;
   state->bt_state =
      anv_cmd_buffer_alloc_binding_table(state->cmd_buffer, 1, &bt_offset);
   if (state->bt_state.map == NULL) {
      VkResult result = anv_cmd_buffer_new_binding_table_block(state->cmd_buffer);
      if (result != VK_SUCCESS)
         return;

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      genX(cmd_buffer_emit_bt_pool_base_address)(state->cmd_buffer);

      state->bt_state =
         anv_cmd_buffer_alloc_binding_table(state->cmd_buffer, 1, &bt_offset);
      assert(state->bt_state.map != NULL);
   }

   uint32_t *bt_map = state->bt_state.map;
   bt_map[0] = anv_bindless_state_for_binding_table(
      device,
      device->null_surface_state).offset + bt_offset;

   state->cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
#endif

#if INTEL_WA_14018283232_GFX_VER
   genX(cmd_buffer_ensure_wa_14018283232)(state->cmd_buffer, false);
#endif

   /* Flag all the state emitted above as dirty so the next regular pipeline
    * emission reprograms it.
    */
   struct anv_gfx_dynamic_state *hw_state =
      &state->cmd_buffer->state.gfx.dyn_state;

   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_URB);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_STATISTICS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_TOPOLOGY);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VERTEX_INPUT);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_SGVS);
#if GFX_VER >= 11
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VF_SGVS_2);
#endif
#if GFX_VER >= 12
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PRIMITIVE_REPLICATION);
#endif
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_STREAMOUT);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VIEWPORT_CC);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_CLIP);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_RASTER);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_SAMPLE_MASK);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_MULTISAMPLE);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_DEPTH_BOUNDS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_WM);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_WM_DEPTH_STENCIL);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_SF);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_SBE);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_VS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_HS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_DS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_TE);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_GS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PS);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PS_EXTRA);
   BITSET_SET(hw_state->dirty, ANV_GFX_STATE_PS_BLEND);
   if (device->vk.enabled_extensions.EXT_mesh_shader) {
      BITSET_SET(hw_state->dirty, ANV_GFX_STATE_MESH_CONTROL);
      BITSET_SET(hw_state->dirty, ANV_GFX_STATE_TASK_CONTROL);
   }

   /* Update urb config after simple shader. */
   memcpy(&state->cmd_buffer->state.gfx.urb_cfg, &urb_cfg_out,
          sizeof(struct intel_urb_config));

   state->cmd_buffer->state.gfx.vb_dirty = BITFIELD_BIT(0);
   state->cmd_buffer->state.gfx.dirty |= ~(ANV_CMD_DIRTY_INDEX_BUFFER |
                                           ANV_CMD_DIRTY_XFB_ENABLE |
                                           ANV_CMD_DIRTY_OCCLUSION_QUERY_ACTIVE |
                                           ANV_CMD_DIRTY_FS_MSAA_FLAGS |
                                           ANV_CMD_DIRTY_RESTART_INDEX);
   state->cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
   state->cmd_buffer->state.gfx.push_constant_stages = VK_SHADER_STAGE_FRAGMENT_BIT;
}

static void
genX(emit_simpler_shader_init_compute)(struct anv_simple_shader *state)
{
   assert(state->cmd_buffer == NULL ||
          state->cmd_buffer->state.current_pipeline == GPGPU);

#if GFX_VERx10 >= 125
   struct anv_shader_bin *cs_bin = state->kernel;
   const struct brw_cs_prog_data *prog_data =
      (const struct brw_cs_prog_data *) cs_bin->prog_data;
   /* Currently our simple shaders are simple enough that they never spill. */
   assert(prog_data->base.total_scratch == 0);
   if (state->cmd_buffer != NULL) {
      genX(cmd_buffer_ensure_cfe_state)(state->cmd_buffer, 0);
   } else {
      anv_batch_emit(state->batch, GENX(CFE_STATE), cfe) {
         cfe.MaximumNumberofThreads =
            state->device->info->max_cs_threads *
            state->device->info->subslice_total;
      }
   }
#endif
}

/** Initialize a simple shader emission */
void
genX(emit_simple_shader_init)(struct anv_simple_shader *state)
{
   assert(state->kernel->stage == MESA_SHADER_FRAGMENT ||
          state->kernel->stage == MESA_SHADER_COMPUTE);

   if (state->kernel->stage == MESA_SHADER_FRAGMENT)
      genX(emit_simpler_shader_init_fragment)(state);
   else
      genX(emit_simpler_shader_init_compute)(state);
}

/** Allocate push constant data for a simple shader */
struct anv_state
genX(simple_shader_alloc_push)(struct anv_simple_shader *state, uint32_t size)
{
   struct anv_state s;

   if (state->kernel->stage == MESA_SHADER_FRAGMENT) {
      s = anv_state_stream_alloc(state->dynamic_state_stream,
                                 size, ANV_UBO_ALIGNMENT);
   } else {
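      /* On Gfx12.5+, COMPUTE_WALKER reads its indirect (push) data relative
       * to the general state heap, so allocate from there; older platforms
       * push through dynamic state instead.
       */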
#if GFX_VERx10 >= 125
      s = anv_state_stream_alloc(state->general_state_stream, align(size, 64), 64);
#else
      s = anv_state_stream_alloc(state->dynamic_state_stream, size, 64);
#endif
   }

   if (s.map == NULL)
      anv_batch_set_error(state->batch, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   return s;
}

/** Get the address of push constant data allocated with
 *  genX(simple_shader_alloc_push)
 */
struct anv_address
genX(simple_shader_push_state_address)(struct anv_simple_shader *state,
                                       struct anv_state push_state)
{
   if (state->kernel->stage == MESA_SHADER_FRAGMENT) {
      return anv_state_pool_state_address(
         &state->device->dynamic_state_pool, push_state);
   } else {
#if GFX_VERx10 >= 125
      return anv_state_pool_state_address(
         &state->device->general_state_pool, push_state);
#else
      return anv_state_pool_state_address(
         &state->device->dynamic_state_pool, push_state);
#endif
   }
}

/** Emit a simple shader dispatch */
void
genX(emit_simple_shader_dispatch)(struct anv_simple_shader *state,
                                  uint32_t num_threads,
                                  struct anv_state push_state)
{
   struct anv_device *device = state->device;
   struct anv_batch *batch = state->batch;
   struct anv_address push_addr =
      anv_state_pool_state_address(&device->dynamic_state_pool, push_state);

   if (state->kernel->stage == MESA_SHADER_FRAGMENT) {
      /* At the moment we require a command buffer associated with this
       * emission as we need to allocate binding tables on Gfx9.
       */
      assert(state->cmd_buffer != NULL);

      struct anv_state vs_data_state =
         anv_state_stream_alloc(state->dynamic_state_stream,
                                9 * sizeof(uint32_t), 32);
      if (vs_data_state.map == NULL)
         return;

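      /* Lay the invocations out as a rectangle at most 8192 pixels wide;
       * each covered pixel corresponds to one fragment shader invocation.
       */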
      float x0 = 0.0f, x1 = MIN2(num_threads, 8192);
      float y0 = 0.0f, y1 = DIV_ROUND_UP(num_threads, 8192);
      float z = 0.0f;

      float *vertices = vs_data_state.map;
      vertices[0] = x1; vertices[1] = y1; vertices[2] = z; /* v0 */
      vertices[3] = x0; vertices[4] = y1; vertices[5] = z; /* v1 */
      vertices[6] = x0; vertices[7] = y0; vertices[8] = z; /* v2 */

      uint32_t *dw = anv_batch_emitn(batch,
                                     1 + GENX(VERTEX_BUFFER_STATE_length),
                                     GENX(3DSTATE_VERTEX_BUFFERS));
      GENX(VERTEX_BUFFER_STATE_pack)(batch, dw + 1,
                                     &(struct GENX(VERTEX_BUFFER_STATE)) {
                                        .VertexBufferIndex     = 0,
                                        .AddressModifyEnable   = true,
                                        .BufferStartingAddress = (struct anv_address) {
                                           .bo = device->dynamic_state_pool.block_pool.bo,
                                           .offset = vs_data_state.offset,
                                        },
                                        .BufferPitch           = 3 * sizeof(float),
                                        .BufferSize            = 9 * sizeof(float),
                                        .MOCS                  = anv_mocs(device, NULL, 0),
#if GFX_VER >= 12
                                        .L3BypassDisable       = true,
#endif
                                     });

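      /* Bind the push constant buffer to the fragment stage. Constant read
       * lengths in these packets are in units of 32-byte registers.
       */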
#if GFX_VERx10 > 120
      dw =
         anv_batch_emitn(batch,
                         GENX(3DSTATE_CONSTANT_ALL_length) +
                         GENX(3DSTATE_CONSTANT_ALL_DATA_length),
                         GENX(3DSTATE_CONSTANT_ALL),
                         .ShaderUpdateEnable = BITFIELD_BIT(MESA_SHADER_FRAGMENT),
                         .PointerBufferMask = 0x1,
                         .MOCS = anv_mocs(device, NULL, 0));

      GENX(3DSTATE_CONSTANT_ALL_DATA_pack)(
         batch, dw + GENX(3DSTATE_CONSTANT_ALL_length),
         &(struct GENX(3DSTATE_CONSTANT_ALL_DATA)) {
            .PointerToConstantBuffer = push_addr,
            .ConstantBufferReadLength = DIV_ROUND_UP(push_state.alloc_size, 32),
         });
#else
      /* The Skylake PRM contains the following restriction:
       *
       *    "The driver must ensure The following case does not occur
       *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
       *     buffer 3 read length equal to zero committed followed by a
       *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
       *     zero committed."
       *
       * To avoid this, we program the highest slot.
       */
      anv_batch_emit(batch, GENX(3DSTATE_CONSTANT_PS), c) {
         c.MOCS = anv_mocs(device, NULL, 0);
         c.ConstantBody.ReadLength[3] = DIV_ROUND_UP(push_state.alloc_size, 32);
         c.ConstantBody.Buffer[3] = push_addr;
      }
#endif

#if GFX_VER == 9
      /* Why are the push constants not flushed without a binding table
       * update??
       */
      anv_batch_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_PS), btp) {
         btp.PointertoPSBindingTable = state->bt_state.offset;
      }
#endif

      genX(emit_breakpoint)(batch, device, true);
      anv_batch_emit(batch, GENX(3DPRIMITIVE), prim) {
         prim.VertexAccessType         = SEQUENTIAL;
         prim.PrimitiveTopologyType    = _3DPRIM_RECTLIST;
         prim.VertexCountPerInstance   = 3;
         prim.InstanceCount            = 1;
      }
      genX(batch_emit_post_3dprimitive_was)(batch, device, _3DPRIM_RECTLIST, 3);
      genX(emit_breakpoint)(batch, device, false);
   } else {
      const struct intel_device_info *devinfo = device->info;
      const struct brw_cs_prog_data *prog_data =
         (const struct brw_cs_prog_data *) state->kernel->prog_data;
      const struct intel_cs_dispatch_info dispatch =
         brw_cs_get_dispatch_info(devinfo, prog_data, NULL);

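      /* Gfx12.5+ replaces the MEDIA_* pipeline with a single COMPUTE_WALKER
       * that carries the interface descriptor inline; older generations go
       * through MEDIA_VFE_STATE / MEDIA_INTERFACE_DESCRIPTOR_LOAD below.
       */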
#if GFX_VERx10 >= 125
      anv_batch_emit(batch, GENX(COMPUTE_WALKER), cw) {
         cw.SIMDSize                       = dispatch.simd_size / 16;
         cw.MessageSIMD                    = dispatch.simd_size / 16;
         cw.IndirectDataStartAddress       = push_state.offset;
         cw.IndirectDataLength             = push_state.alloc_size;
         cw.LocalXMaximum                  = prog_data->local_size[0] - 1;
         cw.LocalYMaximum                  = prog_data->local_size[1] - 1;
         cw.LocalZMaximum                  = prog_data->local_size[2] - 1;
         cw.ThreadGroupIDXDimension        = DIV_ROUND_UP(num_threads,
                                                          dispatch.simd_size);
         cw.ThreadGroupIDYDimension        = 1;
         cw.ThreadGroupIDZDimension        = 1;
         cw.ExecutionMask                  = dispatch.right_mask;
         cw.PostSync.MOCS                  = anv_mocs(device, NULL, 0);

         cw.GenerateLocalID                = prog_data->generate_local_id != 0;
         cw.EmitLocal                      = prog_data->generate_local_id;
         cw.WalkOrder                      = prog_data->walk_order;
         cw.TileLayout = prog_data->walk_order == INTEL_WALK_ORDER_YXZ ?
                         TileY32bpe : Linear;

         cw.InterfaceDescriptor = (struct GENX(INTERFACE_DESCRIPTOR_DATA)) {
            .KernelStartPointer                = state->kernel->kernel.offset +
                                                 brw_cs_prog_data_prog_offset(prog_data,
                                                                              dispatch.simd_size),
            .SamplerStatePointer               = 0,
            .BindingTablePointer               = 0,
            .BindingTableEntryCount            = 0,
            .NumberofThreadsinGPGPUThreadGroup = dispatch.threads,
            .SharedLocalMemorySize             = intel_compute_slm_encode_size(GFX_VER,
                                                                               prog_data->base.total_shared),
            .NumberOfBarriers                  = prog_data->uses_barrier,
         };
      }
#else
      const uint32_t vfe_curbe_allocation =
         ALIGN(prog_data->push.per_thread.regs * dispatch.threads +
               prog_data->push.cross_thread.regs, 2);

      /* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
       *
       *    "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
       *     the only bits that are changed are scoreboard related: Scoreboard
       *     Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta. For
       *     these scoreboard related states, a MEDIA_STATE_FLUSH is
       *     sufficient."
       */
      enum anv_pipe_bits emitted_bits = 0;
      genX(emit_apply_pipe_flushes)(batch, device, GPGPU, ANV_PIPE_CS_STALL_BIT,
                                    &emitted_bits);

      /* If we have a command buffer allocated with the emission, update the
       * pending bits.
       */
      if (state->cmd_buffer)
         anv_cmd_buffer_update_pending_query_bits(state->cmd_buffer, emitted_bits);

      anv_batch_emit(batch, GENX(MEDIA_VFE_STATE), vfe) {
         vfe.StackSize              = 0;
         vfe.MaximumNumberofThreads =
            devinfo->max_cs_threads * devinfo->subslice_total - 1;
         vfe.NumberofURBEntries     = 2;
#if GFX_VER < 11
         vfe.ResetGatewayTimer      = true;
#endif
         vfe.URBEntryAllocationSize = 2;
         vfe.CURBEAllocationSize    = vfe_curbe_allocation;

         if (prog_data->base.total_scratch) {
            /* Broadwell's Per Thread Scratch Space is in the range [0, 11]
             * where 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
             */
            vfe.PerThreadScratchSpace =
               ffs(prog_data->base.total_scratch) - 11;
            vfe.ScratchSpaceBasePointer =
               (struct anv_address) {
               .bo = anv_scratch_pool_alloc(device,
                                            &device->scratch_pool,
                                            MESA_SHADER_COMPUTE,
                                            prog_data->base.total_scratch),
               .offset = 0,
            };
         }
      }
      struct anv_state iface_desc_state =
         anv_state_stream_alloc(state->dynamic_state_stream,
                                GENX(INTERFACE_DESCRIPTOR_DATA_length) * 4, 64);
      if (iface_desc_state.map == NULL)
         return;

      struct GENX(INTERFACE_DESCRIPTOR_DATA) iface_desc = {
         .KernelStartPointer                    = state->kernel->kernel.offset +
                                                  brw_cs_prog_data_prog_offset(prog_data,
                                                                               dispatch.simd_size),

         .SamplerCount                          = 0,
         .BindingTableEntryCount                = 0,
         .BarrierEnable                         = prog_data->uses_barrier,
         .SharedLocalMemorySize                 = intel_compute_slm_encode_size(GFX_VER,
                                                                                prog_data->base.total_shared),

         .ConstantURBEntryReadOffset            = 0,
         .ConstantURBEntryReadLength            = prog_data->push.per_thread.regs,
         .CrossThreadConstantDataReadLength     = prog_data->push.cross_thread.regs,
#if GFX_VER >= 12
         /* TODO: Check if we are missing workarounds and enable mid-thread
          * preemption.
          *
          * We still have issues with mid-thread preemption (it was already
          * disabled by the kernel on gfx11, due to missing workarounds). It's
          * possible that we are just missing some workarounds, and could
          * enable it later, but for now let's disable it to fix a GPU hang in
          * compute in Car Chase (and possibly more).
          */
         .ThreadPreemptionDisable               = true,
#endif
         .NumberofThreadsinGPGPUThreadGroup     = dispatch.threads,
      };
      GENX(INTERFACE_DESCRIPTOR_DATA_pack)(batch, iface_desc_state.map, &iface_desc);
      anv_batch_emit(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
         mid.InterfaceDescriptorTotalLength        = iface_desc_state.alloc_size;
         mid.InterfaceDescriptorDataStartAddress   = iface_desc_state.offset;
      }
      anv_batch_emit(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBEDataStartAddress = push_state.offset;
         curbe.CURBETotalDataLength  = push_state.alloc_size;
      }
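      /* SIMDSize is encoded as simd_size / 16: SIMD8 -> 0, SIMD16 -> 1,
       * SIMD32 -> 2.
       */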
      anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
         ggw.SIMDSize                     = dispatch.simd_size / 16;
         ggw.ThreadDepthCounterMaximum    = 0;
         ggw.ThreadHeightCounterMaximum   = 0;
         ggw.ThreadWidthCounterMaximum    = dispatch.threads - 1;
         ggw.ThreadGroupIDXDimension      = DIV_ROUND_UP(num_threads,
                                                         dispatch.simd_size);
         ggw.ThreadGroupIDYDimension      = 1;
         ggw.ThreadGroupIDZDimension      = 1;
         ggw.RightExecutionMask           = dispatch.right_mask;
         ggw.BottomExecutionMask          = 0xffffffff;
      }
      anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
#endif
   }
}

void
genX(emit_simple_shader_end)(struct anv_simple_shader *state)
{
   anv_batch_emit(state->batch, GENX(MI_BATCH_BUFFER_END), end);

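   /* Batch buffers must end on an 8-byte (QWord) boundary; a single MI_NOOP
    * (4 bytes) pads the odd-DWord case.
    */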
   if ((state->batch->next - state->batch->start) & 4)
      anv_batch_emit(state->batch, GENX(MI_NOOP), noop);
}