/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/*
 * Gen-specific function declarations. This header must *not* be included
 * directly. Instead, it is included multiple times by anv_private.h.
 *
 * In this header file, the usual genX() macro is available.
 */

#ifndef ANV_PRIVATE_H
#error This file is included by means other than anv_private.h
#endif
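
/*
 * Illustrative sketch of the multi-include mechanism (not the literal
 * contents of anv_private.h, and the generation prefixes shown are an
 * assumption; see anv_private.h for the authoritative list): genX() is
 * redefined to a per-generation prefix before each inclusion, e.g.
 *
 *    #define genX(x) gfx9_##x
 *    #include "anv_genX.h"
 *    #undef genX
 *
 *    #define genX(x) gfx11_##x
 *    #include "anv_genX.h"
 *    #undef genX
 */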

struct intel_sample_positions;
struct intel_urb_config;
struct anv_async_submit;
struct anv_embedded_sampler;
struct anv_pipeline_embedded_sampler_binding;
struct anv_trtt_bind;

typedef struct nir_builder nir_builder;
typedef struct nir_shader nir_shader;

extern const uint32_t genX(vk_to_intel_cullmode)[];

extern const uint32_t genX(vk_to_intel_front_face)[];

extern const uint32_t genX(vk_to_intel_primitive_type)[];

extern const uint32_t genX(vk_to_intel_compare_op)[];

extern const uint32_t genX(vk_to_intel_stencil_op)[];

extern const uint32_t genX(vk_to_intel_logic_op)[];

extern const uint32_t genX(vk_to_intel_fillmode)[];

void genX(init_physical_device_state)(struct anv_physical_device *device);

VkResult genX(init_device_state)(struct anv_device *device);

void genX(init_cps_device_state)(struct anv_device *device);

nir_shader *genX(load_libanv_shader)(struct anv_device *device, void *mem_ctx);

uint32_t genX(call_internal_shader)(nir_builder *b,
                                    enum anv_internal_kernel_name shader_name);

void
genX(set_fast_clear_state)(struct anv_cmd_buffer *cmd_buffer,
                           const struct anv_image *image,
                           const enum isl_format format,
                           union isl_color_value clear_color);

void
genX(load_image_clear_color)(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_state surface_state,
                             const struct anv_image *image);

void genX(cmd_buffer_emit_bt_pool_base_address)(struct anv_cmd_buffer *cmd_buffer);

void genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer);

void genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer);

void
genX(cmd_buffer_update_color_aux_op)(struct anv_cmd_buffer *cmd_buffer,
                                     enum isl_aux_op aux_op);

void genX(cmd_buffer_emit_gfx12_depth_wa)(struct anv_cmd_buffer *cmd_buffer,
                                          const struct isl_surf *surf);

void genX(cmd_buffer_set_binding_for_gfx8_vb_flush)(struct anv_cmd_buffer *cmd_buffer,
                                                    int vb_index,
                                                    struct anv_address vb_address,
                                                    uint32_t vb_size);
void genX(cmd_buffer_update_dirty_vbs_for_gfx8_vb_flush)(struct anv_cmd_buffer *cmd_buffer,
                                                         uint32_t access_type,
                                                         uint64_t vb_used);

void genX(cmd_buffer_emit_hashing_mode)(struct anv_cmd_buffer *cmd_buffer,
                                        unsigned width, unsigned height,
                                        unsigned scale);

void genX(urb_workaround)(struct anv_cmd_buffer *cmd_buffer,
                          const struct intel_urb_config *urb_cfg);

void genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer);
void genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer);
void genX(emit_pipeline_select)(struct anv_batch *batch, uint32_t pipeline,
                                const struct anv_device *device);

void genX(apply_task_urb_workaround)(struct anv_cmd_buffer *cmd_buffer);

void genX(emit_vertex_input)(struct anv_batch *batch,
                             uint32_t *vertex_element_dws,
                             struct anv_graphics_pipeline *pipeline,
                             const struct vk_vertex_input_state *vi,
                             bool emit_in_pipeline);

enum anv_pipe_bits
genX(emit_apply_pipe_flushes)(struct anv_batch *batch,
                              struct anv_device *device,
                              uint32_t current_pipeline,
                              enum anv_pipe_bits bits,
                              enum anv_pipe_bits *emitted_flush_bits);
void
genX(invalidate_aux_map)(struct anv_batch *batch,
                         struct anv_device *device,
                         enum intel_engine_class engine_class,
                         enum anv_pipe_bits bits);

#if INTEL_WA_14018283232_GFX_VER
void genX(batch_emit_wa_14018283232)(struct anv_batch *batch);

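/*
 * Toggle Wa_14018283232 on or off. Emits into the batch only when the
 * device actually needs the workaround and the requested state differs
 * from the currently tracked dynamic HW state; otherwise a no-op.
 */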
static inline void
genX(cmd_buffer_ensure_wa_14018283232)(struct anv_cmd_buffer *cmd_buffer,
                                       bool toggle)
{
   struct anv_gfx_dynamic_state *hw_state =
      &cmd_buffer->state.gfx.dyn_state;
   if (intel_needs_workaround(cmd_buffer->device->info, 14018283232) &&
       hw_state->wa_14018283232_toggle != toggle) {
      hw_state->wa_14018283232_toggle = toggle;
      BITSET_SET(hw_state->dirty, ANV_GFX_STATE_WA_14018283232);
      genX(batch_emit_wa_14018283232)(&cmd_buffer->batch);
   }
}
#endif

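/*
 * Track whether coarse pixel shading is active for Wa_18038825448.
 * Returns true when the state changed and the corresponding dirty bit was
 * set; on builds without the workaround this is a constant false.
 */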
static inline bool
genX(cmd_buffer_set_coarse_pixel_active)(struct anv_cmd_buffer *cmd_buffer,
                                         enum anv_coarse_pixel_state state)
{
#if INTEL_WA_18038825448_GFX_VER
   struct anv_cmd_graphics_state *gfx =
      &cmd_buffer->state.gfx;
   if (intel_needs_workaround(cmd_buffer->device->info, 18038825448) &&
       gfx->coarse_pixel_active != state) {
      gfx->coarse_pixel_active = state;
      gfx->dirty |= ANV_CMD_DIRTY_COARSE_PIXEL_ACTIVE;
      return true;
   }
   return false;
#else
   return false;
#endif
}

void genX(emit_so_memcpy_init)(struct anv_memcpy_state *state,
                               struct anv_device *device,
                               struct anv_cmd_buffer *cmd_buffer,
                               struct anv_batch *batch);

void genX(emit_so_memcpy_fini)(struct anv_memcpy_state *state);

void genX(emit_so_memcpy_end)(struct anv_memcpy_state *state);

void genX(emit_so_memcpy)(struct anv_memcpy_state *state,
                          struct anv_address dst, struct anv_address src,
                          uint32_t size);

void genX(emit_l3_config)(struct anv_batch *batch,
                          const struct anv_device *device,
                          const struct intel_l3_config *cfg);

void genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
                                const struct intel_l3_config *cfg);

void genX(flush_descriptor_buffers)(struct anv_cmd_buffer *cmd_buffer,
                                    struct anv_cmd_pipeline_state *pipe_state);

uint32_t
genX(cmd_buffer_flush_descriptor_sets)(struct anv_cmd_buffer *cmd_buffer,
                                       struct anv_cmd_pipeline_state *pipe_state,
                                       const VkShaderStageFlags dirty,
                                       struct anv_shader_bin **shaders,
                                       uint32_t num_shaders);

void genX(cmd_buffer_flush_gfx_hw_state)(struct anv_cmd_buffer *cmd_buffer);

anv_cmd_dirty_mask_t
genX(cmd_buffer_flush_gfx_runtime_state)(struct anv_cmd_buffer *cmd_buffer);

void genX(cmd_buffer_enable_pma_fix)(struct anv_cmd_buffer *cmd_buffer,
                                     bool enable);

void genX(cmd_buffer_mark_image_written)(struct anv_cmd_buffer *cmd_buffer,
                                         const struct anv_image *image,
                                         VkImageAspectFlagBits aspect,
                                         enum isl_aux_usage aux_usage,
                                         uint32_t level,
                                         uint32_t base_layer,
                                         uint32_t layer_count);

void genX(cmd_emit_conditional_render_predicate)(struct anv_cmd_buffer *cmd_buffer);

struct anv_address genX(cmd_buffer_ray_query_globals)(struct anv_cmd_buffer *cmd_buffer);

void genX(cmd_buffer_ensure_cfe_state)(struct anv_cmd_buffer *cmd_buffer,
                                       uint32_t total_scratch);

void
genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
                     const struct intel_l3_config *l3_config,
                     VkShaderStageFlags active_stages,
                     const struct intel_urb_config *urb_cfg_in,
                     struct intel_urb_config *urb_cfg_out,
                     enum intel_urb_deref_block_size *deref_block_size);

void genX(emit_sample_pattern)(struct anv_batch *batch,
                               const struct vk_sample_locations_state *sl);

void genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_address dst, struct anv_address src,
                                uint32_t size);

void genX(cmd_buffer_dispatch_kernel)(struct anv_cmd_buffer *cmd_buffer,
                                      struct anv_kernel *kernel,
                                      const uint32_t *global_size, /* NULL for indirect */
                                      uint32_t arg_count,
                                      const struct anv_kernel_arg *args);

void genX(blorp_init_dynamic_states)(struct blorp_context *context);

void genX(blorp_exec)(struct blorp_batch *batch,
                      const struct blorp_params *params);

void genX(batch_emit_secondary_call)(struct anv_batch *batch,
                                     struct anv_device *device,
                                     struct anv_address secondary_addr,
                                     struct anv_address secondary_return_addr);

void *genX(batch_emit_return)(struct anv_batch *batch);

void genX(cmd_emit_timestamp)(struct anv_batch *batch,
                              struct anv_device *device,
                              struct anv_address addr,
                              enum anv_timestamp_capture_type type,
                              void *data);

void genX(cmd_capture_data)(struct anv_batch *batch,
                            struct anv_device *device,
                            struct anv_address dst_addr,
                            struct anv_address src_addr,
                            uint32_t size_B);

void
genX(batch_emit_post_3dprimitive_was)(struct anv_batch *batch,
                                      const struct anv_device *device,
                                      uint32_t primitive_topology,
                                      uint32_t vertex_count);

void genX(batch_emit_fast_color_dummy_blit)(struct anv_batch *batch,
                                            struct anv_device *device);

VkPolygonMode
genX(raster_polygon_mode)(const struct anv_graphics_pipeline *pipeline,
                          VkPolygonMode polygon_mode,
                          VkPrimitiveTopology primitive_topology);

void
genX(graphics_pipeline_emit)(struct anv_graphics_pipeline *pipeline,
                             const struct vk_graphics_pipeline_state *state);

void
genX(compute_pipeline_emit)(struct anv_compute_pipeline *pipeline);

void
genX(ray_tracing_pipeline_emit)(struct anv_ray_tracing_pipeline *pipeline);

#define anv_shader_bin_get_bsr(bin, local_arg_offset) ({             \
   assert((local_arg_offset) % 8 == 0);                              \
   const struct brw_bs_prog_data *prog_data =                        \
      brw_bs_prog_data_const(bin->prog_data);                        \
   assert(prog_data->simd_size == 8 || prog_data->simd_size == 16);  \
                                                                     \
   (struct GENX(BINDLESS_SHADER_RECORD)) {                           \
      .OffsetToLocalArguments = (local_arg_offset) / 8,              \
      .BindlessShaderDispatchMode =                                  \
         prog_data->simd_size == 16 ? RT_SIMD16 : RT_SIMD8,          \
      .KernelStartPointer = bin->kernel.offset,                      \
   };                                                                \
})
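/*
 * Example use (a minimal sketch; `bin` and the 8-byte-aligned local
 * argument offset are illustrative, not taken from a real call site):
 *
 *    struct GENX(BINDLESS_SHADER_RECORD) bsr =
 *       anv_shader_bin_get_bsr(bin, 16);
 */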

void
genX(batch_set_preemption)(struct anv_batch *batch,
                           const struct intel_device_info *devinfo,
                           uint32_t current_pipeline,
                           bool value);

void
genX(cmd_buffer_set_preemption)(struct anv_cmd_buffer *cmd_buffer, bool value);

void
genX(batch_emit_pipe_control)(struct anv_batch *batch,
                              const struct intel_device_info *devinfo,
                              uint32_t current_pipeline,
                              enum anv_pipe_bits bits,
                              const char *reason);

void
genX(batch_emit_pipe_control_write)(struct anv_batch *batch,
                                    const struct intel_device_info *devinfo,
                                    uint32_t current_pipeline,
                                    uint32_t post_sync_op,
                                    struct anv_address address,
                                    uint32_t imm_data,
                                    enum anv_pipe_bits bits,
                                    const char *reason);

#define genx_batch_emit_pipe_control(a, b, c, d) \
   genX(batch_emit_pipe_control)(a, b, c, d, __func__)

#define genx_batch_emit_pipe_control_write(a, b, c, d, e, f, g) \
   genX(batch_emit_pipe_control_write)(a, b, c, d, e, f, g, __func__)
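/*
 * These wrappers pass __func__ as the `reason` argument so that emitted
 * PIPE_CONTROLs can be attributed to their call site in debug output.
 * A sketch of a call (the argument values here are placeholders, not from
 * a real call site):
 *
 *    genx_batch_emit_pipe_control(&cmd_buffer->batch,
 *                                 cmd_buffer->device->info,
 *                                 current_pipeline, ANV_PIPE_CS_STALL_BIT);
 */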

void genX(batch_emit_breakpoint)(struct anv_batch *batch,
                                 struct anv_device *device,
                                 bool emit_before_draw);

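/*
 * Thin wrapper that emits the draw breakpoint only when the DEBUG_DRAW_BKP
 * debug flag is set, so callers can invoke it unconditionally around draws.
 */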
static inline void
genX(emit_breakpoint)(struct anv_batch *batch,
                      struct anv_device *device,
                      bool emit_before_draw)
{
   if (INTEL_DEBUG(DEBUG_DRAW_BKP))
      genX(batch_emit_breakpoint)(batch, device, emit_before_draw);
}

void
genX(cmd_buffer_begin_companion)(struct anv_cmd_buffer *buffer,
                                 VkCommandBufferLevel level);

struct anv_state
genX(cmd_buffer_begin_companion_rcs_syncpoint)(struct anv_cmd_buffer *cmd_buffer);

void
genX(cmd_buffer_end_companion_rcs_syncpoint)(struct anv_cmd_buffer *cmd_buffer,
                                             struct anv_state syncpoint);

void
genX(emit_simple_shader_init)(struct anv_simple_shader *state);

void
genX(emit_simple_shader_dispatch)(struct anv_simple_shader *state,
                                  uint32_t num_threads,
                                  struct anv_state push_state);

struct anv_state
genX(simple_shader_alloc_push)(struct anv_simple_shader *state, uint32_t size);

struct anv_address
genX(simple_shader_push_state_address)(struct anv_simple_shader *state,
                                       struct anv_state push_state);

void
genX(emit_simple_shader_end)(struct anv_simple_shader *state);

VkResult genX(init_trtt_context_state)(struct anv_async_submit *submit);

void genX(write_trtt_entries)(struct anv_async_submit *submit,
                              struct anv_trtt_bind *l3l2_binds,
                              uint32_t n_l3l2_binds,
                              struct anv_trtt_bind *l1_binds,
                              uint32_t n_l1_binds);

void genX(async_submit_end)(struct anv_async_submit *submit);

void
genX(cmd_buffer_emit_push_descriptor_buffer_surface)(struct anv_cmd_buffer *cmd_buffer,
                                                     struct anv_descriptor_set *set);

void
genX(cmd_buffer_emit_push_descriptor_surfaces)(struct anv_cmd_buffer *cmd_buffer,
                                               struct anv_descriptor_set *set);

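/*
 * Re-emit surface states for any dirty push descriptors used by the given
 * pipeline, clear those dirty bits, and return the shader stages whose
 * binding tables need to be refreshed (0 if nothing was dirty).
 */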
static inline VkShaderStageFlags
genX(cmd_buffer_flush_push_descriptors)(struct anv_cmd_buffer *cmd_buffer,
                                        struct anv_cmd_pipeline_state *state,
                                        struct anv_pipeline *pipeline)
{
   if (!pipeline->use_push_descriptor && !pipeline->use_push_descriptor_buffer)
      return 0;

   assert(pipeline->layout.push_descriptor_set_index != -1);
   struct anv_descriptor_set *set =
      state->descriptors[pipeline->layout.push_descriptor_set_index];
   assert(set->is_push);

   const VkShaderStageFlags push_buffer_dirty =
      cmd_buffer->state.push_descriptors_dirty &
      pipeline->use_push_descriptor_buffer;
   if (push_buffer_dirty) {
      if (set->desc_surface_state.map == NULL)
         genX(cmd_buffer_emit_push_descriptor_buffer_surface)(cmd_buffer, set);

      /* Force the next push descriptor update to allocate a new descriptor set. */
      state->push_descriptor.set_used_on_gpu = true;
   }

   const VkShaderStageFlags push_descriptor_dirty =
      cmd_buffer->state.push_descriptors_dirty & pipeline->use_push_descriptor;
   if (push_descriptor_dirty) {
      genX(cmd_buffer_emit_push_descriptor_surfaces)(cmd_buffer, set);

      /* Force the next push descriptor update to allocate a new descriptor set. */
      state->push_descriptor.set_used_on_gpu = true;
   }

   /* Clear the dirty stages now that we've generated the surface states for
    * them.
    */
   cmd_buffer->state.push_descriptors_dirty &=
      ~(push_descriptor_dirty | push_buffer_dirty);

   /* Return the binding table stages that need to be updated */
   return push_buffer_dirty | push_descriptor_dirty;
}

void genX(emit_embedded_sampler)(struct anv_device *device,
                                 struct anv_embedded_sampler *sampler,
                                 struct anv_pipeline_embedded_sampler_binding *binding);