/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_blorp.c
 *
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * GenX specific code for working with BLORP (blitting, resolves, clears
 * on the 3D engine).  This provides the driver-specific hooks needed to
 * implement the BLORP API.
 *
 * See iris_blit.c, iris_clear.c, and so on.
 */

#include <assert.h>

#include "iris_batch.h"
#include "iris_resource.h"
#include "iris_context.h"

#include "util/u_upload_mgr.h"
#include "intel/common/intel_l3_config.h"
#include "intel/compiler/brw_compiler.h"

#include "genxml/gen_macros.h"

#if GFX_VER >= 9
#include "blorp/blorp_genX_exec_brw.h"
#else
#include "blorp/blorp_genX_exec_elk.h"
#endif

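/**
 * Allocate "size" bytes of temporary state via the given uploader, pin the
 * backing BO in the batch, and return a CPU mapping for the caller to fill.
 *
 * On return, *out_offset holds the allocation's offset; see the comment in
 * the body for how it is interpreted depending on whether out_bo is set.
 */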
static uint32_t *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset,
             struct iris_bo **out_bo)
{
   struct pipe_resource *res = NULL;
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, &res, &ptr);

   struct iris_bo *bo = iris_resource_bo(res);
   iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

   iris_record_state_size(batch->state_sizes,
                          bo->address + *out_offset, size);

   /* If the caller has asked for a BO, we leave them the responsibility of
    * adding bo->address (say, by handing an address to genxml).  If not,
    * we assume they want the offset from a base address.
    */
   if (out_bo)
      *out_bo = bo;
   else
      *out_offset += iris_bo_offset_from_base_address(bo);

   pipe_resource_reference(&res, NULL);

   return ptr;
}

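/** Reserve space in the batch's command stream for BLORP to emit n dwords. */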
static void *
blorp_emit_dwords(struct blorp_batch *blorp_batch, unsigned n)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   return iris_get_command_space(batch, n * sizeof(uint32_t));
}

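/**
 * Pin addr.buffer for this batch (noting whether BLORP will write it) and
 * return the full 64-bit GPU address to use in the command stream.
 */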
static uint64_t
combine_and_pin_address(struct blorp_batch *blorp_batch,
                        struct blorp_address addr)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo = addr.buffer;

   iris_use_pinned_bo(batch, bo,
                      addr.reloc_flags & IRIS_BLORP_RELOC_FLAGS_EXEC_OBJECT_WRITE,
                      IRIS_DOMAIN_NONE);

   /* Assume this is a general address, not relative to a base. */
   return bo->address + addr.offset;
}

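/* iris uses softpin, so a "relocation" is just the final 64-bit address. */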
static uint64_t
blorp_emit_reloc(struct blorp_batch *blorp_batch, UNUSED void *location,
                 struct blorp_address addr, uint32_t delta)
{
   return combine_and_pin_address(blorp_batch, addr) + delta;
}

static void
blorp_surface_reloc(struct blorp_batch *blorp_batch, uint32_t ss_offset,
                    struct blorp_address addr, uint32_t delta)
{
   /* Let blorp_get_surface_address do the pinning. */
}

static uint64_t
blorp_get_surface_address(struct blorp_batch *blorp_batch,
                          struct blorp_address addr)
{
   return combine_and_pin_address(blorp_batch, addr);
}

UNUSED static struct blorp_address
blorp_get_surface_base_address(UNUSED struct blorp_batch *blorp_batch)
{
   return (struct blorp_address) { .offset = IRIS_MEMZONE_BINDER_START };
}

static uint32_t
blorp_get_dynamic_state(struct blorp_batch *batch,
                        enum blorp_dynamic_state name)
{
   unreachable("Not implemented");
}

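/**
 * Allocate dynamic state from the context's dynamic uploader, returning a
 * CPU map and a base-address-relative offset (see stream_state()).
 */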
static void *
blorp_alloc_dynamic_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

   return stream_state(batch, ice->state.dynamic_uploader,
                       size, alignment, offset, NULL);
}

UNUSED static void *
blorp_alloc_general_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   /* Use dynamic state range for general state on iris. */
   return blorp_alloc_dynamic_state(blorp_batch, size, alignment, offset);
}

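/**
 * Reserve binding table space in the binder and stream out one
 * SURFACE_STATE's worth of space per entry, writing each entry's offset
 * into the table as we go.
 */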
static bool
blorp_alloc_binding_table(struct blorp_batch *blorp_batch,
                          unsigned num_entries,
                          unsigned state_size,
                          unsigned state_alignment,
                          uint32_t *out_bt_offset,
                          uint32_t *surface_offsets,
                          void **surface_maps)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_binder *binder = &ice->state.binder;
   struct iris_batch *batch = blorp_batch->driver_batch;

   unsigned bt_offset =
      iris_binder_reserve(ice, num_entries * sizeof(uint32_t));
   uint32_t *bt_map = binder->map + bt_offset;

   uint32_t surf_base_offset = GFX_VER < 11 ? binder->bo->address : 0;

   *out_bt_offset = bt_offset;

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = stream_state(batch, ice->state.surface_uploader,
                                     state_size, state_alignment,
                                     &surface_offsets[i], NULL);
      bt_map[i] = surface_offsets[i] - surf_base_offset;
   }

   iris_use_pinned_bo(batch, binder->bo, false, IRIS_DOMAIN_NONE);

   batch->screen->vtbl.update_binder_address(batch, binder);

   return true;
}

static uint32_t
blorp_binding_table_offset_to_pointer(struct blorp_batch *batch,
                                      uint32_t offset)
{
   /* See IRIS_BT_OFFSET_SHIFT in iris_state.c */
   return offset >> ((GFX_VER >= 11 && GFX_VERx10 < 125) ? 3 : 0);
}

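/**
 * Stream out vertex data for BLORP, filling in a blorp_address (including
 * MOCS and a locality hint) that points at the new allocation.
 */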
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          struct blorp_address *addr)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo;
   uint32_t offset;

   void *map = stream_state(batch, ice->ctx.const_uploader, size, 64,
                            &offset, &bo);

   *addr = (struct blorp_address) {
      .buffer = bo,
      .offset = offset,
      .mocs = iris_mocs(bo, &batch->screen->isl_dev,
                        ISL_SURF_USAGE_VERTEX_BUFFER_BIT),
      .local_hint = iris_bo_likely_local(bo),
   };

   return map;
}

/**
 * See iris_upload_render_state's IRIS_DIRTY_VERTEX_BUFFERS handling for
 * a comment about why these VF invalidations are needed.
 */
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *blorp_batch,
                                           const struct blorp_address *addrs,
                                           UNUSED uint32_t *sizes,
                                           unsigned num_vbs)
{
#if GFX_VER < 11
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   bool need_invalidate = false;

   for (unsigned i = 0; i < num_vbs; i++) {
      struct iris_bo *bo = addrs[i].buffer;
      uint16_t high_bits = bo->address >> 32u;

      if (high_bits != ice->state.last_vbo_high_bits[i]) {
         need_invalidate = true;
         ice->state.last_vbo_high_bits[i] = high_bits;
      }
   }

   if (need_invalidate) {
      iris_emit_pipe_control_flush(batch,
                                   "workaround: VF cache 32-bit key [blorp]",
                                   PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                   PIPE_CONTROL_CS_STALL);
   }
#endif
}

static struct blorp_address
blorp_get_workaround_address(struct blorp_batch *blorp_batch)
{
   struct iris_batch *batch = blorp_batch->driver_batch;

   return (struct blorp_address) {
      .buffer = batch->screen->workaround_address.bo,
      .offset = batch->screen->workaround_address.offset,
      .local_hint =
         iris_bo_likely_local(batch->screen->workaround_address.bo),
   };
}

static void
blorp_flush_range(UNUSED struct blorp_batch *blorp_batch,
                  UNUSED void *start,
                  UNUSED size_t size)
{
   /* All allocated states come from the batch which we will flush before we
    * submit it.  There's nothing for us to do here.
    */
}

static void
blorp_pre_emit_urb_config(struct blorp_batch *blorp_batch,
                          struct intel_urb_config *urb_cfg)
{
   genX(urb_workaround)(blorp_batch->driver_batch, urb_cfg);
}

static const struct intel_l3_config *
blorp_get_l3_config(struct blorp_batch *blorp_batch)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   return batch->screen->l3_config_3d;
}

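/**
 * Run a BLORP operation on the render engine: emit any required
 * workarounds, call blorp_exec(), then flag all the state BLORP clobbered
 * as dirty so the normal draw path re-emits it.
 */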
static void
iris_blorp_exec_render(struct blorp_batch *blorp_batch,
                       const struct blorp_params *params)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   uint32_t pc_flags = 0;

#if GFX_VER >= 11
   /* The PIPE_CONTROL command description says:
    *
    * "Whenever a Binding Table Index (BTI) used by a Render Target Message
    * points to a different RENDER_SURFACE_STATE, SW must issue a Render
    * Target Cache Flush by enabling this bit. When render target flush
    * is set due to new association of BTI, PS Scoreboard Stall bit must
    * be set in this packet."
    */
   pc_flags = PIPE_CONTROL_RENDER_TARGET_FLUSH |
              PIPE_CONTROL_STALL_AT_SCOREBOARD;
#endif

   /* Check if BLORP's depth/stencil state matches ours. */
   if (intel_needs_workaround(batch->screen->devinfo, 18019816803)) {
      const bool blorp_ds_state =
         params->depth.enabled || params->stencil.enabled;
      if (ice->state.ds_write_state != blorp_ds_state) {
         pc_flags |= PIPE_CONTROL_PSS_STALL_SYNC;
         ice->state.ds_write_state = blorp_ds_state;
      }
   }

   if (pc_flags != 0) {
      iris_emit_pipe_control_flush(batch,
                                   "workaround: prior to [blorp]",
                                   pc_flags);
   }

   if (params->depth.enabled &&
       !(blorp_batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL))
      genX(emit_depth_state_workarounds)(ice, batch, &params->depth.surf);

   iris_require_command_space(batch, 1400);

#if GFX_VER == 8
   genX(update_pma_fix)(ice, batch, false);
#endif

   const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
   if (ice->state.current_hash_scale != scale) {
      genX(emit_hashing_mode)(ice, batch, params->x1 - params->x0,
                              params->y1 - params->y0, scale);
   }

#if GFX_VERx10 == 125
   iris_use_pinned_bo(batch, iris_resource_bo(ice->state.pixel_hashing_tables),
                      false, IRIS_DOMAIN_NONE);
#else
   assert(!ice->state.pixel_hashing_tables);
#endif

#if GFX_VER >= 12
   genX(invalidate_aux_map_state)(batch);
#endif

   iris_handle_always_flush_cache(batch);

   blorp_exec(blorp_batch, params);

   iris_handle_always_flush_cache(batch);

   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */

   uint64_t skip_bits = (IRIS_DIRTY_POLYGON_STIPPLE |
                         IRIS_DIRTY_SO_BUFFERS |
                         IRIS_DIRTY_SO_DECL_LIST |
                         IRIS_DIRTY_LINE_STIPPLE |
                         IRIS_ALL_DIRTY_FOR_COMPUTE |
                         IRIS_DIRTY_SCISSOR_RECT |
                         IRIS_DIRTY_VF);
   /* Wa_14016820455
    * On Gfx 12.5 platforms, the SF_CL_VIEWPORT pointer can be invalidated,
    * likely by a read cache invalidation when clipping is disabled, so we
    * don't skip its dirty bit here, ensuring it gets reprogrammed.
    */
   if (GFX_VERx10 != 125)
      skip_bits |= IRIS_DIRTY_SF_CL_VIEWPORT;

   uint64_t skip_stage_bits = (IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE |
                               IRIS_STAGE_DIRTY_UNCOMPILED_VS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_TES |
                               IRIS_STAGE_DIRTY_UNCOMPILED_GS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_FS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_VS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_TCS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_TES |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_GS);

   if (!ice->shaders.prog[MESA_SHADER_TESS_EVAL]) {
      /* BLORP disabled tessellation, but it was already off anyway */
      skip_stage_bits |= IRIS_STAGE_DIRTY_TCS |
                         IRIS_STAGE_DIRTY_TES |
                         IRIS_STAGE_DIRTY_CONSTANTS_TCS |
                         IRIS_STAGE_DIRTY_CONSTANTS_TES |
                         IRIS_STAGE_DIRTY_BINDINGS_TCS |
                         IRIS_STAGE_DIRTY_BINDINGS_TES;
   }

   if (!ice->shaders.prog[MESA_SHADER_GEOMETRY]) {
      /* BLORP disabled geometry shaders, but they were already off anyway */
      skip_stage_bits |= IRIS_STAGE_DIRTY_GS |
                         IRIS_STAGE_DIRTY_CONSTANTS_GS |
                         IRIS_STAGE_DIRTY_BINDINGS_GS;
   }

   /* We can skip flagging IRIS_DIRTY_DEPTH_BUFFER if
    * BLORP_BATCH_NO_EMIT_DEPTH_STENCIL is set.
    */
   if (blorp_batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL)
      skip_bits |= IRIS_DIRTY_DEPTH_BUFFER;

   if (!params->wm_prog_data)
      skip_bits |= IRIS_DIRTY_BLEND_STATE | IRIS_DIRTY_PS_BLEND;

   ice->state.dirty |= ~skip_bits;
   ice->state.stage_dirty |= ~skip_stage_bits;

   for (int i = 0; i < ARRAY_SIZE(ice->shaders.urb.cfg.size); i++)
      ice->shaders.urb.cfg.size[i] = 0;

   if (params->src.enabled)
      iris_bo_bump_seqno(params->src.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_SAMPLER_READ);
   if (params->dst.enabled)
      iris_bo_bump_seqno(params->dst.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_RENDER_WRITE);
   if (params->depth.enabled)
      iris_bo_bump_seqno(params->depth.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_DEPTH_WRITE);
   if (params->stencil.enabled)
      iris_bo_bump_seqno(params->stencil.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_DEPTH_WRITE);
}

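/**
 * Run a BLORP operation on the blitter engine, then bump seqnos for the
 * BOs it read or wrote.
 */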
static void
iris_blorp_exec_blitter(struct blorp_batch *blorp_batch,
                        const struct blorp_params *params)
{
   struct iris_batch *batch = blorp_batch->driver_batch;

   /* Roughly the length of an XY_BLOCK_COPY_BLT and an MI_FLUSH_DW */
   iris_require_command_space(batch, 108);

   iris_handle_always_flush_cache(batch);

   blorp_exec(blorp_batch, params);

   iris_handle_always_flush_cache(batch);

   if (params->src.enabled) {
      iris_bo_bump_seqno(params->src.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_OTHER_READ);
   }

   iris_bo_bump_seqno(params->dst.addr.buffer, batch->next_seqno,
                      IRIS_DOMAIN_OTHER_WRITE);
}

static void
iris_blorp_exec(struct blorp_batch *blorp_batch,
                const struct blorp_params *params)
{
   if (blorp_batch->flags & BLORP_BATCH_USE_BLITTER)
      iris_blorp_exec_blitter(blorp_batch, params);
   else
      iris_blorp_exec_render(blorp_batch, params);
}

static void
blorp_measure_start(struct blorp_batch *blorp_batch,
                    const struct blorp_params *params)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

   trace_intel_begin_blorp(&batch->trace);

   if (batch->measure == NULL)
      return;

   iris_measure_snapshot(ice, batch,
                         blorp_op_to_intel_measure_snapshot(params->op),
                         NULL, NULL, NULL);
}

static void
blorp_measure_end(struct blorp_batch *blorp_batch,
                  const struct blorp_params *params)
{
   struct iris_batch *batch = blorp_batch->driver_batch;

   trace_intel_end_blorp(&batch->trace,
                         params->op,
                         params->x1 - params->x0,
                         params->y1 - params->y0,
                         params->num_samples,
                         params->shader_pipeline,
                         params->dst.view.format,
                         params->src.view.format,
                         (blorp_batch->flags & BLORP_BATCH_PREDICATE_ENABLE));
}

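/**
 * Create the per-context BLORP instance and hook up the iris callbacks.
 */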
void
genX(init_blorp)(struct iris_context *ice)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;

#if GFX_VER >= 9
   blorp_init_brw(&ice->blorp, ice, &screen->isl_dev, screen->brw, NULL);
#else
   blorp_init_elk(&ice->blorp, ice, &screen->isl_dev, screen->elk, NULL);
#endif
   ice->blorp.lookup_shader = iris_blorp_lookup_shader;
   ice->blorp.upload_shader = iris_blorp_upload_shader;
   ice->blorp.exec = iris_blorp_exec;
   ice->blorp.enable_tbimr = screen->driconf.enable_tbimr;
}

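/* Hooks BLORP calls immediately before and after each primitive it draws. */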
static void
blorp_emit_pre_draw(struct blorp_batch *blorp_batch,
                    const struct blorp_params *params)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   blorp_measure_start(blorp_batch, params);
   genX(maybe_emit_breakpoint)(batch, true);
}

static void
blorp_emit_post_draw(struct blorp_batch *blorp_batch,
                     const struct blorp_params *params)
{
   struct iris_batch *batch = blorp_batch->driver_batch;

   /* A _3DPRIM_RECTLIST is a MESA_PRIM_QUAD_STRIP with an implied vertex. */
   genX(emit_3dprimitive_was)(batch, NULL, MESA_PRIM_QUAD_STRIP, 3);
   genX(maybe_emit_breakpoint)(batch, false);
   blorp_measure_end(blorp_batch, params);
}
554