/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_pipe_control.c
 *
 * PIPE_CONTROL is the main flushing and synchronization primitive on Intel
 * GPUs.  It can invalidate caches, stall until rendering reaches various
 * stages of completion, write to memory, and do other things.  In a way,
 * it's a Swiss Army knife command - it has all kinds of capabilities, but
 * some significant limitations as well.
 *
 * Unfortunately, it's notoriously complicated and difficult to use.  Many
 * sub-commands can't be used together.  Some are meant to be used at the
 * top of the pipeline (invalidating caches before drawing), while some are
 * meant to be used at the end (stalling or flushing after drawing).
 *
 * Also, there's a list of restrictions a mile long, which vary by
 * generation.  Do this before doing that, or suffer the consequences
 * (usually a GPU hang).
 *
 * This file contains helpers for emitting PIPE_CONTROLs safely.  You can
 * simply call iris_emit_pipe_control_flush() with the desired operations
 * (as logical PIPE_CONTROL_* bits), and it will take care of splitting them
 * into multiple PIPE_CONTROL commands as necessary.  The per-generation
 * workarounds are applied in iris_emit_raw_pipe_control() in iris_state.c.
 */

#include "iris_context.h"
#include "util/hash_table.h"
#include "util/set.h"

/**
 * Emit a PIPE_CONTROL with various flushing flags.
 *
 * The caller is responsible for deciding what flags are appropriate for the
 * given generation.
 */
void
iris_emit_pipe_control_flush(struct iris_batch *batch,
                             const char *reason,
                             uint32_t flags)
{
   if ((flags & PIPE_CONTROL_CACHE_FLUSH_BITS) &&
       (flags & PIPE_CONTROL_CACHE_INVALIDATE_BITS)) {
      /* A pipe control command with flush and invalidate bits set
       * simultaneously is an inherently racy operation on Gfx6+ if the
       * contents of the flushed caches were intended to become visible from
       * any of the invalidated caches.  Split it into two PIPE_CONTROLs;
       * the first one should stall the pipeline to make sure that the
       * flushed R/W caches are coherent with memory once the specified R/O
       * caches are invalidated.  On pre-Gfx6 hardware the (implicit) R/O
       * cache invalidation seems to happen at the bottom of the pipeline
       * together with any write cache flush, so this shouldn't be a
       * concern.  In order to ensure a full stall, we do an end-of-pipe
       * sync.
       */
      iris_emit_end_of_pipe_sync(batch, reason,
                                 flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
      flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
   }

   batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
}
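
/* Hypothetical usage sketch (illustration only, not called by the driver):
 * flush the render target cache and invalidate the texture cache before
 * sampling from a surface that was just rendered to.  Because flush and
 * invalidate bits are requested together, the helper above splits the
 * request into two PIPE_CONTROLs.
 *
 *    iris_emit_pipe_control_flush(batch, "example: render-to-texture",
 *                                 PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                                 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 */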

/**
 * Emit a PIPE_CONTROL that writes to a buffer object.
 *
 * \p flags should contain one of the following items:
 * - PIPE_CONTROL_WRITE_IMMEDIATE
 * - PIPE_CONTROL_WRITE_TIMESTAMP
 * - PIPE_CONTROL_WRITE_DEPTH_COUNT
 */
void
iris_emit_pipe_control_write(struct iris_batch *batch,
                             const char *reason, uint32_t flags,
                             struct iris_bo *bo, uint32_t offset,
                             uint64_t imm)
{
   batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, bo, offset, imm);
}
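
/* Hypothetical usage sketch (illustration only): write an immediate value
 * to a caller-provided buffer object as a simple fence, with a CS stall so
 * later commands wait for the PIPE_CONTROL to complete.  "fence_bo" is an
 * assumed example buffer, not something defined in this driver.
 *
 *    iris_emit_pipe_control_write(batch, "example: fence value",
 *                                 PIPE_CONTROL_WRITE_IMMEDIATE |
 *                                 PIPE_CONTROL_CS_STALL,
 *                                 fence_bo, 0, 1);
 */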

/*
 * From Sandybridge PRM, volume 2, "1.7.2 End-of-Pipe Synchronization":
 *
 *    Write synchronization is a special case of end-of-pipe
 *    synchronization that requires that the render cache and/or depth
 *    related caches are flushed to memory, where the data will become
 *    globally visible.  This type of synchronization is required prior to
 *    SW (CPU) actually reading the result data from memory, or initiating
 *    an operation that will use as a read surface (such as a texture
 *    surface) a previous render target and/or depth/stencil buffer
 *
 * From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
 *
 *    Exercising the write cache flush bits (Render Target Cache Flush
 *    Enable, Depth Cache Flush Enable, DC Flush) in PIPE_CONTROL only
 *    ensures the write caches are flushed and doesn't guarantee the data
 *    is globally visible.
 *
 *    SW can track the completion of the end-of-pipe-synchronization by
 *    using "Notify Enable" and "PostSync Operation - Write Immediate
 *    Data" in the PIPE_CONTROL command.
 */
void
iris_emit_end_of_pipe_sync(struct iris_batch *batch,
                           const char *reason, uint32_t flags)
{
   /* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
    *
    *    "The most common action to perform upon reaching a synchronization
    *    point is to write a value out to memory.  An immediate value
    *    (included with the synchronization command) may be written."
    *
    * From Broadwell PRM, volume 7, "End-of-Pipe Synchronization":
    *
    *    "In case the data flushed out by the render engine is to be read
    *    back in to the render engine in coherent manner, then the render
    *    engine has to wait for the fence completion before accessing the
    *    flushed data.  This can be achieved by following means on various
    *    products: PIPE_CONTROL command with CS Stall and the required
    *    write caches flushed with Post-Sync-Operation as Write Immediate
    *    Data.
    *
    *    Example:
    *       - Workload-1 (3D/GPGPU/MEDIA)
    *       - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write Immediate
    *         Data, Required Write Cache Flush bits set)
    *       - Workload-2 (Can use the data produce or output by Workload-1)
    */
   iris_emit_pipe_control_write(batch, reason,
                                flags | PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_WRITE_IMMEDIATE,
                                batch->screen->workaround_address.bo,
                                batch->screen->workaround_address.offset, 0);
}
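
/* Hypothetical usage sketch (illustration only): make prior render target
 * writes globally visible, e.g. before the CPU maps the surface and reads
 * back the result, as described by the PRM quotes above.
 *
 *    iris_emit_end_of_pipe_sync(batch, "example: CPU readback",
 *                               PIPE_CONTROL_RENDER_TARGET_FLUSH);
 */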

/**
 * Emits appropriate flushes and invalidations for any previous memory
 * operations on \p bo to be strictly ordered relative to any subsequent
 * memory operations performed from the caching domain \p access.
 *
 * This is useful because the GPU has separate incoherent caches for the
 * render target, sampler, etc., which need to be explicitly invalidated or
 * flushed in order to obtain the expected memory ordering in cases where the
 * same surface is accessed through multiple caches (e.g. due to
 * render-to-texture).
 *
 * This provides the expected memory ordering guarantees whether the previous
 * access was performed from the same batch or a different one, but only the
 * former case needs to be handled explicitly here, since the kernel already
 * inserts implicit flushes and synchronization in order to guarantee that
 * any data dependencies between batches are satisfied.
 *
 * Even though no flushing or invalidation is required in order to account
 * for concurrent updates from other batches, we provide the guarantee that a
 * required synchronization operation due to a previous batch-local update
 * will never be omitted due to the influence of another thread accessing the
 * same buffer concurrently from the same caching domain: Such a concurrent
 * update will only ever change the seqno of the last update to a value
 * greater than the local value (see iris_bo_bump_seqno()), which means that
 * we will always emit at least as much flushing and invalidation as we would
 * have for the local seqno (see the coherent_seqnos comparisons below).
 */
void
iris_emit_buffer_barrier_for(struct iris_batch *batch,
                             struct iris_bo *bo,
                             enum iris_domain access)
{
   const struct intel_device_info *devinfo = batch->screen->devinfo;

   const bool access_via_l3 = iris_domain_is_l3_coherent(devinfo, access);

   const uint32_t all_flush_bits = (PIPE_CONTROL_CACHE_FLUSH_BITS |
                                    PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                    PIPE_CONTROL_FLUSH_ENABLE);
   const uint32_t flush_bits[NUM_IRIS_DOMAINS] = {
      [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_RENDER_TARGET_FLUSH,
      [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_DEPTH_CACHE_FLUSH,
      [IRIS_DOMAIN_DATA_WRITE] = PIPE_CONTROL_FLUSH_HDC,
      /* OTHER_WRITE includes "VF Cache Invalidate" to make sure that any
       * stream output writes are finished.  CS stall is added implicitly.
       */
      [IRIS_DOMAIN_OTHER_WRITE] = PIPE_CONTROL_FLUSH_ENABLE |
                                  PIPE_CONTROL_VF_CACHE_INVALIDATE,
      [IRIS_DOMAIN_VF_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
      [IRIS_DOMAIN_SAMPLER_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
      [IRIS_DOMAIN_PULL_CONSTANT_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
      [IRIS_DOMAIN_OTHER_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
   };
   const uint32_t invalidate_bits[NUM_IRIS_DOMAINS] = {
      [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_RENDER_TARGET_FLUSH,
      [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_DEPTH_CACHE_FLUSH,
      [IRIS_DOMAIN_DATA_WRITE] = PIPE_CONTROL_FLUSH_HDC,
      [IRIS_DOMAIN_OTHER_WRITE] = PIPE_CONTROL_FLUSH_ENABLE,
      [IRIS_DOMAIN_VF_READ] = PIPE_CONTROL_VF_CACHE_INVALIDATE,
      [IRIS_DOMAIN_SAMPLER_READ] = PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE,
      [IRIS_DOMAIN_PULL_CONSTANT_READ] = PIPE_CONTROL_CONST_CACHE_INVALIDATE |
         (iris_indirect_ubos_use_sampler(batch->screen) ?
          PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE :
          PIPE_CONTROL_DATA_CACHE_FLUSH),
   };
   const uint32_t l3_flush_bits[NUM_IRIS_DOMAINS] = {
      [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_TILE_CACHE_FLUSH,
      [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_TILE_CACHE_FLUSH,
      [IRIS_DOMAIN_DATA_WRITE] = PIPE_CONTROL_DATA_CACHE_FLUSH,
   };
   uint32_t bits = 0;

   /* Iterate over all read/write domains first in order to handle RaW
    * and WaW dependencies, which might involve flushing the domain of
    * the previous access and invalidating the specified domain.
    */
   for (unsigned i = 0; i < IRIS_DOMAIN_OTHER_WRITE; i++) {
      assert(!iris_domain_is_read_only(i));
      assert(iris_domain_is_l3_coherent(devinfo, i));

      if (i != access) {
         const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

         /* Invalidate unless the most recent read/write access from
          * this domain is already guaranteed to be visible to the
          * specified domain.  Flush if the most recent access from
          * this domain occurred after its most recent flush.
          */
         if (seqno > batch->coherent_seqnos[access][i]) {
            bits |= invalidate_bits[access];

            if (access_via_l3) {
               /* Both domains share L3.  If the most recent read/write
                * access in domain `i' isn't visible to L3, then flush it
                * to L3.
                */
               if (seqno > batch->l3_coherent_seqnos[i])
                  bits |= flush_bits[i];
            } else {
               /* Domain `i' is L3 coherent but the specified domain is not.
                * Flush both this cache and L3 out to memory.
                */
               if (seqno > batch->coherent_seqnos[i][i])
                  bits |= flush_bits[i] | l3_flush_bits[i];
            }
         }
      }
   }

   /* All read-only domains can be considered mutually coherent since
    * the order of read-only memory operations is immaterial.  If the
    * specified domain is read/write we need to iterate over them too,
    * in order to handle any WaR dependencies.
    */
   if (!iris_domain_is_read_only(access)) {
      for (unsigned i = IRIS_DOMAIN_VF_READ; i < NUM_IRIS_DOMAINS; i++) {
         assert(iris_domain_is_read_only(i));
         const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

         const uint64_t last_visible_seqno =
            iris_domain_is_l3_coherent(devinfo, i) ?
            batch->l3_coherent_seqnos[i] : batch->coherent_seqnos[i][i];

         /* Flush if the most recent access from this domain occurred
          * after its most recent flush.
          */
         if (seqno > last_visible_seqno)
            bits |= flush_bits[i];
      }
   }

   /* The IRIS_DOMAIN_OTHER_WRITE kitchen-sink domain cannot be
    * considered coherent with itself since it's really a collection
    * of multiple incoherent read/write domains, so we special-case it
    * here.
    */
   const unsigned i = IRIS_DOMAIN_OTHER_WRITE;
   const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

   assert(!iris_domain_is_l3_coherent(devinfo, i));

   /* Invalidate unless the most recent read/write access from this
    * domain is already guaranteed to be visible to the specified
    * domain.  Flush if the most recent access from this domain
    * occurred after its most recent flush.
    */
   if (seqno > batch->coherent_seqnos[access][i]) {
      bits |= invalidate_bits[access];

      /* There is a non-L3-coherent write that isn't visible to the
       * specified domain.  If the access is via L3, then it might see
       * stale L3 data that was loaded before that write.  In this case,
       * we try to invalidate all read-only sections of the L3 cache.
       */
      if (access_via_l3 && seqno > batch->l3_coherent_seqnos[i])
         bits |= PIPE_CONTROL_L3_RO_INVALIDATE_BITS;

      if (seqno > batch->coherent_seqnos[i][i])
         bits |= flush_bits[i];
   }

   if (bits) {
      /* Stall-at-scoreboard is not supported by the compute pipeline; use
       * the documented sequence of two PIPE_CONTROLs with
       * PIPE_CONTROL_FLUSH_ENABLE set in the second one to obtain a similar
       * effect.
       */
      const bool compute_stall_sequence = batch->name == IRIS_BATCH_COMPUTE &&
         (bits & PIPE_CONTROL_STALL_AT_SCOREBOARD) &&
         !(bits & PIPE_CONTROL_CACHE_FLUSH_BITS);

      /* Stall-at-scoreboard is not expected to work in combination with
       * other flush bits.
       */
      if (bits & PIPE_CONTROL_CACHE_FLUSH_BITS)
         bits &= ~PIPE_CONTROL_STALL_AT_SCOREBOARD;

      if (batch->name == IRIS_BATCH_COMPUTE)
         bits &= ~PIPE_CONTROL_GRAPHICS_BITS;

      /* Emit any required flushes and invalidations. */
      if ((bits & all_flush_bits) || compute_stall_sequence)
         iris_emit_end_of_pipe_sync(batch, "cache tracker: flush",
                                    bits & all_flush_bits);

      if ((bits & ~all_flush_bits) || compute_stall_sequence)
         iris_emit_pipe_control_flush(batch, "cache tracker: invalidate",
                                      (bits & ~all_flush_bits) |
                                      (compute_stall_sequence ?
                                       PIPE_CONTROL_FLUSH_ENABLE : 0));
   }
}
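
/* Hypothetical usage sketch (illustration only): before a buffer that may
 * have been written through another caching domain is consumed as a vertex
 * buffer, order all prior accesses on it with respect to VF reads.
 *
 *    iris_emit_buffer_barrier_for(batch, bo, IRIS_DOMAIN_VF_READ);
 */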

/**
 * Flush and invalidate all caches (for debugging purposes).
 */
void
iris_flush_all_caches(struct iris_batch *batch)
{
   iris_emit_pipe_control_flush(batch, "debug: flush all caches",
                                PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_DATA_CACHE_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_TILE_CACHE_FLUSH |
                                PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}

static void
iris_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
   struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];

   if (render_batch->contains_draw) {
      iris_batch_maybe_flush(render_batch, 48);
      iris_emit_pipe_control_flush(render_batch,
                                   "API: texture barrier (1/2)",
                                   PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                   PIPE_CONTROL_CS_STALL);
      iris_emit_pipe_control_flush(render_batch,
                                   "API: texture barrier (2/2)",
                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   if (compute_batch->contains_draw) {
      iris_batch_maybe_flush(compute_batch, 48);
      iris_emit_pipe_control_flush(compute_batch,
                                   "API: texture barrier (1/2)",
                                   PIPE_CONTROL_CS_STALL);
      iris_emit_pipe_control_flush(compute_batch,
                                   "API: texture barrier (2/2)",
                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }
}

static void
iris_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct iris_context *ice = (void *) ctx;
   unsigned bits = PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_CS_STALL;

   if (flags & (PIPE_BARRIER_VERTEX_BUFFER |
                PIPE_BARRIER_INDEX_BUFFER |
                PIPE_BARRIER_INDIRECT_BUFFER)) {
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
   }

   if (flags & PIPE_BARRIER_CONSTANT_BUFFER) {
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_CONST_CACHE_INVALIDATE;
   }

   if (flags & PIPE_BARRIER_TEXTURE)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (flags & PIPE_BARRIER_FRAMEBUFFER) {
      /* The caller may have issued a render target read and a data cache
       * data port write in the same draw call.  Depending on the hardware,
       * iris performs render target reads with either the sampler or the
       * render cache data port.  If the next framebuffer access is a render
       * target read, the previously affected caches must be invalidated.
       */
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_RENDER_TARGET_FLUSH;
   }

   iris_foreach_batch(ice, batch) {
      const unsigned allowed_bits =
         batch->name == IRIS_BATCH_COMPUTE ? ~PIPE_CONTROL_GRAPHICS_BITS : ~0u;

      if (batch->contains_draw) {
         iris_batch_maybe_flush(batch, 24);
         iris_emit_pipe_control_flush(batch,
                                      "API: memory barrier",
                                      bits & allowed_bits);
      }
   }
}

void
iris_init_flush_functions(struct pipe_context *ctx)
{
   ctx->memory_barrier = iris_memory_barrier;
   ctx->texture_barrier = iris_texture_barrier;
}