/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "si_pipe.h"
#include "util/format/u_format.h"
#include "util/format_srgb.h"
#include "util/helpers.h"
#include "util/hash_table.h"
#include "util/u_pack_color.h"
#include "ac_nir_meta.h"

static void si_compute_begin_internal(struct si_context *sctx, bool render_condition_enabled)
{
   sctx->barrier_flags &= ~SI_BARRIER_EVENT_PIPELINESTAT_START;
   if (sctx->num_hw_pipestat_streamout_queries) {
      sctx->barrier_flags |= SI_BARRIER_EVENT_PIPELINESTAT_STOP;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
   }

   if (!render_condition_enabled)
      sctx->render_cond_enabled = false;

   /* Force-disable fbfetch because there are unsolvable recursion problems. */
   si_force_disable_ps_colorbuf0_slot(sctx);

   /* Skip decompression to prevent infinite recursion. */
   sctx->blitter_running = true;
}

static void si_compute_end_internal(struct si_context *sctx)
{
   sctx->barrier_flags &= ~SI_BARRIER_EVENT_PIPELINESTAT_STOP;
   if (sctx->num_hw_pipestat_streamout_queries) {
      sctx->barrier_flags |= SI_BARRIER_EVENT_PIPELINESTAT_START;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
   }

   sctx->render_cond_enabled = sctx->render_cond;
   sctx->blitter_running = false;

   /* We force-disabled fbfetch, so recompute the state. */
   si_update_ps_colorbuf0_slot(sctx);
}

static void si_launch_grid_internal(struct si_context *sctx, const struct pipe_grid_info *info,
                                    void *shader)
{
   void *saved_cs = sctx->cs_shader_state.program;
   sctx->b.bind_compute_state(&sctx->b, shader);
   sctx->b.launch_grid(&sctx->b, info);
   sctx->b.bind_compute_state(&sctx->b, saved_cs);
}

void si_launch_grid_internal_ssbos(struct si_context *sctx, struct pipe_grid_info *info,
                                   void *shader, unsigned num_buffers,
                                   const struct pipe_shader_buffer *buffers,
                                   unsigned writeable_bitmask, bool render_condition_enable)
{
   /* Save states. */
   struct pipe_shader_buffer saved_sb[3] = {};
   assert(num_buffers <= ARRAY_SIZE(saved_sb));
   si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, num_buffers, saved_sb);

   unsigned saved_writable_mask = 0;
   for (unsigned i = 0; i < num_buffers; i++) {
      if (sctx->const_and_shader_buffers[PIPE_SHADER_COMPUTE].writable_mask &
          (1u << si_get_shaderbuf_slot(i)))
         saved_writable_mask |= 1 << i;
   }

   /* Bind buffers and launch compute. */
   si_set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_buffers, buffers,
                         writeable_bitmask,
                         true /* don't update bind_history to prevent unnecessary syncs later */);

   si_compute_begin_internal(sctx, render_condition_enable);
   si_launch_grid_internal(sctx, info, shader);
   si_compute_end_internal(sctx);

   /* Restore states. */
   sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_buffers, saved_sb,
                              saved_writable_mask);
   for (int i = 0; i < num_buffers; i++)
      pipe_resource_reference(&saved_sb[i].buffer, NULL);
}

static unsigned
set_work_size(struct pipe_grid_info *info, unsigned block_x, unsigned block_y, unsigned block_z,
              unsigned work_x, unsigned work_y, unsigned work_z)
{
   info->block[0] = block_x;
   info->block[1] = block_y;
   info->block[2] = block_z;

   unsigned work[3] = {work_x, work_y, work_z};
   for (int i = 0; i < 3; ++i) {
      info->last_block[i] = work[i] % info->block[i];
      info->grid[i] = DIV_ROUND_UP(work[i], info->block[i]);
   }

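   /* Return the number of grid dimensions the workload actually uses (1-3), so that callers
    * can pick a shader variant accordingly.
    */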
   return work_z > 1 ? 3 : (work_y > 1 ? 2 : 1);
}

/**
 * Clear a buffer using read-modify-write with a 32-bit write bitmask.
 * The clear value has 32 bits.
 */
void si_compute_clear_buffer_rmw(struct si_context *sctx, struct pipe_resource *dst,
                                 unsigned dst_offset, unsigned size, uint32_t clear_value,
                                 uint32_t writebitmask, bool render_condition_enable)
{
   assert(dst_offset % 4 == 0);
   assert(size % 4 == 0);

   assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);

   /* Use buffer_load_dwordx4 and buffer_store_dwordx4 per thread. */
   unsigned dwords_per_thread = 4;
   unsigned num_threads = DIV_ROUND_UP(size, dwords_per_thread * 4);

   struct pipe_grid_info info = {};
   set_work_size(&info, 64, 1, 1, num_threads, 1, 1);

   struct pipe_shader_buffer sb = {};
   sb.buffer = dst;
   sb.buffer_offset = dst_offset;
   sb.buffer_size = size;

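   /* user_data[0] holds the bits to write and user_data[1] the mask of bits to preserve,
    * implementing the read-modify-write:
    *    dst = (dst & ~writebitmask) | (clear_value & writebitmask)
    */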
   sctx->cs_user_data[0] = clear_value & writebitmask;
   sctx->cs_user_data[1] = ~writebitmask;

   if (!sctx->cs_clear_buffer_rmw)
      sctx->cs_clear_buffer_rmw = si_create_clear_buffer_rmw_cs(sctx);

   si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_buffer_rmw, 1, &sb, 0x1,
                                 render_condition_enable);
}

/**
 * This implements a clear/copy_buffer compute shader allowing an arbitrary src_offset, dst_offset,
 * and size alignment, so that it can be used as a complete replacement for the typically slower
 * CP DMA.
 *
 * It stores 16B blocks per thread aligned to a 16B offset just like a 16B-aligned clear/copy,
 * and it byte-shifts src data by the amount of both src and dst misalignment to get the behavior
 * of a totally unaligned clear/copy.
 *
 * The first and last thread can store less than 16B (up to 1B store granularity) depending on how
 * much dst is unaligned.
 */
bool si_compute_clear_copy_buffer(struct si_context *sctx, struct pipe_resource *dst,
                                  unsigned dst_offset, struct pipe_resource *src,
                                  unsigned src_offset, unsigned size,
                                  const uint32_t *clear_value, unsigned clear_value_size,
                                  unsigned dwords_per_thread, bool render_condition_enable,
                                  bool fail_if_slow)
{
   assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0);
   assert(!src || src_offset + size <= src->width0);
   bool is_copy = src != NULL;

   struct ac_cs_clear_copy_buffer_options options = {
      .nir_options = sctx->screen->nir_options,
      .info = &sctx->screen->info,
      .print_key = si_can_dump_shader(sctx->screen, MESA_SHADER_COMPUTE, SI_DUMP_SHADER_KEY),
      .fail_if_slow = fail_if_slow,
   };

   struct ac_cs_clear_copy_buffer_info info = {
      .dst_offset = dst_offset,
      .src_offset = src_offset,
      .size = size,
      .clear_value_size = is_copy ? 0 : clear_value_size,
      .dwords_per_thread = dwords_per_thread,
      .render_condition_enabled = render_condition_enable,
      .dst_is_vram = si_resource(dst)->domains & RADEON_DOMAIN_VRAM,
      .src_is_vram = src && si_resource(src)->domains & RADEON_DOMAIN_VRAM,
      .src_is_sparse = src && src->flags & PIPE_RESOURCE_FLAG_SPARSE,
   };
   memcpy(info.clear_value, clear_value, clear_value_size);

   struct ac_cs_clear_copy_buffer_dispatch dispatch;

   if (!ac_prepare_cs_clear_copy_buffer(&options, &info, &dispatch))
      return false;

   struct pipe_shader_buffer sb[2] = {};
   for (unsigned i = 0; i < 2; i++) {
      sb[i].buffer_offset = dispatch.ssbo[i].offset;
      sb[i].buffer_size = dispatch.ssbo[i].size;
   }

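   /* For copies, SSBO 0 is the source and SSBO 1 the destination; for clears, only the
    * destination is bound, at SSBO 0 (hence the 0x2 vs 0x1 writeable bitmask below).
    */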
   if (is_copy)
      sb[0].buffer = src;
   sb[is_copy].buffer = dst;

   void *shader = _mesa_hash_table_u64_search(sctx->cs_dma_shaders, dispatch.shader_key.key);
   if (!shader) {
      shader = si_create_shader_state(sctx, ac_create_clear_copy_buffer_cs(&options,
                                                                           &dispatch.shader_key));
      _mesa_hash_table_u64_insert(sctx->cs_dma_shaders, dispatch.shader_key.key, shader);
   }

   memcpy(sctx->cs_user_data, dispatch.user_data, sizeof(dispatch.user_data));

   struct pipe_grid_info grid = {};
   set_work_size(&grid, dispatch.workgroup_size, 1, 1, dispatch.num_threads, 1, 1);

   si_launch_grid_internal_ssbos(sctx, &grid, shader, dispatch.num_ssbos, sb,
                                 is_copy ? 0x2 : 0x1, render_condition_enable);
   return true;
}

void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
                     uint64_t offset, uint64_t size, uint32_t *clear_value,
                     uint32_t clear_value_size, enum si_clear_method method,
                     bool render_condition_enable)
{
   if (!size)
      return;

   ASSERTED unsigned clear_alignment = MIN2(clear_value_size, 4);

   assert(clear_value_size != 3 && clear_value_size != 6); /* 12 is allowed. */
   assert(offset % clear_alignment == 0);
   assert(size % clear_alignment == 0);
   assert(offset < (UINT32_MAX & ~0x3)); /* the limit of pipe_shader_buffer::buffer_size */
   assert(align(size, 16) < UINT32_MAX); /* we round up the size to 16 for compute */

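   /* Lower the clear value to a single dword if possible (e.g. replicate 1- and 2-byte values). */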
   uint32_t clamped;
   if (util_lower_clearsize_to_dword(clear_value, (int*)&clear_value_size, &clamped))
      clear_value = &clamped;

   if (si_compute_clear_copy_buffer(sctx, dst, offset, NULL, 0, size, clear_value,
                                    clear_value_size, 0, render_condition_enable,
                                    method == SI_AUTO_SELECT_CLEAR_METHOD))
      return;

   /* Compute handles all unaligned sizes, so this is always aligned. */
   assert(offset % 4 == 0 && size % 4 == 0 && clear_value_size == 4);
   assert(!render_condition_enable);

   si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, dst, offset, size, *clear_value);
}

static void si_pipe_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
                                 unsigned offset, unsigned size, const void *clear_value,
                                 int clear_value_size)
{
   struct si_context *sctx = (struct si_context *)ctx;

   si_barrier_before_simple_buffer_op(sctx, 0, dst, NULL);
   si_clear_buffer(sctx, dst, offset, size, (uint32_t *)clear_value, clear_value_size,
                   SI_AUTO_SELECT_CLEAR_METHOD, false);
   si_barrier_after_simple_buffer_op(sctx, 0, dst, NULL);
}

void si_copy_buffer(struct si_context *sctx, struct pipe_resource *dst, struct pipe_resource *src,
                    uint64_t dst_offset, uint64_t src_offset, unsigned size)
{
   if (!size)
      return;

   if (si_compute_clear_copy_buffer(sctx, dst, dst_offset, src, src_offset, size, NULL, 0, 0,
                                    false, true))
      return;

   si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size);
}

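/* Expand a buffer of "count" 8-bit values into a buffer of 16-bit values using a compute shader.
 * The destination must therefore hold at least count * 2 bytes.
 */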
void si_compute_shorten_ubyte_buffer(struct si_context *sctx, struct pipe_resource *dst, struct pipe_resource *src,
                                     uint64_t dst_offset, uint64_t src_offset, unsigned count,
                                     bool render_condition_enable)
{
   if (!count)
      return;

   if (!sctx->cs_ubyte_to_ushort)
      sctx->cs_ubyte_to_ushort = si_create_ubyte_to_ushort_compute_shader(sctx);

   struct pipe_grid_info info = {};
   set_work_size(&info, 64, 1, 1, count, 1, 1);

   struct pipe_shader_buffer sb[2] = {};
   sb[0].buffer = dst;
   sb[0].buffer_offset = dst_offset;
   sb[0].buffer_size = count * 2;

   sb[1].buffer = src;
   sb[1].buffer_offset = src_offset;
   sb[1].buffer_size = count;

   si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_ubyte_to_ushort, 2, sb, 0x1,
                                 render_condition_enable);
}

static void si_compute_save_and_bind_images(struct si_context *sctx, unsigned num_images,
                                            struct pipe_image_view *images,
                                            struct pipe_image_view *saved_images)
{
   for (unsigned i = 0; i < num_images; i++) {
      assert(sctx->b.screen->is_format_supported(sctx->b.screen, images[i].format,
                                                 images[i].resource->target,
                                                 images[i].resource->nr_samples,
                                                 images[i].resource->nr_storage_samples,
                                                 PIPE_BIND_SHADER_IMAGE));

      /* Always allow DCC stores on gfx10+. */
      if (sctx->gfx_level >= GFX10 &&
          images[i].access & PIPE_IMAGE_ACCESS_WRITE &&
          !(images[i].access & SI_IMAGE_ACCESS_DCC_OFF))
         images[i].access |= SI_IMAGE_ACCESS_ALLOW_DCC_STORE;

      /* Simplify the format according to what image stores support. */
      if (images[i].access & PIPE_IMAGE_ACCESS_WRITE) {
         images[i].format = util_format_linear(images[i].format); /* SRGB not supported */
         /* Keep L8A8 formats as-is because GFX7 is unable to store into R8A8 for some reason. */
         images[i].format = util_format_intensity_to_red(images[i].format);
         images[i].format = util_format_rgbx_to_rgba(images[i].format); /* prevent partial writes */
      }

      /* Save the image. */
      util_copy_image_view(&saved_images[i], &sctx->images[PIPE_SHADER_COMPUTE].views[i]);
   }

   /* This must be before the barrier and si_compute_begin_internal because it might invoke DCC
    * decompression.
    */
   sctx->b.set_shader_images(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_images, 0, images);
}

static void si_compute_restore_images(struct si_context *sctx, unsigned num_images,
                                      struct pipe_image_view *saved_images)
{
   sctx->b.set_shader_images(&sctx->b, PIPE_SHADER_COMPUTE, 0, num_images, 0, saved_images);
   for (unsigned i = 0; i < num_images; i++)
      pipe_resource_reference(&saved_images[i].resource, NULL);
}

void si_retile_dcc(struct si_context *sctx, struct si_texture *tex)
{
   assert(sctx->gfx_level < GFX12);

   /* Flush and wait for CB before retiling DCC. */
   sctx->barrier_flags |= SI_BARRIER_SYNC_AND_INV_CB;
   si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);

   /* Set the DCC buffer. */
   assert(tex->surface.meta_offset && tex->surface.meta_offset <= UINT_MAX);
   assert(tex->surface.display_dcc_offset && tex->surface.display_dcc_offset <= UINT_MAX);
   assert(tex->surface.display_dcc_offset < tex->surface.meta_offset);
   assert(tex->buffer.bo_size <= UINT_MAX);

   struct pipe_shader_buffer sb = {};
   sb.buffer = &tex->buffer.b.b;
   sb.buffer_offset = tex->surface.display_dcc_offset;
   sb.buffer_size = tex->buffer.bo_size - sb.buffer_offset;

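   /* user_data[0]: offset of the non-displayable DCC relative to the bound buffer (which starts
    * at the displayable DCC), user_data[1]: pitch/height of the non-displayable DCC,
    * user_data[2]: pitch/height of the displayable DCC.
    */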
   sctx->cs_user_data[0] = tex->surface.meta_offset - tex->surface.display_dcc_offset;
   sctx->cs_user_data[1] = (tex->surface.u.gfx9.color.dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.dcc_height << 16);
   sctx->cs_user_data[2] = (tex->surface.u.gfx9.color.display_dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.display_dcc_height << 16);

   /* We have only 1 variant per bpp for now, so expect 32 bpp. */
   assert(tex->surface.bpe == 4);

   void **shader = &sctx->cs_dcc_retile[tex->surface.u.gfx9.swizzle_mode];
   if (!*shader)
      *shader = si_create_dcc_retile_cs(sctx, &tex->surface);

   /* Dispatch compute. */
   unsigned width = DIV_ROUND_UP(tex->buffer.b.b.width0, tex->surface.u.gfx9.color.dcc_block_width);
   unsigned height = DIV_ROUND_UP(tex->buffer.b.b.height0, tex->surface.u.gfx9.color.dcc_block_height);

   struct pipe_grid_info info = {};
   set_work_size(&info, 8, 8, 1, width, height, 1);

   si_barrier_before_simple_buffer_op(sctx, 0, sb.buffer, NULL);
   si_launch_grid_internal_ssbos(sctx, &info, *shader, 1, &sb, 0x1, false);
   si_barrier_after_simple_buffer_op(sctx, 0, sb.buffer, NULL);

   /* Don't flush caches. L2 will be flushed by the kernel fence. */
}

void gfx9_clear_dcc_msaa(struct si_context *sctx, struct pipe_resource *res, uint32_t clear_value,
                         bool render_condition_enable)
{
   struct si_texture *tex = (struct si_texture*)res;

   assert(sctx->gfx_level < GFX11);

   /* Set the DCC buffer. */
   assert(tex->surface.meta_offset && tex->surface.meta_offset <= UINT_MAX);
   assert(tex->buffer.bo_size <= UINT_MAX);

   struct pipe_shader_buffer sb = {};
   sb.buffer = &tex->buffer.b.b;
   sb.buffer_offset = tex->surface.meta_offset;
   sb.buffer_size = tex->buffer.bo_size - sb.buffer_offset;

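   /* user_data[0]: DCC pitch/height, user_data[1]: 16-bit DCC clear value in the low half and
    * the tile swizzle in the high half.
    */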
   sctx->cs_user_data[0] = (tex->surface.u.gfx9.color.dcc_pitch_max + 1) |
                           (tex->surface.u.gfx9.color.dcc_height << 16);
   sctx->cs_user_data[1] = (clear_value & 0xffff) |
                           ((uint32_t)tex->surface.tile_swizzle << 16);

   /* These variables identify the shader variant. */
   unsigned swizzle_mode = tex->surface.u.gfx9.swizzle_mode;
   unsigned bpe_log2 = util_logbase2(tex->surface.bpe);
   unsigned log2_samples = util_logbase2(tex->buffer.b.b.nr_samples);
   bool fragments8 = tex->buffer.b.b.nr_storage_samples == 8;
   bool is_array = tex->buffer.b.b.array_size > 1;
   void **shader = &sctx->cs_clear_dcc_msaa[swizzle_mode][bpe_log2][fragments8][log2_samples - 2][is_array];

   if (!*shader)
      *shader = gfx9_create_clear_dcc_msaa_cs(sctx, tex);

   /* Dispatch compute. */
   unsigned width = DIV_ROUND_UP(tex->buffer.b.b.width0, tex->surface.u.gfx9.color.dcc_block_width);
   unsigned height = DIV_ROUND_UP(tex->buffer.b.b.height0, tex->surface.u.gfx9.color.dcc_block_height);
   unsigned depth = DIV_ROUND_UP(tex->buffer.b.b.array_size, tex->surface.u.gfx9.color.dcc_block_depth);

   struct pipe_grid_info info = {};
   set_work_size(&info, 8, 8, 1, width, height, depth);

   si_launch_grid_internal_ssbos(sctx, &info, *shader, 1, &sb, 0x1, render_condition_enable);
}

/* Expand FMASK to make it identity, so that image stores can ignore it. */
void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex)
{
   struct si_context *sctx = (struct si_context *)ctx;
   bool is_array = tex->target == PIPE_TEXTURE_2D_ARRAY;
   unsigned log_fragments = util_logbase2(tex->nr_storage_samples);
   unsigned log_samples = util_logbase2(tex->nr_samples);
   assert(tex->nr_samples >= 2);

   assert(sctx->gfx_level < GFX11);

   /* EQAA FMASK expansion is unimplemented. */
   if (tex->nr_samples != tex->nr_storage_samples)
      return;

   si_make_CB_shader_coherent(sctx, tex->nr_samples, true,
                              ((struct si_texture*)tex)->surface.u.gfx9.color.dcc.pipe_aligned);

   /* Save states. */
   struct pipe_image_view saved_image = {0};
   util_copy_image_view(&saved_image, &sctx->images[PIPE_SHADER_COMPUTE].views[0]);

   /* Bind the image. */
   struct pipe_image_view image = {0};
   image.resource = tex;
   /* Don't set WRITE so as not to trigger FMASK expansion, causing
    * an infinite loop. */
   image.shader_access = image.access = PIPE_IMAGE_ACCESS_READ;
   image.format = util_format_linear(tex->format);
   if (is_array)
      image.u.tex.last_layer = tex->array_size - 1;

   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &image);

   /* Bind the shader. */
   void **shader = &sctx->cs_fmask_expand[log_samples - 1][is_array];
   if (!*shader)
      *shader = si_create_fmask_expand_cs(sctx, tex->nr_samples, is_array);

   /* Dispatch compute. */
   struct pipe_grid_info info = {0};
   set_work_size(&info, 8, 8, 1, tex->width0, tex->height0, is_array ? tex->array_size : 1);

   si_barrier_before_internal_op(sctx, 0, 0, NULL, 0, 1, &image);
   si_compute_begin_internal(sctx, false);
   si_launch_grid_internal(sctx, &info, *shader);
   si_compute_end_internal(sctx);
   si_barrier_after_internal_op(sctx, 0, 0, NULL, 0, 1, &image);

   /* Restore previous states. */
   ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, 0, &saved_image);
   pipe_resource_reference(&saved_image.resource, NULL);

   /* Array of fully expanded FMASK values, arranged by [log2(fragments)][log2(samples)-1]. */
#define INVALID 0 /* never used */
   static const uint64_t fmask_expand_values[][4] = {
      /* samples */
      /* 2 (8 bpp) 4 (8 bpp)   8 (8-32bpp) 16 (16-64bpp)      fragments */
      {0x02020202, 0x0E0E0E0E, 0xFEFEFEFE, 0xFFFEFFFE},      /* 1 */
      {0x02020202, 0xA4A4A4A4, 0xAAA4AAA4, 0xAAAAAAA4},      /* 2 */
      {INVALID, 0xE4E4E4E4, 0x44443210, 0x4444444444443210}, /* 4 */
      {INVALID, INVALID, 0x76543210, 0x8888888876543210},    /* 8 */
   };

   /* Clear FMASK to identity. */
   struct si_texture *stex = (struct si_texture *)tex;

   si_clear_buffer(sctx, tex, stex->surface.fmask_offset, stex->surface.fmask_size,
                   (uint32_t *)&fmask_expand_values[log_fragments][log_samples - 1],
                   log_fragments >= 2 && log_samples == 4 ? 8 : 4,
                   SI_AUTO_SELECT_CLEAR_METHOD, false);
   si_barrier_after_simple_buffer_op(sctx, 0, tex, NULL);
}

void si_compute_clear_image_dcc_single(struct si_context *sctx, struct si_texture *tex,
                                       unsigned level, enum pipe_format format,
                                       const union pipe_color_union *color,
                                       bool render_condition_enable)
{
   assert(sctx->gfx_level >= GFX11); /* not believed to be useful on gfx10 */
   unsigned dcc_block_width = tex->surface.u.gfx9.color.dcc_block_width;
   unsigned dcc_block_height = tex->surface.u.gfx9.color.dcc_block_height;
   unsigned width = DIV_ROUND_UP(u_minify(tex->buffer.b.b.width0, level), dcc_block_width);
   unsigned height = DIV_ROUND_UP(u_minify(tex->buffer.b.b.height0, level), dcc_block_height);
   unsigned depth = util_num_layers(&tex->buffer.b.b, level);
   bool is_msaa = tex->buffer.b.b.nr_samples >= 2;

   struct pipe_image_view image = {0};
   image.resource = &tex->buffer.b.b;
   image.shader_access = image.access = PIPE_IMAGE_ACCESS_WRITE | SI_IMAGE_ACCESS_DCC_OFF;
   image.format = format;
   image.u.tex.level = level;
   image.u.tex.last_layer = depth - 1;

   if (util_format_is_srgb(format)) {
      union pipe_color_union color_srgb;
      for (int i = 0; i < 3; i++)
         color_srgb.f[i] = util_format_linear_to_srgb_float(color->f[i]);
      color_srgb.f[3] = color->f[3];
      memcpy(sctx->cs_user_data, color_srgb.ui, sizeof(color->ui));
   } else {
      memcpy(sctx->cs_user_data, color->ui, sizeof(color->ui));
   }

   sctx->cs_user_data[4] = dcc_block_width | (dcc_block_height << 16);

   struct pipe_grid_info info = {0};
   unsigned wg_dim = set_work_size(&info, 8, 8, 1, width, height, depth);

   void **shader = &sctx->cs_clear_image_dcc_single[is_msaa][wg_dim];
   if (!*shader)
      *shader = si_clear_image_dcc_single_shader(sctx, is_msaa, wg_dim);

   struct pipe_image_view saved_image = {};

   si_compute_save_and_bind_images(sctx, 1, &image, &saved_image);
   si_compute_begin_internal(sctx, render_condition_enable);
   si_launch_grid_internal(sctx, &info, *shader);
   si_compute_end_internal(sctx);
   si_compute_restore_images(sctx, 1, &saved_image);
}

void si_init_compute_blit_functions(struct si_context *sctx)
{
   sctx->b.clear_buffer = si_pipe_clear_buffer;
}

bool si_should_blit_clamp_to_edge(const struct pipe_blit_info *info, unsigned coord_mask)
{
   return util_is_box_out_of_bounds(&info->src.box, coord_mask, info->src.resource->width0,
                                    info->src.resource->height0, info->src.level);
}

bool si_compute_clear_image(struct si_context *sctx, struct pipe_resource *tex,
                            enum pipe_format format, unsigned level, const struct pipe_box *box,
                            const union pipe_color_union *color, bool render_condition_enable,
                            bool fail_if_slow)
{
   unsigned access = 0;

   struct pipe_blit_info info;
   memset(&info, 0, sizeof(info));
   info.dst.resource = tex;
   info.dst.level = level;
   info.dst.box = *box;
   info.dst.format = format;
   info.mask = util_format_is_depth_or_stencil(format) ? PIPE_MASK_ZS : PIPE_MASK_RGBA;
   info.render_condition_enable = render_condition_enable;

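   /* Subsampled 4:2:2 formats pack 2 pixels into each 32-bit block, so clear them as R32_UINT
    * and convert the x coordinate to block units.
    */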
   if (util_format_is_subsampled_422(tex->format)) {
      access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;
      info.dst.format = PIPE_FORMAT_R32_UINT;
      info.dst.box.x = util_format_get_nblocksx(tex->format, info.dst.box.x);
   }

   return si_compute_blit(sctx, &info, color, access, 0, fail_if_slow);
}

bool si_compute_copy_image(struct si_context *sctx, struct pipe_resource *dst, unsigned dst_level,
                           struct pipe_resource *src, unsigned src_level, unsigned dstx,
                           unsigned dsty, unsigned dstz, const struct pipe_box *src_box,
                           bool fail_if_slow)
{
   struct si_texture *ssrc = (struct si_texture*)src;
   struct si_texture *sdst = (struct si_texture*)dst;
   enum pipe_format src_format = util_format_linear(src->format);
   enum pipe_format dst_format = util_format_linear(dst->format);

   assert(util_format_is_subsampled_422(src_format) == util_format_is_subsampled_422(dst_format));

   /* Interpret as integer values to avoid NaN issues */
   if (!vi_dcc_enabled(ssrc, src_level) &&
       !vi_dcc_enabled(sdst, dst_level) &&
       src_format == dst_format &&
       util_format_is_float(src_format) &&
       !util_format_is_compressed(src_format)) {
      switch(util_format_get_blocksizebits(src_format)) {
        case 16:
          src_format = dst_format = PIPE_FORMAT_R16_UINT;
          break;
        case 32:
          src_format = dst_format = PIPE_FORMAT_R32_UINT;
          break;
        case 64:
          src_format = dst_format = PIPE_FORMAT_R32G32_UINT;
          break;
        case 128:
          src_format = dst_format = PIPE_FORMAT_R32G32B32A32_UINT;
          break;
        default:
          assert(false);
      }
   }

   /* Interpret compressed formats as UINT. */
   struct pipe_box new_box;
   unsigned src_access = 0, dst_access = 0;

   /* Note that staging copies do compressed<->UINT, so one of the formats is already UINT. */
   if (util_format_is_compressed(src_format) || util_format_is_compressed(dst_format)) {
      if (util_format_is_compressed(src_format))
         src_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;
      if (util_format_is_compressed(dst_format))
         dst_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;

      dstx = util_format_get_nblocksx(dst_format, dstx);
      dsty = util_format_get_nblocksy(dst_format, dsty);

      new_box.x = util_format_get_nblocksx(src_format, src_box->x);
      new_box.y = util_format_get_nblocksy(src_format, src_box->y);
      new_box.z = src_box->z;
      new_box.width = util_format_get_nblocksx(src_format, src_box->width);
      new_box.height = util_format_get_nblocksy(src_format, src_box->height);
      new_box.depth = src_box->depth;
      src_box = &new_box;

      if (ssrc->surface.bpe == 8)
         src_format = dst_format = PIPE_FORMAT_R16G16B16A16_UINT; /* 64-bit block */
      else
         src_format = dst_format = PIPE_FORMAT_R32G32B32A32_UINT; /* 128-bit block */
   }

   if (util_format_is_subsampled_422(src_format)) {
      assert(src_format == dst_format);

      src_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;
      dst_access |= SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT;

      dstx = util_format_get_nblocksx(src_format, dstx);

      src_format = dst_format = PIPE_FORMAT_R32_UINT;

      /* Interpreting 422 subsampled format (16 bpp) as 32 bpp
       * should force us to divide src_box->x, dstx and width by 2.
       * But given that ac_surface allocates this format as 32 bpp
       * and that surf_size is then modified to pack the values
       * we must keep the original values to get the correct results.
       */
   }

   /* SNORM blitting has precision issues. Use the SINT equivalent instead, which doesn't
    * force DCC decompression.
    */
   if (util_format_is_snorm(dst_format))
      src_format = dst_format = util_format_snorm_to_sint(dst_format);

   struct pipe_blit_info info;
   memset(&info, 0, sizeof(info));
   info.dst.resource = dst;
   info.dst.level = dst_level;
   info.dst.box.x = dstx;
   info.dst.box.y = dsty;
   info.dst.box.z = dstz;
   info.dst.box.width = src_box->width;
   info.dst.box.height = src_box->height;
   info.dst.box.depth = src_box->depth;
   info.dst.format = dst_format;
   info.src.resource = src;
   info.src.level = src_level;
   info.src.box = *src_box;
   info.src.format = src_format;
   info.mask = util_format_is_depth_or_stencil(dst_format) ? PIPE_MASK_ZS : PIPE_MASK_RGBA;

   /* Only the compute blit can copy compressed and subsampled images. */
   fail_if_slow &= !dst_access && !src_access;

   bool success = si_compute_blit(sctx, &info, NULL, dst_access, src_access, fail_if_slow);
   assert((!dst_access && !src_access) || success);
   return success;
}

static unsigned get_tex_dim(struct si_texture *tex)
{
   switch (tex->buffer.b.b.target) {
   case PIPE_TEXTURE_3D:
      return 3;
   case PIPE_BUFFER:
   case PIPE_TEXTURE_1D:
   case PIPE_TEXTURE_1D_ARRAY:
      return 1;
   default:
      return 2;
   }
}

static bool get_tex_is_array(struct si_texture *tex)
{
   switch (tex->buffer.b.b.target) {
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return true;
   default:
      return false;
   }
}

bool si_compute_blit(struct si_context *sctx, const struct pipe_blit_info *info,
                     const union pipe_color_union *clear_color, unsigned dst_access,
                     unsigned src_access, bool fail_if_slow)
{
   struct si_texture *sdst = (struct si_texture *)info->dst.resource;
   struct si_texture *ssrc = (struct si_texture *)info->src.resource;
   bool is_clear = !ssrc;
   unsigned dst_samples = MAX2(1, sdst->buffer.b.b.nr_samples);

   /* MSAA image stores don't work on <= Gfx10.3. It's an issue with FMASK because
    * AMD_DEBUG=nofmask fixes them. EQAA image stores are also unimplemented.
    * MSAA image stores work fine on Gfx11 (it has neither FMASK nor EQAA).
    */
   if (sctx->gfx_level < GFX11 && !(sctx->screen->debug_flags & DBG(NO_FMASK)) && dst_samples > 1)
      return false;

   if (info->dst_sample != 0 ||
       info->alpha_blend ||
       info->num_window_rectangles ||
       info->scissor_enable)
      return false;

   struct ac_cs_blit_options options = {
      .nir_options = sctx->screen->nir_options,
      .info = &sctx->screen->info,
      .use_aco = sctx->screen->use_aco,
      .no_fmask = sctx->screen->debug_flags & DBG(NO_FMASK),
      /* Compute queues can't fail because there is no alternative. */
      .fail_if_slow = sctx->has_graphics && fail_if_slow,
   };

   struct ac_cs_blit_description blit = {
      .dst = {
         .surf = &sdst->surface,
         .dim = get_tex_dim(sdst),
         .is_array = get_tex_is_array(sdst),
         .width0 = info->dst.resource->width0,
         .height0 = info->dst.resource->height0,
         .num_samples = info->dst.resource->nr_samples,
         .level = info->dst.level,
         .box = info->dst.box,
         .format = info->dst.format,
      },
      .src = {
         .surf = ssrc ? &ssrc->surface : NULL,
         .dim = ssrc ? get_tex_dim(ssrc) : 0,
         .is_array = ssrc ? get_tex_is_array(ssrc) : false,
         .width0 = ssrc ? info->src.resource->width0 : 0,
         .height0 = ssrc ? info->src.resource->height0 : 0,
         .num_samples = ssrc ? info->src.resource->nr_samples : 0,
         .level = info->src.level,
         .box = info->src.box,
         .format = info->src.format,
      },
      .is_gfx_queue = sctx->has_graphics,
      /* if (src_access || dst_access), one of the images is block-compressed, which can't fall
       * back to a pixel shader on radeonsi */
      .dst_has_dcc = vi_dcc_enabled(sdst, info->dst.level) && !src_access && !dst_access,
      .sample0_only = info->sample0_only,
   };

   if (clear_color)
      blit.clear_color = *clear_color;

   struct ac_cs_blit_dispatches out;
   if (!ac_prepare_compute_blit(&options, &blit, &out))
      return false;

   if (!out.num_dispatches)
      return true;

   /* This is needed for compute queues if DCC stores are unsupported. */
   if (sctx->gfx_level < GFX10 && !sctx->has_graphics && vi_dcc_enabled(sdst, info->dst.level))
      si_texture_disable_dcc(sctx, sdst);

   /* Shader images. */
   struct pipe_image_view image[2];
   unsigned dst_index = is_clear ? 0 : 1;

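   /* Image slot 0 holds the source for copies/blits; the destination goes into slot 1 for
    * copies/blits and into slot 0 for clears.
    */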
   if (!is_clear) {
      image[0].resource = info->src.resource;
      image[0].shader_access = image[0].access = PIPE_IMAGE_ACCESS_READ | src_access;
      image[0].format = info->src.format;
      image[0].u.tex.level = info->src.level;
      image[0].u.tex.first_layer = 0;
      image[0].u.tex.last_layer = util_max_layer(info->src.resource, info->src.level);
   }

   image[dst_index].resource = info->dst.resource;
   image[dst_index].shader_access = image[dst_index].access = PIPE_IMAGE_ACCESS_WRITE | dst_access;
   image[dst_index].format = info->dst.format;
   image[dst_index].u.tex.level = info->dst.level;
   image[dst_index].u.tex.first_layer = 0;
   image[dst_index].u.tex.last_layer = util_max_layer(info->dst.resource, info->dst.level);

   /* Bind images and execute the barrier. */
   unsigned num_images = is_clear ? 1 : 2;
   struct pipe_image_view saved_images[2] = {};
   assert(num_images <= ARRAY_SIZE(saved_images));

   /* This must be before the barrier and si_compute_begin_internal because it might invoke DCC
    * decompression.
    */
   si_compute_save_and_bind_images(sctx, num_images, image, saved_images);
   si_barrier_before_internal_op(sctx, 0, 0, NULL, 0, num_images, image);
   si_compute_begin_internal(sctx, info->render_condition_enable);

   /* Execute compute blits. */
   for (unsigned i = 0; i < out.num_dispatches; i++) {
      struct ac_cs_blit_dispatch *dispatch = &out.dispatches[i];

      void *shader = _mesa_hash_table_u64_search(sctx->cs_blit_shaders, dispatch->shader_key.key);
      if (!shader) {
         shader = si_create_shader_state(sctx, ac_create_blit_cs(&options, &dispatch->shader_key));
         _mesa_hash_table_u64_insert(sctx->cs_blit_shaders, dispatch->shader_key.key, shader);
      }

      memcpy(sctx->cs_user_data, dispatch->user_data, sizeof(sctx->cs_user_data));

      struct pipe_grid_info grid = {
         .block = {
            dispatch->wg_size[0],
            dispatch->wg_size[1],
            dispatch->wg_size[2],
         },
         .last_block = {
            dispatch->last_wg_size[0],
            dispatch->last_wg_size[1],
            dispatch->last_wg_size[2],
         },
         .grid = {
            dispatch->num_workgroups[0],
            dispatch->num_workgroups[1],
            dispatch->num_workgroups[2],
         },
      };

      si_launch_grid_internal(sctx, &grid, shader);
   }

   si_compute_end_internal(sctx);
   si_barrier_after_internal_op(sctx, 0, 0, NULL, 0, num_images, image);
   si_compute_restore_images(sctx, num_images, saved_images);
   return true;
}