xref: /aosp_15_r20/external/mesa3d/src/gallium/drivers/freedreno/freedreno_resource.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2012 Rob Clark <[email protected]>
3  * SPDX-License-Identifier: MIT
4  *
5  * Authors:
6  *    Rob Clark <[email protected]>
7  */
8 
9 #include "util/format/u_format.h"
10 #include "util/format/u_format_rgtc.h"
11 #include "util/format/u_format_zs.h"
12 #include "util/set.h"
13 #include "util/u_drm.h"
14 #include "util/u_inlines.h"
15 #include "util/u_sample_positions.h"
16 #include "util/u_string.h"
17 #include "util/u_surface.h"
18 #include "util/u_transfer.h"
19 
20 #include "decode/util.h"
21 
22 #include "freedreno_batch_cache.h"
23 #include "freedreno_blitter.h"
24 #include "freedreno_context.h"
25 #include "freedreno_fence.h"
26 #include "freedreno_query_hw.h"
27 #include "freedreno_resource.h"
28 #include "freedreno_screen.h"
29 #include "freedreno_surface.h"
30 #include "freedreno_util.h"
31 
32 #include <errno.h>
33 #include "drm-uapi/drm_fourcc.h"
34 
35 /* XXX this should go away, needed for 'struct winsys_handle' */
36 #include "frontend/drm_driver.h"
37 
38 /**
39  * Go through the entire state and see if the resource is bound
40  * anywhere. If it is, mark the relevant state as dirty. This is
41  * called on realloc_bo to ensure the necessary state is re-
42  * emitted so the GPU looks at the new backing bo.
43  */
44 static void
rebind_resource_in_ctx(struct fd_context * ctx,struct fd_resource * rsc)45 rebind_resource_in_ctx(struct fd_context *ctx,
46                        struct fd_resource *rsc) assert_dt
47 {
48    struct pipe_resource *prsc = &rsc->b.b;
49 
50    if (ctx->rebind_resource)
51       ctx->rebind_resource(ctx, rsc);
52 
53    /* VBOs */
54    if (rsc->dirty & FD_DIRTY_VTXBUF) {
55       struct fd_vertexbuf_stateobj *vb = &ctx->vtx.vertexbuf;
56       for (unsigned i = 0; i < vb->count && !(ctx->dirty & FD_DIRTY_VTXBUF);
57            i++) {
58          if (vb->vb[i].buffer.resource == prsc)
59             fd_dirty_resource(ctx, prsc, FD_DIRTY_VTXBUF, false);
60       }
61    }
62 
63    /* xfb/so buffers: */
64    if (rsc->dirty & FD_DIRTY_STREAMOUT) {
65       struct fd_streamout_stateobj *so = &ctx->streamout;
66 
67       for (unsigned i = 0;
68             i < so->num_targets && !(ctx->dirty & FD_DIRTY_STREAMOUT);
69             i++) {
70          if (so->targets[i]->buffer == prsc)
71             fd_dirty_resource(ctx, prsc, FD_DIRTY_STREAMOUT, true);
72       }
73    }
74 
75    const enum fd_dirty_3d_state per_stage_dirty =
76       FD_DIRTY_CONST | FD_DIRTY_TEX | FD_DIRTY_IMAGE | FD_DIRTY_SSBO;
77 
78    if (!(rsc->dirty & per_stage_dirty))
79       return;
80 
81    /* per-shader-stage resources: */
82    for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
83       /* Constbufs.. note that constbuf[0] is normal uniforms emitted in
84        * cmdstream rather than by pointer..
85        */
86       if ((rsc->dirty & FD_DIRTY_CONST) &&
87           !(ctx->dirty_shader[stage] & FD_DIRTY_CONST)) {
88          struct fd_constbuf_stateobj *cb = &ctx->constbuf[stage];
89          const unsigned num_ubos = util_last_bit(cb->enabled_mask);
90          for (unsigned i = 1; i < num_ubos; i++) {
91             if (cb->cb[i].buffer == prsc) {
92                fd_dirty_shader_resource(ctx, prsc, stage,
93                                         FD_DIRTY_SHADER_CONST, false);
94                break;
95             }
96          }
97       }
98 
99       /* Textures */
100       if ((rsc->dirty & FD_DIRTY_TEX) &&
101           !(ctx->dirty_shader[stage] & FD_DIRTY_TEX)) {
102          struct fd_texture_stateobj *tex = &ctx->tex[stage];
103          for (unsigned i = 0; i < tex->num_textures; i++) {
104             if (tex->textures[i] && (tex->textures[i]->texture == prsc)) {
105                fd_dirty_shader_resource(ctx, prsc, stage,
106                                         FD_DIRTY_SHADER_TEX, false);
107                break;
108             }
109          }
110       }
111 
112       /* Images */
113       if ((rsc->dirty & FD_DIRTY_IMAGE) &&
114           !(ctx->dirty_shader[stage] & FD_DIRTY_IMAGE)) {
115          struct fd_shaderimg_stateobj *si = &ctx->shaderimg[stage];
116          const unsigned num_images = util_last_bit(si->enabled_mask);
117          for (unsigned i = 0; i < num_images; i++) {
118             if (si->si[i].resource == prsc) {
119                bool write = si->si[i].access & PIPE_IMAGE_ACCESS_WRITE;
120                fd_dirty_shader_resource(ctx, prsc, stage,
121                                         FD_DIRTY_SHADER_IMAGE, write);
122                break;
123             }
124          }
125       }
126 
127       /* SSBOs */
128       if ((rsc->dirty & FD_DIRTY_SSBO) &&
129           !(ctx->dirty_shader[stage] & FD_DIRTY_SSBO)) {
130          struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[stage];
131          const unsigned num_ssbos = util_last_bit(sb->enabled_mask);
132          for (unsigned i = 0; i < num_ssbos; i++) {
133             if (sb->sb[i].buffer == prsc) {
134                bool write = sb->writable_mask & BIT(i);
135                fd_dirty_shader_resource(ctx, prsc, stage,
136                                         FD_DIRTY_SHADER_SSBO, write);
137                break;
138             }
139          }
140       }
141    }
142 }
143 
144 static void
rebind_resource(struct fd_resource * rsc)145 rebind_resource(struct fd_resource *rsc) assert_dt
146 {
147    struct fd_screen *screen = fd_screen(rsc->b.b.screen);
148 
149    fd_screen_lock(screen);
150    fd_resource_lock(rsc);
151 
152    if (rsc->dirty)
153       list_for_each_entry (struct fd_context, ctx, &screen->context_list, node)
154          rebind_resource_in_ctx(ctx, rsc);
155 
156    fd_resource_unlock(rsc);
157    fd_screen_unlock(screen);
158 }
159 
160 static inline void
fd_resource_set_bo(struct fd_resource * rsc,struct fd_bo * bo)161 fd_resource_set_bo(struct fd_resource *rsc, struct fd_bo *bo)
162 {
163    struct fd_screen *screen = fd_screen(rsc->b.b.screen);
164 
165    rsc->bo = bo;
166    rsc->seqno = seqno_next_u16(&screen->rsc_seqno);
167 }
168 
169 int
__fd_resource_wait(struct fd_context * ctx,struct fd_resource * rsc,unsigned op,const char * func)170 __fd_resource_wait(struct fd_context *ctx, struct fd_resource *rsc, unsigned op,
171                    const char *func)
172 {
173    if (op & FD_BO_PREP_NOSYNC)
174       return fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
175 
176    int ret;
177 
178    perf_time_ctx (ctx, 10000, "%s: a busy \"%" PRSC_FMT "\" BO stalled", func,
179                   PRSC_ARGS(&rsc->b.b)) {
180       ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
181    }
182 
183    return ret;
184 }
185 
186 static void
realloc_bo(struct fd_resource * rsc,uint32_t size)187 realloc_bo(struct fd_resource *rsc, uint32_t size)
188 {
189    struct pipe_resource *prsc = &rsc->b.b;
190    struct fd_screen *screen = fd_screen(rsc->b.b.screen);
191    uint32_t flags =
192       (prsc->target == PIPE_BUFFER) ? FD_BO_HINT_BUFFER : FD_BO_HINT_IMAGE |
193       COND(rsc->layout.tile_mode, FD_BO_NOMAP) |
194       COND((prsc->usage & PIPE_USAGE_STAGING) &&
195            (prsc->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT),
196            FD_BO_CACHED_COHERENT) |
197       COND(prsc->bind & PIPE_BIND_SHARED, FD_BO_SHARED) |
198       COND(prsc->bind & PIPE_BIND_SCANOUT, FD_BO_SCANOUT);
199    /* TODO other flags? */
200 
201    /* if we start using things other than write-combine,
202     * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
203     */
204 
205    if (rsc->bo)
206       fd_bo_del(rsc->bo);
207 
208    struct fd_bo *bo =
209       fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x", prsc->width0,
210                 prsc->height0, prsc->depth0, rsc->layout.cpp, prsc->bind);
211    fd_resource_set_bo(rsc, bo);
212 
213    /* Zero out the UBWC area on allocation.  This fixes intermittent failures
214     * with UBWC, which I suspect are due to the HW having a hard time
215     * interpreting arbitrary values populating the flags buffer when the BO
216     * was recycled through the bo cache (instead of fresh allocations from
217     * the kernel, which are zeroed).  sleep(1) in this spot didn't work
218     * around the issue, but any memset value seems to.
219     */
220    if (rsc->layout.ubwc) {
221       rsc->needs_ubwc_clear = true;
222    }
223 
224    util_range_set_empty(&rsc->valid_buffer_range);
225    fd_bc_invalidate_resource(rsc, true);
226 }
227 
228 static void
do_blit(struct fd_context * ctx,const struct pipe_blit_info * blit,bool fallback)229 do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit,
230         bool fallback) assert_dt
231 {
232    struct pipe_context *pctx = &ctx->base;
233 
234    assert(!ctx->in_blit);
235    ctx->in_blit = true;
236 
237    /* TODO size threshold too?? */
238    if (fallback || !fd_blit(pctx, blit)) {
239       /* do blit on cpu: */
240       util_resource_copy_region(pctx, blit->dst.resource, blit->dst.level,
241                                 blit->dst.box.x, blit->dst.box.y,
242                                 blit->dst.box.z, blit->src.resource,
243                                 blit->src.level, &blit->src.box);
244    }
245 
246    ctx->in_blit = false;
247 }
248 
249 /**
250  * Replace the storage of dst with src.  This is only used by TC in the
251  * DISCARD_WHOLE_RESOURCE path, and src is a freshly allocated buffer.
252  */
253 void
fd_replace_buffer_storage(struct pipe_context * pctx,struct pipe_resource * pdst,struct pipe_resource * psrc,unsigned num_rebinds,uint32_t rebind_mask,uint32_t delete_buffer_id)254 fd_replace_buffer_storage(struct pipe_context *pctx, struct pipe_resource *pdst,
255                           struct pipe_resource *psrc, unsigned num_rebinds, uint32_t rebind_mask,
256                           uint32_t delete_buffer_id)
257 {
258    struct fd_context *ctx = fd_context(pctx);
259    struct fd_resource *dst = fd_resource(pdst);
260    struct fd_resource *src = fd_resource(psrc);
261 
262    DBG("pdst=%p, psrc=%p", pdst, psrc);
263 
264    /* This should only be called with buffers.. which side-steps some tricker
265     * cases, like a rsc that is in a batch-cache key...
266     */
267    assert(pdst->target == PIPE_BUFFER);
268    assert(psrc->target == PIPE_BUFFER);
269    assert(dst->track->bc_batch_mask == 0);
270    assert(src->track->bc_batch_mask == 0);
271    assert(src->track->batch_mask == 0);
272    assert(src->track->write_batch == NULL);
273    assert(memcmp(&dst->layout, &src->layout, sizeof(dst->layout)) == 0);
274 
275    /* get rid of any references that batch-cache might have to us (which
276     * should empty/destroy rsc->batches hashset)
277     *
278     * Note that we aren't actually destroying dst, but we are replacing
279     * it's storage so we want to go thru the same motions of decoupling
280     * it's batch connections.
281     */
282    fd_bc_invalidate_resource(dst, true);
283    rebind_resource(dst);
284 
285    util_idalloc_mt_free(&ctx->screen->buffer_ids, delete_buffer_id);
286 
287    fd_screen_lock(ctx->screen);
288 
289    fd_bo_del(dst->bo);
290    dst->bo = fd_bo_ref(src->bo);
291 
292    fd_resource_tracking_reference(&dst->track, src->track);
293    src->is_replacement = true;
294 
295    dst->seqno = seqno_next_u16(&ctx->screen->rsc_seqno);
296 
297    fd_screen_unlock(ctx->screen);
298 }
299 
300 static unsigned
translate_usage(unsigned usage)301 translate_usage(unsigned usage)
302 {
303    uint32_t op = 0;
304 
305    if (usage & PIPE_MAP_READ)
306       op |= FD_BO_PREP_READ;
307 
308    if (usage & PIPE_MAP_WRITE)
309       op |= FD_BO_PREP_WRITE;
310 
311    return op;
312 }
313 
314 bool
fd_resource_busy(struct pipe_screen * pscreen,struct pipe_resource * prsc,unsigned usage)315 fd_resource_busy(struct pipe_screen *pscreen, struct pipe_resource *prsc,
316                  unsigned usage)
317 {
318    struct fd_resource *rsc = fd_resource(prsc);
319 
320    if (pending(rsc, !!(usage & PIPE_MAP_WRITE)))
321       return true;
322 
323    if (resource_busy(rsc, translate_usage(usage)))
324       return true;
325 
326    return false;
327 }
328 
329 static void flush_resource(struct fd_context *ctx, struct fd_resource *rsc,
330                            unsigned usage);
331 
332 /**
333  * Helper to check if the format is something that we can blit/render
334  * to.. if the format is not renderable, there is no point in trying
335  * to do a staging blit (as it will still end up being a cpu copy)
336  */
337 static bool
is_renderable(struct pipe_resource * prsc)338 is_renderable(struct pipe_resource *prsc)
339 {
340    struct pipe_screen *pscreen = prsc->screen;
341    return pscreen->is_format_supported(
342          pscreen, prsc->format, prsc->target, prsc->nr_samples,
343          prsc->nr_storage_samples, PIPE_BIND_RENDER_TARGET);
344 }
345 
346 /**
347  * @rsc: the resource to shadow
348  * @level: the level to discard (if box != NULL, otherwise ignored)
349  * @box: the box to discard (or NULL if none)
350  * @modifier: the modifier for the new buffer state
351  */
352 static bool
fd_try_shadow_resource(struct fd_context * ctx,struct fd_resource * rsc,unsigned level,const struct pipe_box * box,uint64_t modifier)353 fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
354                        unsigned level, const struct pipe_box *box,
355                        uint64_t modifier) assert_dt
356 {
357    struct pipe_context *pctx = &ctx->base;
358    struct pipe_resource *prsc = &rsc->b.b;
359    struct fd_screen *screen = fd_screen(pctx->screen);
360    struct fd_batch *batch;
361    bool fallback = false;
362 
363    if (prsc->next)
364       return false;
365 
366    /* Flush any pending batches writing the resource before we go mucking around
367     * in its insides.  The blit would immediately cause the batch to be flushed,
368     * anyway.
369     */
370    fd_bc_flush_writer(ctx, rsc);
371 
372    /* Because IB1 ("gmem") cmdstream is built only when we flush the
373     * batch, we need to flush any batches that reference this rsc as
374     * a render target.  Otherwise the framebuffer state emitted in
375     * IB1 will reference the resources new state, and not the state
376     * at the point in time that the earlier draws referenced it.
377     *
378     * Note that being in the gmem key doesn't necessarily mean the
379     * batch was considered a writer!
380     */
381    foreach_batch (batch, &screen->batch_cache, rsc->track->bc_batch_mask) {
382       fd_batch_flush(batch);
383    }
384 
385    /* TODO: somehow munge dimensions and format to copy unsupported
386     * render target format to something that is supported?
387     */
388    if (!is_renderable(prsc))
389       fallback = true;
390 
391    /* do shadowing back-blits on the cpu for buffers -- requires about a page of
392     * DMA to make GPU copies worth it according to robclark.  Note, if you
393     * decide to do it on the GPU then you'll need to update valid_buffer_range
394     * in the swap()s below.
395     */
396    if (prsc->target == PIPE_BUFFER)
397       fallback = true;
398 
399    bool discard_whole_level = box && util_texrange_covers_whole_level(
400                                         prsc, level, box->x, box->y, box->z,
401                                         box->width, box->height, box->depth);
402 
403    /* TODO need to be more clever about current level */
404    if ((prsc->target >= PIPE_TEXTURE_2D) && box && !discard_whole_level)
405       return false;
406 
407    struct pipe_resource *pshadow = pctx->screen->resource_create_with_modifiers(
408       pctx->screen, prsc, &modifier, 1);
409 
410    if (!pshadow)
411       return false;
412 
413    assert(!ctx->in_shadow);
414    ctx->in_shadow = true;
415 
416    /* get rid of any references that batch-cache might have to us (which
417     * should empty/destroy rsc->batches hashset)
418     */
419    fd_bc_invalidate_resource(rsc, false);
420    rebind_resource(rsc);
421 
422    fd_screen_lock(ctx->screen);
423 
424    /* Swap the backing bo's, so shadow becomes the old buffer,
425     * blit from shadow to new buffer.  From here on out, we
426     * cannot fail.
427     *
428     * Note that we need to do it in this order, otherwise if
429     * we go down cpu blit path, the recursive transfer_map()
430     * sees the wrong status..
431     */
432    struct fd_resource *shadow = fd_resource(pshadow);
433 
434    DBG("shadow: %p (%d, %p) -> %p (%d, %p)", rsc, rsc->b.b.reference.count,
435        rsc->track, shadow, shadow->b.b.reference.count, shadow->track);
436 
437    SWAP(rsc->bo, shadow->bo);
438    SWAP(rsc->valid, shadow->valid);
439 
440    /* swap() doesn't work because you can't typeof() the bitfield. */
441    bool temp = shadow->needs_ubwc_clear;
442    shadow->needs_ubwc_clear = rsc->needs_ubwc_clear;
443    rsc->needs_ubwc_clear = temp;
444 
445    SWAP(rsc->layout, shadow->layout);
446    rsc->seqno = seqno_next_u16(&ctx->screen->rsc_seqno);
447 
448    /* at this point, the newly created shadow buffer is not referenced
449     * by any batches, but the existing rsc (probably) is.  We need to
450     * transfer those references over:
451     */
452    assert(shadow->track->batch_mask == 0);
453    foreach_batch (batch, &ctx->screen->batch_cache, rsc->track->batch_mask) {
454       struct set_entry *entry = _mesa_set_search_pre_hashed(batch->resources, rsc->hash, rsc);
455       _mesa_set_remove(batch->resources, entry);
456       _mesa_set_add_pre_hashed(batch->resources, shadow->hash, shadow);
457    }
458    SWAP(rsc->track, shadow->track);
459 
460    fd_screen_unlock(ctx->screen);
461 
462    struct pipe_blit_info blit = {};
463    blit.dst.resource = prsc;
464    blit.dst.format = prsc->format;
465    blit.src.resource = pshadow;
466    blit.src.format = pshadow->format;
467    blit.mask = util_format_get_mask(prsc->format);
468    blit.filter = PIPE_TEX_FILTER_NEAREST;
469 
470 #define set_box(field, val)                                                    \
471    do {                                                                        \
472       blit.dst.field = (val);                                                  \
473       blit.src.field = (val);                                                  \
474    } while (0)
475 
476    /* Disable occlusion queries during shadow blits. */
477    bool saved_active_queries = ctx->active_queries;
478    pctx->set_active_query_state(pctx, false);
479 
480    /* blit the other levels in their entirety: */
481    for (unsigned l = 0; l <= prsc->last_level; l++) {
482       if (box && l == level)
483          continue;
484 
485       /* just blit whole level: */
486       set_box(level, l);
487       set_box(box.width, u_minify(prsc->width0, l));
488       set_box(box.height, u_minify(prsc->height0, l));
489       set_box(box.depth, u_minify(prsc->depth0, l));
490 
491       for (int i = 0; i < prsc->array_size; i++) {
492          set_box(box.z, i);
493          do_blit(ctx, &blit, fallback);
494       }
495    }
496 
497    /* deal w/ current level specially, since we might need to split
498     * it up into a couple blits:
499     */
500    if (box && !discard_whole_level) {
501       set_box(level, level);
502 
503       switch (prsc->target) {
504       case PIPE_BUFFER:
505       case PIPE_TEXTURE_1D:
506          set_box(box.y, 0);
507          set_box(box.z, 0);
508          set_box(box.height, 1);
509          set_box(box.depth, 1);
510 
511          if (box->x > 0) {
512             set_box(box.x, 0);
513             set_box(box.width, box->x);
514 
515             do_blit(ctx, &blit, fallback);
516          }
517          if ((box->x + box->width) < u_minify(prsc->width0, level)) {
518             set_box(box.x, box->x + box->width);
519             set_box(box.width,
520                     u_minify(prsc->width0, level) - (box->x + box->width));
521 
522             do_blit(ctx, &blit, fallback);
523          }
524          break;
525       case PIPE_TEXTURE_2D:
526          /* TODO */
527       default:
528          unreachable("TODO");
529       }
530    }
531 
532    pctx->set_active_query_state(pctx, saved_active_queries);
533 
534    ctx->in_shadow = false;
535 
536    pipe_resource_reference(&pshadow, NULL);
537 
538    return true;
539 }
540 
541 /**
542  * Uncompress an UBWC compressed buffer "in place".  This works basically
543  * like resource shadowing, creating a new resource, and doing an uncompress
544  * blit, and swapping the state between shadow and original resource so it
545  * appears to the gallium frontends as if nothing changed.
546  */
547 void
fd_resource_uncompress(struct fd_context * ctx,struct fd_resource * rsc,bool linear)548 fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc, bool linear)
549 {
550    tc_assert_driver_thread(ctx->tc);
551 
552    uint64_t modifier = linear ? DRM_FORMAT_MOD_LINEAR : DRM_FORMAT_MOD_QCOM_TILED3;
553 
554    ASSERTED bool success = fd_try_shadow_resource(ctx, rsc, 0, NULL, modifier);
555 
556    /* shadow should not fail in any cases where we need to uncompress: */
557    assert(success);
558 }
559 
560 /**
561  * Debug helper to hexdump a resource.
562  */
563 void
fd_resource_dump(struct fd_resource * rsc,const char * name)564 fd_resource_dump(struct fd_resource *rsc, const char *name)
565 {
566    fd_bo_cpu_prep(rsc->bo, NULL, FD_BO_PREP_READ);
567    printf("%s: \n", name);
568    dump_hex(fd_bo_map(rsc->bo), fd_bo_size(rsc->bo));
569 }
570 
571 static struct fd_resource *
fd_alloc_staging(struct fd_context * ctx,struct fd_resource * rsc,unsigned level,const struct pipe_box * box,unsigned usage)572 fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
573                  unsigned level, const struct pipe_box *box, unsigned usage)
574    assert_dt
575 {
576    struct pipe_context *pctx = &ctx->base;
577    struct pipe_resource tmpl = rsc->b.b;
578 
579    /* We cannot currently do stencil export on earlier gens, and
580     * u_blitter cannot do blits involving stencil otherwise:
581     */
582    if ((ctx->screen->gen < 6) && !ctx->blit &&
583        (util_format_get_mask(tmpl.format) & PIPE_MASK_S))
584       return NULL;
585 
586    tmpl.width0 = box->width;
587    tmpl.height0 = box->height;
588    /* for array textures, box->depth is the array_size, otherwise
589     * for 3d textures, it is the depth:
590     */
591    if (tmpl.array_size > 1) {
592       if (tmpl.target == PIPE_TEXTURE_CUBE)
593          tmpl.target = PIPE_TEXTURE_2D_ARRAY;
594       tmpl.array_size = box->depth;
595       tmpl.depth0 = 1;
596    } else {
597       tmpl.array_size = 1;
598       tmpl.depth0 = box->depth;
599    }
600    tmpl.last_level = 0;
601    tmpl.bind |= PIPE_BIND_LINEAR;
602    tmpl.usage = PIPE_USAGE_STAGING;
603    tmpl.flags = (usage & PIPE_MAP_READ) ? PIPE_RESOURCE_FLAG_MAP_COHERENT : 0;
604 
605    struct pipe_resource *pstaging =
606       pctx->screen->resource_create(pctx->screen, &tmpl);
607    if (!pstaging)
608       return NULL;
609 
610    return fd_resource(pstaging);
611 }
612 
613 static void
fd_blit_from_staging(struct fd_context * ctx,struct fd_transfer * trans)614 fd_blit_from_staging(struct fd_context *ctx,
615                      struct fd_transfer *trans) assert_dt
616 {
617    DBG("");
618    struct pipe_resource *dst = trans->b.b.resource;
619    struct pipe_blit_info blit = {};
620 
621    blit.dst.resource = dst;
622    blit.dst.format = dst->format;
623    blit.dst.level = trans->b.b.level;
624    blit.dst.box = trans->b.b.box;
625    blit.src.resource = trans->staging_prsc;
626    blit.src.format = trans->staging_prsc->format;
627    blit.src.level = 0;
628    blit.src.box = trans->staging_box;
629    blit.mask = util_format_get_mask(trans->staging_prsc->format);
630    blit.filter = PIPE_TEX_FILTER_NEAREST;
631 
632    do_blit(ctx, &blit, false);
633 }
634 
635 static void
fd_blit_to_staging(struct fd_context * ctx,struct fd_transfer * trans)636 fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans) assert_dt
637 {
638    DBG("");
639    struct pipe_resource *src = trans->b.b.resource;
640    struct pipe_blit_info blit = {};
641 
642    blit.src.resource = src;
643    blit.src.format = src->format;
644    blit.src.level = trans->b.b.level;
645    blit.src.box = trans->b.b.box;
646    blit.dst.resource = trans->staging_prsc;
647    blit.dst.format = trans->staging_prsc->format;
648    blit.dst.level = 0;
649    blit.dst.box = trans->staging_box;
650    blit.mask = util_format_get_mask(trans->staging_prsc->format);
651    blit.filter = PIPE_TEX_FILTER_NEAREST;
652 
653    do_blit(ctx, &blit, false);
654 }
655 
656 static void
fd_resource_transfer_flush_region(struct pipe_context * pctx,struct pipe_transfer * ptrans,const struct pipe_box * box)657 fd_resource_transfer_flush_region(struct pipe_context *pctx,
658                                   struct pipe_transfer *ptrans,
659                                   const struct pipe_box *box)
660 {
661    struct fd_resource *rsc = fd_resource(ptrans->resource);
662 
663    if (ptrans->resource->target == PIPE_BUFFER)
664       util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
665                      ptrans->box.x + box->x,
666                      ptrans->box.x + box->x + box->width);
667 }
668 
669 static void
flush_resource(struct fd_context * ctx,struct fd_resource * rsc,unsigned usage)670 flush_resource(struct fd_context *ctx, struct fd_resource *rsc,
671                unsigned usage) assert_dt
672 {
673    if (usage & PIPE_MAP_WRITE) {
674       fd_bc_flush_readers(ctx, rsc);
675    } else {
676       fd_bc_flush_writer(ctx, rsc);
677    }
678 }
679 
680 static void
fd_flush_resource(struct pipe_context * pctx,struct pipe_resource * prsc)681 fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
682    in_dt
683 {
684    struct fd_context *ctx = fd_context(pctx);
685    struct fd_resource *rsc = fd_resource(prsc);
686 
687    /* Flushing the resource is only required if we are relying on
688     * implicit-sync, in which case the rendering must be flushed
689     * to the kernel for the fence to be added to the backing GEM
690     * object.
691     */
692    if (ctx->no_implicit_sync)
693       return;
694 
695    flush_resource(ctx, rsc, PIPE_MAP_READ);
696 
697    /* If we had to flush a batch, make sure it makes it's way all the
698     * way to the kernel:
699     */
700    fd_resource_wait(ctx, rsc, FD_BO_PREP_FLUSH);
701 }
702 
703 static void
fd_resource_transfer_unmap(struct pipe_context * pctx,struct pipe_transfer * ptrans)704 fd_resource_transfer_unmap(struct pipe_context *pctx,
705                            struct pipe_transfer *ptrans)
706    in_dt /* TODO for threaded-ctx we'll need to split out unsynchronized path */
707 {
708    struct fd_context *ctx = fd_context(pctx);
709    struct fd_resource *rsc = fd_resource(ptrans->resource);
710    struct fd_transfer *trans = fd_transfer(ptrans);
711 
712    if (trans->staging_prsc) {
713       if (ptrans->usage & PIPE_MAP_WRITE)
714          fd_blit_from_staging(ctx, trans);
715       pipe_resource_reference(&trans->staging_prsc, NULL);
716    }
717 
718    if (trans->upload_ptr) {
719       fd_bo_upload(rsc->bo, trans->upload_ptr, ptrans->box.x, ptrans->box.width);
720       free(trans->upload_ptr);
721    }
722 
723    util_range_add(&rsc->b.b, &rsc->valid_buffer_range, ptrans->box.x,
724                   ptrans->box.x + ptrans->box.width);
725 
726    pipe_resource_reference(&ptrans->resource, NULL);
727 
728    assert(trans->b.staging == NULL); /* for threaded context only */
729 
730    /* Don't use pool_transfers_unsync. We are always in the driver
731     * thread. Freeing an object into a different pool is allowed.
732     */
733    slab_free(&ctx->transfer_pool, ptrans);
734 }
735 
736 static void
invalidate_resource(struct fd_resource * rsc,unsigned usage)737 invalidate_resource(struct fd_resource *rsc, unsigned usage) assert_dt
738 {
739    bool needs_flush = pending(rsc, !!(usage & PIPE_MAP_WRITE));
740    unsigned op = translate_usage(usage);
741 
742    if (needs_flush || resource_busy(rsc, op)) {
743       rebind_resource(rsc);
744       realloc_bo(rsc, fd_bo_size(rsc->bo));
745    } else {
746       util_range_set_empty(&rsc->valid_buffer_range);
747    }
748 }
749 
750 static bool
valid_range(struct fd_resource * rsc,const struct pipe_box * box)751 valid_range(struct fd_resource *rsc, const struct pipe_box *box)
752 {
753    return util_ranges_intersect(&rsc->valid_buffer_range, box->x, box->x + box->width);
754 }
755 
756 static void *
resource_transfer_map_staging(struct pipe_context * pctx,struct pipe_resource * prsc,unsigned level,unsigned usage,const struct pipe_box * box,struct fd_transfer * trans)757 resource_transfer_map_staging(struct pipe_context *pctx,
758                               struct pipe_resource *prsc,
759                               unsigned level, unsigned usage,
760                               const struct pipe_box *box,
761                               struct fd_transfer *trans)
762    in_dt
763 {
764    struct fd_context *ctx = fd_context(pctx);
765    struct fd_resource *rsc = fd_resource(prsc);
766    struct fd_resource *staging_rsc;
767 
768    assert(prsc->target != PIPE_BUFFER);
769 
770    staging_rsc = fd_alloc_staging(ctx, rsc, level, box, usage);
771    if (!staging_rsc)
772       return NULL;
773 
774    trans->staging_prsc = &staging_rsc->b.b;
775    trans->b.b.stride = fd_resource_pitch(staging_rsc, 0);
776    trans->b.b.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
777    trans->staging_box = *box;
778    trans->staging_box.x = 0;
779    trans->staging_box.y = 0;
780    trans->staging_box.z = 0;
781 
782    if (usage & PIPE_MAP_READ) {
783       fd_blit_to_staging(ctx, trans);
784 
785       fd_resource_wait(ctx, staging_rsc, FD_BO_PREP_READ);
786    }
787 
788    ctx->stats.staging_uploads++;
789 
790    return fd_bo_map(staging_rsc->bo);
791 }
792 
793 static void *
resource_transfer_map_unsync(struct pipe_context * pctx,struct pipe_resource * prsc,unsigned level,unsigned usage,const struct pipe_box * box,struct fd_transfer * trans)794 resource_transfer_map_unsync(struct pipe_context *pctx,
795                              struct pipe_resource *prsc, unsigned level,
796                              unsigned usage, const struct pipe_box *box,
797                              struct fd_transfer *trans)
798 {
799    struct fd_resource *rsc = fd_resource(prsc);
800    enum pipe_format format = prsc->format;
801    uint32_t offset;
802    char *buf;
803 
804    if ((prsc->target == PIPE_BUFFER) &&
805        !(usage & (PIPE_MAP_READ | PIPE_MAP_DIRECTLY | PIPE_MAP_PERSISTENT)) &&
806        ((usage & PIPE_MAP_DISCARD_RANGE) || !valid_range(rsc, box)) &&
807        fd_bo_prefer_upload(rsc->bo, box->width)) {
808       trans->upload_ptr = malloc(box->width);
809       return trans->upload_ptr;
810    }
811 
812    buf = fd_bo_map(rsc->bo);
813 
814    /* With imported bo's allocated by something outside of mesa, when
815     * running in a VM (using virtio_gpu kernel driver) we could end up in
816     * a situation where we have a linear bo, but are unable to mmap it
817     * because it was allocated without the VIRTGPU_BLOB_FLAG_USE_MAPPABLE
818     * flag.  So we need end up needing to do a staging blit instead:
819     */
820    if (!buf)
821       return resource_transfer_map_staging(pctx, prsc, level, usage, box, trans);
822 
823    offset = box->y / util_format_get_blockheight(format) * trans->b.b.stride +
824             box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
825             fd_resource_offset(rsc, level, box->z);
826 
827    if (usage & PIPE_MAP_WRITE)
828       rsc->valid = true;
829 
830    return buf + offset;
831 }
832 
833 /**
834  * Note, with threaded_context, resource_transfer_map() is only called
835  * in driver thread, but resource_transfer_map_unsync() can be called in
836  * either driver or frontend thread.
837  */
838 static void *
resource_transfer_map(struct pipe_context * pctx,struct pipe_resource * prsc,unsigned level,unsigned usage,const struct pipe_box * box,struct fd_transfer * trans)839 resource_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
840                       unsigned level, unsigned usage,
841                       const struct pipe_box *box,
842                       struct fd_transfer *trans) in_dt
843 {
844    struct fd_context *ctx = fd_context(pctx);
845    struct fd_resource *rsc = fd_resource(prsc);
846    char *buf;
847    int ret = 0;
848 
849    tc_assert_driver_thread(ctx->tc);
850 
851    /* Strip the read flag if the buffer has been invalidated (or is freshly
852     * created). Avoids extra staging blits of undefined data on glTexSubImage of
853     * a fresh DEPTH_COMPONENT or STENCIL_INDEX texture being stored as z24s8.
854     */
855    if (!rsc->valid)
856       usage &= ~PIPE_MAP_READ;
857 
858    /* we always need a staging texture for tiled buffers:
859     *
860     * TODO we might sometimes want to *also* shadow the resource to avoid
861     * splitting a batch.. for ex, mid-frame texture uploads to a tiled
862     * texture.
863     */
864    if (rsc->layout.tile_mode) {
865       return resource_transfer_map_staging(pctx, prsc, level, usage, box, trans);
866    } else if ((usage & PIPE_MAP_READ) && !fd_bo_is_cached(rsc->bo)) {
867       perf_debug_ctx(ctx, "wc readback: prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d",
868                      prsc, level, usage, box->width, box->height, box->x, box->y);
869    }
870 
871    if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
872       invalidate_resource(rsc, usage);
873    } else {
874       unsigned op = translate_usage(usage);
875       bool needs_flush = pending(rsc, !!(usage & PIPE_MAP_WRITE));
876 
877       /* If the GPU is writing to the resource, or if it is reading from the
878        * resource and we're trying to write to it, flush the renders.
879        */
880       bool busy = needs_flush || resource_busy(rsc, op);
881 
882       /* if we need to flush/stall, see if we can make a shadow buffer
883        * to avoid this:
884        *
885        * TODO we could go down this path !reorder && !busy_for_read
886        * ie. we only *don't* want to go down this path if the blit
887        * will trigger a flush!
888        */
889       if (ctx->screen->reorder && busy && !(usage & PIPE_MAP_READ) &&
890           (usage & PIPE_MAP_DISCARD_RANGE)) {
891 
892          /* try shadowing only if it avoids a flush, otherwise staging would
893           * be better:
894           */
895          if (needs_flush && !(usage & TC_TRANSFER_MAP_NO_INVALIDATE) &&
896                fd_try_shadow_resource(ctx, rsc, level, box, DRM_FORMAT_MOD_LINEAR)) {
897             needs_flush = busy = false;
898             ctx->stats.shadow_uploads++;
899          } else {
900             struct fd_resource *staging_rsc = NULL;
901 
902             if (needs_flush) {
903                perf_debug_ctx(ctx, "flushing: %" PRSC_FMT, PRSC_ARGS(prsc));
904                flush_resource(ctx, rsc, usage);
905                needs_flush = false;
906             }
907 
908             /* in this case, we don't need to shadow the whole resource,
909              * since any draw that references the previous contents has
910              * already had rendering flushed for all tiles.  So we can
911              * use a staging buffer to do the upload.
912              */
913             if (is_renderable(prsc))
914                staging_rsc = fd_alloc_staging(ctx, rsc, level, box, usage);
915             if (staging_rsc) {
916                trans->staging_prsc = &staging_rsc->b.b;
917                trans->b.b.stride = fd_resource_pitch(staging_rsc, 0);
918                trans->b.b.layer_stride =
919                   fd_resource_layer_stride(staging_rsc, 0);
920                trans->staging_box = *box;
921                trans->staging_box.x = 0;
922                trans->staging_box.y = 0;
923                trans->staging_box.z = 0;
924                buf = fd_bo_map(staging_rsc->bo);
925 
926                ctx->stats.staging_uploads++;
927 
928                return buf;
929             }
930          }
931       }
932 
933       if (needs_flush) {
934          flush_resource(ctx, rsc, usage);
935          needs_flush = false;
936       }
937 
938       /* The GPU keeps track of how the various bo's are being used, and
939        * will wait if necessary for the proper operation to have
940        * completed.
941        */
942       if (busy) {
943          ret = fd_resource_wait(ctx, rsc, op);
944          if (ret)
945             return NULL;
946       }
947    }
948 
949    return resource_transfer_map_unsync(pctx, prsc, level, usage, box, trans);
950 }
951 
952 static unsigned
improve_transfer_map_usage(struct fd_context * ctx,struct fd_resource * rsc,unsigned usage,const struct pipe_box * box)953 improve_transfer_map_usage(struct fd_context *ctx, struct fd_resource *rsc,
954                            unsigned usage, const struct pipe_box *box)
955    /* Not *strictly* true, but the access to things that must only be in driver-
956     * thread are protected by !(usage & TC_TRANSFER_MAP_THREADED_UNSYNC):
957     */
958    in_dt
959 {
960    if (usage & TC_TRANSFER_MAP_NO_INVALIDATE) {
961       usage &= ~PIPE_MAP_DISCARD_WHOLE_RESOURCE;
962    }
963 
964    if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
965       usage |= PIPE_MAP_UNSYNCHRONIZED;
966 
967    if (!(usage &
968          (TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED | PIPE_MAP_UNSYNCHRONIZED))) {
969       if (ctx->in_shadow && !(usage & PIPE_MAP_READ)) {
970          usage |= PIPE_MAP_UNSYNCHRONIZED;
971       } else if ((usage & PIPE_MAP_WRITE) && (rsc->b.b.target == PIPE_BUFFER) &&
972                  !valid_range(rsc, box)) {
973          /* We are trying to write to a previously uninitialized range. No need
974           * to synchronize.
975           */
976          usage |= PIPE_MAP_UNSYNCHRONIZED;
977       }
978    }
979 
980    return usage;
981 }
982 
983 static void *
fd_resource_transfer_map(struct pipe_context * pctx,struct pipe_resource * prsc,unsigned level,unsigned usage,const struct pipe_box * box,struct pipe_transfer ** pptrans)984 fd_resource_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
985                          unsigned level, unsigned usage,
986                          const struct pipe_box *box,
987                          struct pipe_transfer **pptrans)
988 {
989    struct fd_context *ctx = fd_context(pctx);
990    struct fd_resource *rsc = fd_resource(prsc);
991    struct fd_transfer *trans;
992    struct pipe_transfer *ptrans;
993 
994    DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
995        box->width, box->height, box->x, box->y);
996 
997    if ((usage & PIPE_MAP_DIRECTLY) && rsc->layout.tile_mode) {
998       DBG("CANNOT MAP DIRECTLY!\n");
999       return NULL;
1000    }
1001 
1002    if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC) {
1003       ptrans = slab_zalloc(&ctx->transfer_pool_unsync);
1004    } else {
1005       ptrans = slab_zalloc(&ctx->transfer_pool);
1006    }
1007 
1008    if (!ptrans)
1009       return NULL;
1010 
1011    trans = fd_transfer(ptrans);
1012 
1013    usage = improve_transfer_map_usage(ctx, rsc, usage, box);
1014 
1015    pipe_resource_reference(&ptrans->resource, prsc);
1016    ptrans->level = level;
1017    ptrans->usage = usage;
1018    ptrans->box = *box;
1019    ptrans->stride = fd_resource_pitch(rsc, level);
1020    ptrans->layer_stride = fd_resource_layer_stride(rsc, level);
1021 
1022    void *ret;
1023    if (usage & PIPE_MAP_UNSYNCHRONIZED) {
1024       ret = resource_transfer_map_unsync(pctx, prsc, level, usage, box, trans);
1025    } else {
1026       ret = resource_transfer_map(pctx, prsc, level, usage, box, trans);
1027    }
1028 
1029    if (ret) {
1030       *pptrans = ptrans;
1031    } else {
1032       fd_resource_transfer_unmap(pctx, ptrans);
1033    }
1034 
1035    return ret;
1036 }
1037 
1038 static void
fd_resource_destroy(struct pipe_screen * pscreen,struct pipe_resource * prsc)1039 fd_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc)
1040 {
1041    struct fd_screen *screen = fd_screen(prsc->screen);
1042    struct fd_resource *rsc = fd_resource(prsc);
1043 
1044    if (!rsc->is_replacement)
1045       fd_bc_invalidate_resource(rsc, true);
1046    if (rsc->bo)
1047       fd_bo_del(rsc->bo);
1048    if (rsc->lrz)
1049       fd_bo_del(rsc->lrz);
1050    if (rsc->scanout)
1051       renderonly_scanout_destroy(rsc->scanout, fd_screen(pscreen)->ro);
1052 
1053    if (prsc->target == PIPE_BUFFER)
1054       util_idalloc_mt_free(&screen->buffer_ids, rsc->b.buffer_id_unique);
1055 
1056    threaded_resource_deinit(prsc);
1057 
1058    util_range_destroy(&rsc->valid_buffer_range);
1059    simple_mtx_destroy(&rsc->lock);
1060    fd_resource_tracking_reference(&rsc->track, NULL);
1061 
1062    FREE(rsc);
1063 }
1064 
1065 static uint64_t
fd_resource_modifier(struct fd_resource * rsc)1066 fd_resource_modifier(struct fd_resource *rsc)
1067 {
1068    if (rsc->layout.ubwc_layer_size)
1069       return DRM_FORMAT_MOD_QCOM_COMPRESSED;
1070 
1071    switch (rsc->layout.tile_mode) {
1072    case 3: return DRM_FORMAT_MOD_QCOM_TILED3;
1073    case 2: return DRM_FORMAT_MOD_QCOM_TILED2;
1074    case 0: return DRM_FORMAT_MOD_LINEAR;
1075    default: return DRM_FORMAT_MOD_INVALID;
1076    }
1077 }
1078 
1079 static bool
fd_resource_get_handle(struct pipe_screen * pscreen,struct pipe_context * pctx,struct pipe_resource * prsc,struct winsys_handle * handle,unsigned usage)1080 fd_resource_get_handle(struct pipe_screen *pscreen, struct pipe_context *pctx,
1081                        struct pipe_resource *prsc, struct winsys_handle *handle,
1082                        unsigned usage)
1083    assert_dt
1084 {
1085    struct fd_resource *rsc = fd_resource(prsc);
1086 
1087    rsc->b.is_shared = true;
1088 
1089    if (prsc->target == PIPE_BUFFER)
1090       tc_buffer_disable_cpu_storage(&rsc->b.b);
1091 
1092    handle->modifier = fd_resource_modifier(rsc);
1093 
1094    if (prsc->target != PIPE_BUFFER) {
1095       struct fdl_metadata metadata = {
1096          .modifier = handle->modifier,
1097       };
1098       fd_bo_set_metadata(rsc->bo, &metadata, sizeof(metadata));
1099    }
1100 
1101    DBG("%" PRSC_FMT ", modifier=%" PRIx64, PRSC_ARGS(prsc), handle->modifier);
1102 
1103    bool ret = fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
1104                                       fd_resource_pitch(rsc, 0), handle);
1105 
1106    if (!ret && !(prsc->bind & PIPE_BIND_SHARED)) {
1107 
1108       pctx = threaded_context_unwrap_sync(pctx);
1109 
1110       struct fd_context *ctx = pctx ?
1111             fd_context(pctx) : fd_screen_aux_context_get(pscreen);
1112 
1113       /* Since gl is horrible, we can end up getting asked to export a handle
1114        * for a rsc which was not originally allocated in a way that can be
1115        * exported (for ex, sub-allocation or in the case of virtgpu we need
1116        * to tell the kernel at allocation time that the buffer can be shared)
1117        *
1118        * If we get into this scenario we can try to reallocate.
1119        */
1120 
1121       prsc->bind |= PIPE_BIND_SHARED;
1122 
1123       ret = fd_try_shadow_resource(ctx, rsc, 0, NULL, handle->modifier);
1124 
1125       if (!pctx)
1126          fd_screen_aux_context_put(pscreen);
1127 
1128       if (!ret)
1129          return false;
1130 
1131       return fd_resource_get_handle(pscreen, pctx, prsc, handle, usage);
1132    }
1133 
1134    return ret;
1135 }
1136 
1137 /* special case to resize query buf after allocated.. */
1138 void
fd_resource_resize(struct pipe_resource * prsc,uint32_t sz)1139 fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
1140 {
1141    struct fd_resource *rsc = fd_resource(prsc);
1142 
1143    assert(prsc->width0 == 0);
1144    assert(prsc->target == PIPE_BUFFER);
1145    assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
1146 
1147    prsc->width0 = sz;
1148    realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
1149 }
1150 
1151 static void
fd_resource_layout_init(struct pipe_resource * prsc)1152 fd_resource_layout_init(struct pipe_resource *prsc)
1153 {
1154    struct fd_resource *rsc = fd_resource(prsc);
1155    struct fdl_layout *layout = &rsc->layout;
1156 
1157    layout->format = prsc->format;
1158 
1159    layout->width0 = prsc->width0;
1160    layout->height0 = prsc->height0;
1161    layout->depth0 = prsc->depth0;
1162 
1163    layout->cpp = util_format_get_blocksize(prsc->format);
1164    layout->cpp *= fd_resource_nr_samples(prsc);
1165    layout->cpp_shift = ffs(layout->cpp) - 1;
1166 }
1167 
1168 static struct fd_resource *
alloc_resource_struct(struct pipe_screen * pscreen,const struct pipe_resource * tmpl)1169 alloc_resource_struct(struct pipe_screen *pscreen,
1170                       const struct pipe_resource *tmpl)
1171 {
1172    struct fd_screen *screen = fd_screen(pscreen);
1173    struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
1174 
1175    if (!rsc)
1176       return NULL;
1177 
1178    struct pipe_resource *prsc = &rsc->b.b;
1179    *prsc = *tmpl;
1180 
1181    pipe_reference_init(&prsc->reference, 1);
1182    prsc->screen = pscreen;
1183    rsc->hash = _mesa_hash_pointer(rsc);
1184 
1185    util_range_init(&rsc->valid_buffer_range);
1186    simple_mtx_init(&rsc->lock, mtx_plain);
1187 
1188    rsc->track = CALLOC_STRUCT(fd_resource_tracking);
1189    if (!rsc->track) {
1190       free(rsc);
1191       return NULL;
1192    }
1193 
1194    pipe_reference_init(&rsc->track->reference, 1);
1195 
1196    bool allow_cpu_storage = (tmpl->target == PIPE_BUFFER) &&
1197                             (tmpl->width0 < 0x1000);
1198    threaded_resource_init(prsc, allow_cpu_storage);
1199 
1200    if (tmpl->target == PIPE_BUFFER)
1201       rsc->b.buffer_id_unique = util_idalloc_mt_alloc(&screen->buffer_ids);
1202 
1203    return rsc;
1204 }
1205 
1206 enum fd_layout_type {
1207    ERROR,
1208    LINEAR,
1209    TILED,
1210    UBWC,
1211 };
1212 
1213 static bool
has_implicit_modifier(const uint64_t * modifiers,int count)1214 has_implicit_modifier(const uint64_t *modifiers, int count)
1215 {
1216     return count == 0 ||
1217            drm_find_modifier(DRM_FORMAT_MOD_INVALID, modifiers, count);
1218 }
1219 
1220 static bool
has_explicit_modifier(const uint64_t * modifiers,int count)1221 has_explicit_modifier(const uint64_t *modifiers, int count)
1222 {
1223     for (int i = 0; i < count; i++) {
1224         if (modifiers[i] != DRM_FORMAT_MOD_INVALID)
1225             return true;
1226     }
1227     return false;
1228 }
1229 
1230 static enum fd_layout_type
get_best_layout(struct fd_screen * screen,const struct pipe_resource * tmpl,const uint64_t * modifiers,int count)1231 get_best_layout(struct fd_screen *screen,
1232                 const struct pipe_resource *tmpl, const uint64_t *modifiers,
1233                 int count)
1234 {
1235    const bool can_implicit = has_implicit_modifier(modifiers, count);
1236    const bool can_explicit = has_explicit_modifier(modifiers, count);
1237 
1238    /* First, find all the conditions which would force us to linear */
1239    if (!screen->tile_mode)
1240       return LINEAR;
1241 
1242    if (!screen->tile_mode(tmpl))
1243       return LINEAR;
1244 
1245    if (tmpl->target == PIPE_BUFFER)
1246       return LINEAR;
1247 
1248    if ((tmpl->usage == PIPE_USAGE_STAGING) &&
1249        !util_format_is_depth_or_stencil(tmpl->format))
1250       return LINEAR;
1251 
1252    if (tmpl->bind & PIPE_BIND_LINEAR) {
1253       if (tmpl->usage != PIPE_USAGE_STAGING)
1254          perf_debug("%" PRSC_FMT ": forcing linear: bind flags",
1255                     PRSC_ARGS(tmpl));
1256       return LINEAR;
1257    }
1258 
1259    if (FD_DBG(NOTILE))
1260        return LINEAR;
1261 
1262    /* Shared resources without explicit modifiers must always be linear */
1263    if (!can_explicit && (tmpl->bind & PIPE_BIND_SHARED)) {
1264       perf_debug("%" PRSC_FMT
1265                  ": forcing linear: shared resource + implicit modifiers",
1266                  PRSC_ARGS(tmpl));
1267       return LINEAR;
1268    }
1269 
1270    bool ubwc_ok = is_a6xx(screen);
1271    if (FD_DBG(NOUBWC))
1272       ubwc_ok = false;
1273 
1274    /* Disallow UBWC for front-buffer rendering.  The GPU does not atomically
1275     * write pixel and header data, nor does the display atomically read it.
1276     * The result can be visual corruption (ie. moreso than normal tearing).
1277     */
1278    if (tmpl->bind & PIPE_BIND_USE_FRONT_RENDERING)
1279       ubwc_ok = false;
1280 
1281    /* Disallow UBWC when asked not to use data dependent bandwidth compression:
1282     */
1283    if (tmpl->bind & PIPE_BIND_CONST_BW)
1284       ubwc_ok = false;
1285 
1286    if (ubwc_ok && !can_implicit &&
1287        !drm_find_modifier(DRM_FORMAT_MOD_QCOM_COMPRESSED, modifiers, count)) {
1288       perf_debug("%" PRSC_FMT
1289                  ": not using UBWC: not in acceptable modifier set",
1290                  PRSC_ARGS(tmpl));
1291       ubwc_ok = false;
1292    }
1293 
1294    if (ubwc_ok)
1295       return UBWC;
1296 
1297    if (can_implicit ||
1298        drm_find_modifier(DRM_FORMAT_MOD_QCOM_TILED3, modifiers, count))
1299       return TILED;
1300 
1301    if (!drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count)) {
1302       perf_debug("%" PRSC_FMT ": need linear but not in modifier set",
1303                  PRSC_ARGS(tmpl));
1304       return ERROR;
1305    }
1306 
1307    perf_debug("%" PRSC_FMT ": not using tiling: explicit modifiers and no UBWC",
1308               PRSC_ARGS(tmpl));
1309    return LINEAR;
1310 }
1311 
1312 /**
1313  * Helper that allocates a resource and resolves its layout (but doesn't
1314  * allocate its bo).
1315  *
1316  * It returns a pipe_resource (as fd_resource_create_with_modifiers()
1317  * would do), and also bo's minimum required size as an output argument.
1318  */
1319 static struct pipe_resource *
fd_resource_allocate_and_resolve(struct pipe_screen * pscreen,const struct pipe_resource * tmpl,const uint64_t * modifiers,int count,uint32_t * psize)1320 fd_resource_allocate_and_resolve(struct pipe_screen *pscreen,
1321                                  const struct pipe_resource *tmpl,
1322                                  const uint64_t *modifiers, int count,
1323                                  uint32_t *psize)
1324 {
1325    struct fd_screen *screen = fd_screen(pscreen);
1326    struct fd_resource *rsc;
1327    struct pipe_resource *prsc;
1328    enum pipe_format format = tmpl->format;
1329    uint32_t size;
1330 
1331    rsc = alloc_resource_struct(pscreen, tmpl);
1332    if (!rsc)
1333       return NULL;
1334 
1335    prsc = &rsc->b.b;
1336 
1337    /* Clover creates buffers with PIPE_FORMAT_NONE: */
1338    if ((prsc->target == PIPE_BUFFER) && (format == PIPE_FORMAT_NONE))
1339       format = prsc->format = PIPE_FORMAT_R8_UNORM;
1340 
1341    DBG("%" PRSC_FMT, PRSC_ARGS(prsc));
1342 
1343    if (tmpl->bind & PIPE_BIND_SHARED)
1344       rsc->b.is_shared = true;
1345 
1346    fd_resource_layout_init(prsc);
1347 
1348    enum fd_layout_type layout =
1349       get_best_layout(screen, tmpl, modifiers, count);
1350    if (layout == ERROR) {
1351       free(prsc);
1352       return NULL;
1353    }
1354 
1355    if (layout >= TILED)
1356       rsc->layout.tile_mode = screen->tile_mode(prsc);
1357    if (layout == UBWC)
1358       rsc->layout.ubwc = true;
1359 
1360    rsc->internal_format = format;
1361 
1362    if (prsc->target == PIPE_BUFFER) {
1363       assert(prsc->format == PIPE_FORMAT_R8_UNORM);
1364       size = prsc->width0;
1365       fdl_layout_buffer(&rsc->layout, size);
1366    } else {
1367       size = screen->setup_slices(rsc);
1368    }
1369 
1370    /* special case for hw-query buffer, which we need to allocate before we
1371     * know the size:
1372     */
1373    if (size == 0) {
1374       /* note, semi-intention == instead of & */
1375       assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
1376       *psize = 0;
1377       return prsc;
1378    }
1379 
1380    /* Set the layer size if the (non-a6xx) backend hasn't done so. */
1381    if (rsc->layout.layer_first && !rsc->layout.layer_size) {
1382       rsc->layout.layer_size = align(size, 4096);
1383       size = rsc->layout.layer_size * prsc->array_size;
1384    }
1385 
1386    if (FD_DBG(LAYOUT))
1387       fdl_dump_layout(&rsc->layout);
1388 
1389    /* Hand out the resolved size. */
1390    if (psize)
1391       *psize = size;
1392 
1393    return prsc;
1394 }
1395 
1396 /**
1397  * Create a new texture object, using the given template info.
1398  */
1399 static struct pipe_resource *
fd_resource_create_with_modifiers(struct pipe_screen * pscreen,const struct pipe_resource * tmpl,const uint64_t * modifiers,int count)1400 fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
1401                                   const struct pipe_resource *tmpl,
1402                                   const uint64_t *modifiers, int count)
1403 {
1404    struct fd_screen *screen = fd_screen(pscreen);
1405    struct fd_resource *rsc;
1406    struct pipe_resource *prsc;
1407    uint32_t size;
1408 
1409    /* when using kmsro, scanout buffers are allocated on the display device
1410     * create_with_modifiers() doesn't give us usage flags, so we have to
1411     * assume that all calls with modifiers are scanout-possible
1412     */
1413    if (screen->ro &&
1414        ((tmpl->bind & PIPE_BIND_SCANOUT) ||
1415         has_explicit_modifier(modifiers, count))) {
1416       struct pipe_resource scanout_templat = *tmpl;
1417       struct renderonly_scanout *scanout;
1418       struct winsys_handle handle;
1419 
1420       /* note: alignment is wrong for a6xx */
1421       scanout_templat.width0 = align(tmpl->width0, screen->info->gmem_align_w);
1422 
1423       scanout =
1424          renderonly_scanout_for_resource(&scanout_templat, screen->ro, &handle);
1425       if (!scanout)
1426          return NULL;
1427 
1428       renderonly_scanout_destroy(scanout, screen->ro);
1429 
1430       assert(handle.type == WINSYS_HANDLE_TYPE_FD);
1431       rsc = fd_resource(pscreen->resource_from_handle(
1432          pscreen, tmpl, &handle, PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
1433       close(handle.handle);
1434       if (!rsc)
1435          return NULL;
1436 
1437       return &rsc->b.b;
1438    }
1439 
1440    prsc =
1441       fd_resource_allocate_and_resolve(pscreen, tmpl, modifiers, count, &size);
1442    if (!prsc)
1443       return NULL;
1444    rsc = fd_resource(prsc);
1445 
1446    realloc_bo(rsc, size);
1447    if (!rsc->bo)
1448       goto fail;
1449 
1450    return prsc;
1451 fail:
1452    fd_resource_destroy(pscreen, prsc);
1453    return NULL;
1454 }
1455 
1456 static struct pipe_resource *
fd_resource_create(struct pipe_screen * pscreen,const struct pipe_resource * tmpl)1457 fd_resource_create(struct pipe_screen *pscreen,
1458                    const struct pipe_resource *tmpl)
1459 {
1460    const uint64_t mod = DRM_FORMAT_MOD_INVALID;
1461    return fd_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
1462 }
1463 
1464 /**
1465  * Create a texture from a winsys_handle. The handle is often created in
1466  * another process by first creating a pipe texture and then calling
1467  * resource_get_handle.
1468  */
1469 static struct pipe_resource *
fd_resource_from_handle(struct pipe_screen * pscreen,const struct pipe_resource * tmpl,struct winsys_handle * handle,unsigned usage)1470 fd_resource_from_handle(struct pipe_screen *pscreen,
1471                         const struct pipe_resource *tmpl,
1472                         struct winsys_handle *handle, unsigned usage)
1473 {
1474    struct fd_screen *screen = fd_screen(pscreen);
1475    struct fd_resource *rsc = alloc_resource_struct(pscreen, tmpl);
1476 
1477    if (!rsc)
1478       return NULL;
1479 
1480    if (tmpl->target == PIPE_BUFFER)
1481       tc_buffer_disable_cpu_storage(&rsc->b.b);
1482 
1483    struct fdl_slice *slice = fd_resource_slice(rsc, 0);
1484    struct pipe_resource *prsc = &rsc->b.b;
1485 
1486    DBG("%" PRSC_FMT ", modifier=%" PRIx64, PRSC_ARGS(prsc), handle->modifier);
1487 
1488    rsc->b.is_shared = true;
1489 
1490    fd_resource_layout_init(prsc);
1491 
1492    struct fd_bo *bo = fd_screen_bo_from_handle(pscreen, handle);
1493    if (!bo)
1494       goto fail;
1495 
1496    fd_resource_set_bo(rsc, bo);
1497 
1498    rsc->internal_format = tmpl->format;
1499    rsc->layout.layer_first = true;
1500    rsc->layout.pitch0 = handle->stride;
1501    slice->offset = handle->offset;
1502    slice->size0 = handle->stride * prsc->height0;
1503 
1504    /* use a pitchalign of gmem_align_w pixels, because GMEM resolve for
1505     * lower alignments is not implemented (but possible for a6xx at least)
1506     *
1507     * for UBWC-enabled resources, layout_resource_for_modifier will further
1508     * validate the pitch and set the right pitchalign
1509     */
1510    rsc->layout.pitchalign =
1511       fdl_cpp_shift(&rsc->layout) + util_logbase2(screen->info->gmem_align_w);
1512 
1513    /* apply the minimum pitchalign (note: actually 4 for a3xx but doesn't
1514     * matter) */
1515    if (is_a6xx(screen) || is_a5xx(screen))
1516       rsc->layout.pitchalign = MAX2(rsc->layout.pitchalign, 6);
1517    else
1518       rsc->layout.pitchalign = MAX2(rsc->layout.pitchalign, 5);
1519 
1520    if (rsc->layout.pitch0 < (prsc->width0 * rsc->layout.cpp) ||
1521        fd_resource_pitch(rsc, 0) != rsc->layout.pitch0)
1522       goto fail;
1523 
1524    assert(rsc->layout.cpp);
1525 
1526    if (screen->layout_resource_for_modifier(rsc, handle->modifier) < 0)
1527       goto fail;
1528 
1529    if (screen->ro) {
1530       rsc->scanout =
1531          renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
1532       /* failure is expected in some cases.. */
1533    }
1534 
1535    rsc->valid = true;
1536 
1537    return prsc;
1538 
1539 fail:
1540    fd_resource_destroy(pscreen, prsc);
1541    return NULL;
1542 }
1543 
1544 bool
fd_render_condition_check(struct pipe_context * pctx)1545 fd_render_condition_check(struct pipe_context *pctx)
1546 {
1547    struct fd_context *ctx = fd_context(pctx);
1548 
1549    if (!ctx->cond_query)
1550       return true;
1551 
1552    perf_debug("Implementing conditional rendering using a CPU read instaed of HW conditional rendering.");
1553 
1554    union pipe_query_result res = {0};
1555    bool wait = ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
1556                ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
1557 
1558    if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
1559       return (bool)res.u64 != ctx->cond_cond;
1560 
1561    return true;
1562 }
1563 
1564 static void
fd_invalidate_resource(struct pipe_context * pctx,struct pipe_resource * prsc)1565 fd_invalidate_resource(struct pipe_context *pctx,
1566                        struct pipe_resource *prsc) in_dt
1567 {
1568    struct fd_context *ctx = fd_context(pctx);
1569    struct fd_resource *rsc = fd_resource(prsc);
1570 
1571    if (prsc->target == PIPE_BUFFER) {
1572       /* Handle the glInvalidateBufferData() case:
1573        */
1574       invalidate_resource(rsc, PIPE_MAP_READ | PIPE_MAP_WRITE);
1575    } else if (rsc->track->write_batch) {
1576       /* Handle the glInvalidateFramebuffer() case, telling us that
1577        * we can skip resolve.
1578        */
1579 
1580       struct fd_batch *batch = rsc->track->write_batch;
1581       struct pipe_framebuffer_state *pfb = &batch->framebuffer;
1582 
1583       if (pfb->zsbuf && pfb->zsbuf->texture == prsc) {
1584          batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
1585          fd_dirty_resource(ctx, prsc, FD_DIRTY_ZSA, true);
1586       }
1587 
1588       for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
1589          if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
1590             batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
1591             fd_dirty_resource(ctx, prsc, FD_DIRTY_FRAMEBUFFER, true);
1592          }
1593       }
1594    }
1595 
1596    rsc->valid = false;
1597 }
1598 
1599 static enum pipe_format
fd_resource_get_internal_format(struct pipe_resource * prsc)1600 fd_resource_get_internal_format(struct pipe_resource *prsc)
1601 {
1602    return fd_resource(prsc)->internal_format;
1603 }
1604 
1605 static void
fd_resource_set_stencil(struct pipe_resource * prsc,struct pipe_resource * stencil)1606 fd_resource_set_stencil(struct pipe_resource *prsc,
1607                         struct pipe_resource *stencil)
1608 {
1609    fd_resource(prsc)->stencil = fd_resource(stencil);
1610 }
1611 
1612 static struct pipe_resource *
fd_resource_get_stencil(struct pipe_resource * prsc)1613 fd_resource_get_stencil(struct pipe_resource *prsc)
1614 {
1615    struct fd_resource *rsc = fd_resource(prsc);
1616    if (rsc->stencil)
1617       return &rsc->stencil->b.b;
1618    return NULL;
1619 }
1620 
1621 static const struct u_transfer_vtbl transfer_vtbl = {
1622    .resource_create = fd_resource_create,
1623    .resource_destroy = fd_resource_destroy,
1624    .transfer_map = fd_resource_transfer_map,
1625    .transfer_flush_region = fd_resource_transfer_flush_region,
1626    .transfer_unmap = fd_resource_transfer_unmap,
1627    .get_internal_format = fd_resource_get_internal_format,
1628    .set_stencil = fd_resource_set_stencil,
1629    .get_stencil = fd_resource_get_stencil,
1630 };
1631 
1632 static int
fd_layout_resource_for_modifier(struct fd_resource * rsc,uint64_t modifier)1633 fd_layout_resource_for_modifier(struct fd_resource *rsc, uint64_t modifier)
1634 {
1635    switch (modifier) {
1636    case DRM_FORMAT_MOD_LINEAR:
1637       /* The dri gallium frontend will pass DRM_FORMAT_MOD_INVALID to us
1638        * when it's called through any of the non-modifier BO create entry
1639        * points.  Other drivers will determine tiling from the kernel or
1640        * other legacy backchannels, but for freedreno it just means
1641        * LINEAR. */
1642    case DRM_FORMAT_MOD_INVALID:
1643       return 0;
1644    default:
1645       return -1;
1646    }
1647 }
1648 
1649 static struct pipe_resource *
fd_resource_from_memobj(struct pipe_screen * pscreen,const struct pipe_resource * tmpl,struct pipe_memory_object * pmemobj,uint64_t offset)1650 fd_resource_from_memobj(struct pipe_screen *pscreen,
1651                         const struct pipe_resource *tmpl,
1652                         struct pipe_memory_object *pmemobj, uint64_t offset)
1653 {
1654    struct fd_screen *screen = fd_screen(pscreen);
1655    struct fd_memory_object *memobj = fd_memory_object(pmemobj);
1656    struct pipe_resource *prsc;
1657    struct fd_resource *rsc;
1658    struct fdl_metadata metadata;
1659    uint32_t size;
1660 
1661    assert(memobj->bo);
1662    assert(offset == 0);
1663 
1664    /* We shouldn't get a scanout buffer here. */
1665    assert(!(tmpl->bind & PIPE_BIND_SCANOUT));
1666 
1667    uint64_t modifiers = DRM_FORMAT_MOD_INVALID;
1668    if (pmemobj->dedicated &&
1669        !fd_bo_get_metadata(memobj->bo, &metadata, sizeof(metadata))) {
1670       modifiers = metadata.modifier;
1671    } else if (tmpl->bind & PIPE_BIND_LINEAR) {
1672       modifiers = DRM_FORMAT_MOD_LINEAR;
1673    } else if (is_a6xx(screen) && tmpl->width0 >= FDL_MIN_UBWC_WIDTH) {
1674       modifiers = DRM_FORMAT_MOD_QCOM_COMPRESSED;
1675    }
1676 
1677    /* Allocate new pipe resource. */
1678    prsc = fd_resource_allocate_and_resolve(pscreen, tmpl, &modifiers, 1, &size);
1679    if (!prsc)
1680       return NULL;
1681    rsc = fd_resource(prsc);
1682    rsc->b.is_shared = true;
1683 
1684    /* bo's size has to be large enough, otherwise cleanup resource and fail
1685     * gracefully.
1686     */
1687    if (fd_bo_size(memobj->bo) < size) {
1688       fd_resource_destroy(pscreen, prsc);
1689       return NULL;
1690    }
1691 
1692    /* Share the bo with the memory object. */
1693    fd_resource_set_bo(rsc, fd_bo_ref(memobj->bo));
1694 
1695    return prsc;
1696 }
1697 
1698 static struct pipe_memory_object *
fd_memobj_create_from_handle(struct pipe_screen * pscreen,struct winsys_handle * whandle,bool dedicated)1699 fd_memobj_create_from_handle(struct pipe_screen *pscreen,
1700                              struct winsys_handle *whandle, bool dedicated)
1701 {
1702    struct fd_memory_object *memobj = CALLOC_STRUCT(fd_memory_object);
1703    if (!memobj)
1704       return NULL;
1705 
1706    struct fd_bo *bo = fd_screen_bo_from_handle(pscreen, whandle);
1707    if (!bo) {
1708       free(memobj);
1709       return NULL;
1710    }
1711 
1712    memobj->b.dedicated = dedicated;
1713    memobj->bo = bo;
1714 
1715    return &memobj->b;
1716 }
1717 
1718 static void
fd_memobj_destroy(struct pipe_screen * pscreen,struct pipe_memory_object * pmemobj)1719 fd_memobj_destroy(struct pipe_screen *pscreen,
1720                   struct pipe_memory_object *pmemobj)
1721 {
1722    struct fd_memory_object *memobj = fd_memory_object(pmemobj);
1723 
1724    assert(memobj->bo);
1725    fd_bo_del(memobj->bo);
1726 
1727    free(pmemobj);
1728 }
1729 
1730 void
fd_resource_screen_init(struct pipe_screen * pscreen)1731 fd_resource_screen_init(struct pipe_screen *pscreen)
1732 {
1733    struct fd_screen *screen = fd_screen(pscreen);
1734 
1735    pscreen->resource_create = u_transfer_helper_resource_create;
1736    /* NOTE: u_transfer_helper does not yet support the _with_modifiers()
1737     * variant:
1738     */
1739    pscreen->resource_create_with_modifiers = fd_resource_create_with_modifiers;
1740    pscreen->resource_from_handle = fd_resource_from_handle;
1741    pscreen->resource_get_handle = fd_resource_get_handle;
1742    pscreen->resource_destroy = u_transfer_helper_resource_destroy;
1743 
1744    pscreen->transfer_helper =
1745       u_transfer_helper_create(&transfer_vtbl,
1746                                U_TRANSFER_HELPER_SEPARATE_Z32S8 |
1747                                U_TRANSFER_HELPER_MSAA_MAP);
1748 
1749    if (!screen->layout_resource_for_modifier)
1750       screen->layout_resource_for_modifier = fd_layout_resource_for_modifier;
1751 
1752    /* GL_EXT_memory_object */
1753    pscreen->memobj_create_from_handle = fd_memobj_create_from_handle;
1754    pscreen->memobj_destroy = fd_memobj_destroy;
1755    pscreen->resource_from_memobj = fd_resource_from_memobj;
1756 }
1757 
1758 static void
fd_blit_pipe(struct pipe_context * pctx,const struct pipe_blit_info * blit_info)1759 fd_blit_pipe(struct pipe_context *pctx,
1760              const struct pipe_blit_info *blit_info) in_dt
1761 {
1762    /* wrap fd_blit to return void */
1763    fd_blit(pctx, blit_info);
1764 }
1765 
1766 void
fd_resource_context_init(struct pipe_context * pctx)1767 fd_resource_context_init(struct pipe_context *pctx)
1768 {
1769    pctx->buffer_map = u_transfer_helper_transfer_map;
1770    pctx->texture_map = u_transfer_helper_transfer_map;
1771    pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
1772    pctx->buffer_unmap = u_transfer_helper_transfer_unmap;
1773    pctx->texture_unmap = u_transfer_helper_transfer_unmap;
1774    pctx->buffer_subdata = u_default_buffer_subdata;
1775    pctx->texture_subdata = u_default_texture_subdata;
1776    pctx->create_surface = fd_create_surface;
1777    pctx->surface_destroy = fd_surface_destroy;
1778    pctx->resource_copy_region = fd_resource_copy_region;
1779    pctx->blit = fd_blit_pipe;
1780    pctx->flush_resource = fd_flush_resource;
1781    pctx->invalidate_resource = fd_invalidate_resource;
1782    pctx->get_sample_position = u_default_get_sample_position;
1783 }
1784