/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"
#include "virgl_encode.h" // for declaration of virgl_encode_copy_transfer

/* A (soft) limit for the amount of memory we want to allow for queued staging
 * resources. This is used to decide when we should force a flush, in order to
 * avoid exhausting virtio-gpu memory.
 */
#define VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT (128 * 1024 * 1024)

enum virgl_transfer_map_type {
   VIRGL_TRANSFER_MAP_ERROR = -1,
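   /* Map the underlying virgl_hw_res directly. */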
   VIRGL_TRANSFER_MAP_HW_RES,

   /* Map a range of a staging buffer. The updated contents should be transferred
    * with a copy transfer.
    */
   VIRGL_TRANSFER_MAP_WRITE_TO_STAGING,

   /* Reallocate the underlying virgl_hw_res. */
   VIRGL_TRANSFER_MAP_REALLOC,

   /* Map type for read of texture data from host to guest
    * using staging buffer. */
   VIRGL_TRANSFER_MAP_READ_FROM_STAGING,
   /* Map type for write of texture data to host using staging
    * buffer that needs a readback first. */
   VIRGL_TRANSFER_MAP_WRITE_TO_STAGING_WITH_READBACK,
};

/* Check if copy transfer from host can be used:
 *  1. the resource is a texture,
 *  2. the renderer supports copy transfer from host,
 *  3. the host is not GLES (no fake FP64), or
 *  4. the format can be rendered to and is a readback format,
 *     or the format is a scanout format and we can read back from scanout
 */
static bool virgl_can_readback_from_rendertarget(struct virgl_screen *vs,
                                                 struct virgl_resource *res)
{
   return res->b.nr_samples < 2 &&
         vs->base.is_format_supported(&vs->base, res->b.format, res->b.target,
                                      res->b.nr_samples, res->b.nr_samples,
                                      PIPE_BIND_RENDER_TARGET);
}

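/* Readback via scanout is possible when the host scanout buffer is a GBM
 * allocation and the resource is bound for scanout with a supported scanout
 * format.
 */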
static bool virgl_can_readback_from_scanout(struct virgl_screen *vs,
                                            struct virgl_resource *res,
                                            int bind)
{
   return (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_SCANOUT_USES_GBM) &&
         (bind & VIRGL_BIND_SCANOUT) &&
         virgl_has_scanout_format(vs, res->b.format, true);
}

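/* Staging copy transfers are only used for textures, and only when the host
 * supports copy transfers in both directions.
 */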
static bool virgl_can_use_staging(struct virgl_screen *vs,
                                  struct virgl_resource *res)
{
   return (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS) &&
         (res->b.target != PIPE_BUFFER);
}

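/* A layered (array or 3D) resource whose format contains a stencil component. */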
static bool is_stencil_array(struct virgl_resource *res)
{
   const struct util_format_description *descr = util_format_description(res->b.format);
   return (res->b.array_size > 1 || res->b.depth0 > 1) && util_format_has_stencil(descr);
}

static bool virgl_can_copy_transfer_from_host(struct virgl_screen *vs,
                                              struct virgl_resource *res,
                                              int bind)
{
   return virgl_can_use_staging(vs, res) &&
         !is_stencil_array(res) &&
         !(bind & VIRGL_BIND_SHARED) &&
         virgl_has_readback_format(&vs->base, pipe_to_virgl_format(res->b.format), false) &&
         ((!(vs->caps.caps.v2.capability_bits & VIRGL_CAP_HOST_IS_GLES)) ||
          virgl_can_readback_from_rendertarget(vs, res) ||
          virgl_can_readback_from_scanout(vs, res, bind));
}

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 *  - synchronization is disabled
 *  - the resource is not referenced by the current cmdbuf
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_MAP_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date.  But there are cases where the readback can be skipped:
 *
 *  - the content can be discarded
 *  - the host storage is read-only
 *
 * Note that PIPE_MAP_WRITE without discard bits requires readback.
 * PIPE_MAP_READ becomes irrelevant.  PIPE_MAP_UNSYNCHRONIZED and
 * PIPE_MAP_FLUSH_EXPLICIT are also irrelevant.
 */
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_MAP_DISCARD_RANGE |
                PIPE_MAP_DISCARD_WHOLE_RESOURCE))
      return false;

   if (res->clean_mask & (1 << level))
      return false;

   return true;
}

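/* Decide how the transfer should be serviced: perform (or skip) the required
 * flush, readback and wait, and return the map type the caller should use.
 * Returns VIRGL_TRANSFER_MAP_ERROR when the transfer cannot be serviced.
 */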
static enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer,
                                bool is_blob)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   struct virgl_winsys *vws = vs->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is no way to map the host storage currently */
   if (xfer->base.usage & PIPE_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   /* We break the logic down into four steps
    *
    * step 1: determine the required operations independently
    * step 2: look for chances to skip the operations
    * step 3: resolve dependencies between the operations
    * step 4: execute the operations
    */

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);
   /* We need to wait for all cmdbufs, current or previous, that access the
    * resource to finish unless synchronization is disabled.
    */
   wait = !(xfer->base.usage & PIPE_MAP_UNSYNCHRONIZED);

   /* When the transfer range consists of only uninitialized data, we can
    * assume the GPU is not accessing the range and readback is unnecessary.
    * We can proceed as if PIPE_MAP_UNSYNCHRONIZED and
    * PIPE_MAP_DISCARD_RANGE are set.
    */
   if (res->b.target == PIPE_BUFFER &&
       !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
                              xfer->base.box.x + xfer->base.box.width) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      flush = false;
      readback = false;
      wait = false;
   }

   /* When the resource is busy but its content can be discarded, we can
    * replace its HW resource or use a staging buffer to avoid waiting.
    */
   if (wait && !is_blob &&
       (xfer->base.usage & (PIPE_MAP_DISCARD_RANGE |
                            PIPE_MAP_DISCARD_WHOLE_RESOURCE)) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      bool can_realloc = false;

      /* A PIPE_MAP_DISCARD_WHOLE_RESOURCE transfer may be followed by
       * PIPE_MAP_UNSYNCHRONIZED transfers to non-overlapping regions.
       * It cannot be treated as a PIPE_MAP_DISCARD_RANGE transfer,
       * otherwise those following unsynchronized transfers may overwrite
       * valid data.
       */
      if (xfer->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
         can_realloc = virgl_can_rebind_resource(vctx, &res->b);
      }

      /* discard implies no readback */
      assert(!readback);

      if (can_realloc || vctx->supports_staging) {
         /* Both map types have some costs.  Do them only when the resource is
          * (or will be) busy for real.  Otherwise, set wait to false.
          */
         wait = (flush || vws->resource_is_busy(vws, res->hw_res));
         if (wait) {
            map_type = (can_realloc) ?
               VIRGL_TRANSFER_MAP_REALLOC :
               VIRGL_TRANSFER_MAP_WRITE_TO_STAGING;

            wait = false;

            /* There is normally no need to flush either, unless the amount of
             * memory we are using for staging resources starts growing, in
             * which case we want to flush to keep our memory consumption in
             * check.
             */
            flush = (vctx->queued_staging_res_size >
               VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT);
         }
      }
   }

   /* readback has some implications */
   if (readback) {
      /* If we are performing a readback for a texture and the renderer
       * supports copy_transfer_from_host, we can return here with the
       * proper map type.
       */
      if (res->use_staging) {
         if (xfer->base.usage & PIPE_MAP_READ)
            return VIRGL_TRANSFER_MAP_READ_FROM_STAGING;
         else
            return VIRGL_TRANSFER_MAP_WRITE_TO_STAGING_WITH_READBACK;
      }

      /* When the transfer queue has pending writes to this transfer's region,
       * we have to flush before readback.
       */
      if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
         flush = true;
   }

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   /* If we are not allowed to block, and we know that we will have to wait,
    * either because the resource is busy, or because it will become busy due
    * to a readback, return early to avoid performing an incomplete
    * transfer_get. Such an incomplete transfer_get may finish at any time,
    * during which another unsynchronized map could write to the resource
    * contents, leaving the contents in an undefined state.
    */
   if ((xfer->base.usage & PIPE_MAP_DONTBLOCK) &&
       (readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
      return VIRGL_TRANSFER_MAP_ERROR;

   if (readback) {
      /* Readback is yet another command and is transparent to the state
       * trackers.  It should be waited for in all cases, including when
       * PIPE_MAP_UNSYNCHRONIZED is set.
       */
      if (!is_blob) {
         vws->resource_wait(vws, res->hw_res);
         vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                           xfer->l_stride, xfer->offset, xfer->base.level);
      }
      /* transfer_get puts the resource into a maybe_busy state, so we will have
       * to wait another time if we want to use that resource. */
      wait = true;
   }

   if (wait)
      vws->resource_wait(vws, res->hw_res);

   if (res->use_staging) {
      map_type = VIRGL_TRANSFER_MAP_WRITE_TO_STAGING;
   }

   return map_type;
}

/* Calculate the minimum size of the memory required to service a resource
 * transfer map. Also return the stride and layer_stride for the corresponding
 * layout.
 */
static unsigned
virgl_transfer_map_size(struct virgl_transfer *vtransfer,
                        unsigned *out_stride,
                        uintptr_t *out_layer_stride)
{
   struct pipe_resource *pres = vtransfer->base.resource;
   struct pipe_box *box = &vtransfer->base.box;
   unsigned stride;
   uintptr_t layer_stride;
   unsigned size;

   assert(out_stride);
   assert(out_layer_stride);

   stride = util_format_get_stride(pres->format, box->width);
   layer_stride = util_format_get_2d_size(pres->format, stride, box->height);

   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      size = box->depth * layer_stride;
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      size = box->depth * stride;
   } else {
      size = layer_stride;
   }

   *out_stride = stride;
   *out_layer_stride = layer_stride;

   return size;
}

/* Maps a region from staging to service the transfer. */
static void *
virgl_staging_map(struct virgl_context *vctx,
                  struct virgl_transfer *vtransfer)
{
   struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
   unsigned size;
   unsigned align_offset;
   unsigned stride;
   uintptr_t layer_stride;
   uint8_t *map_addr;
   bool alloc_succeeded;

   assert(vctx->supports_staging);

   size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);

   /* For buffers we need to ensure that the start of the buffer would be
    * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
    * actually include it. To achieve this we may need to allocate a slightly
    * larger range from the upload buffer, and later update the uploader
    * resource offset and map address to point to the requested x coordinate
    * within that range.
    *
    * 0       A       2A      3A
    * |-------|---bbbb|bbbbb--|
    *             |--------|    ==> size
    *         |---|             ==> align_offset
    *         |------------|    ==> allocation of size + align_offset
    */
   align_offset = vres->b.target == PIPE_BUFFER ?
                  vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
                  0;

   alloc_succeeded =
      virgl_staging_alloc(&vctx->staging, size + align_offset,
                          VIRGL_MAP_BUFFER_ALIGNMENT,
                          &vtransfer->copy_src_offset,
                          &vtransfer->copy_src_hw_res,
                          &map_addr);
   if (alloc_succeeded) {
      /* Update source offset and address to point to the requested x coordinate
       * if we have an align_offset (see above for more information). */
      vtransfer->copy_src_offset += align_offset;
      map_addr += align_offset;

      /* Mark as dirty, since we are updating the host side resource
       * without going through the corresponding guest side resource, and
       * hence the two will diverge.
       */
      virgl_resource_dirty(vres, vtransfer->base.level);

      /* We are using the minimum required size to hold the contents,
       * possibly using a layout different from the layout of the resource,
       * so update the transfer strides accordingly.
       */
      vtransfer->base.stride = stride;
      vtransfer->base.layer_stride = layer_stride;

      /* Track the total size of active staging resources. */
      vctx->queued_staging_res_size += size + align_offset;
   }

   return map_addr;
}

/* Maps a region from staging to service the transfer from host.
 * This function should be called only for texture readbacks
 * from host. */
static void *
virgl_staging_read_map(struct virgl_context *vctx,
                       struct virgl_transfer *vtransfer)
{
   struct virgl_screen *vscreen = virgl_screen(vctx->base.screen);
   struct virgl_winsys *vws = vscreen->vws;
   assert(vtransfer->base.resource->target != PIPE_BUFFER);
   void *map_addr;

   /* There are two ways to perform the readback:
    * a) calling transfer_get();
    * b) calling submit_cmd() with the encoded transfer inside the cmd.
    *
    * For b) we need to:
    *   1. select an offset in the staging buffer
    *   2. encode this transfer on the wire
    *   3. flush the execbuffer to the host
    *   4. wait till the copy on the host is done
    */
   map_addr = virgl_staging_map(vctx, vtransfer);
   vtransfer->direction = VIRGL_TRANSFER_FROM_HOST;
   virgl_encode_copy_transfer(vctx, vtransfer);
   vctx->base.flush(&vctx->base, NULL, 0);
   vws->resource_wait(vws, vtransfer->copy_src_hw_res);
   return map_addr;
}

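/* Replace the resource's virgl_hw_res with a freshly allocated one, so that a
 * discard map does not have to wait for the old storage to become idle. The
 * new hw_res is then rebound to all current uses of the resource.
 */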
static bool
virgl_resource_realloc(struct virgl_context *vctx, struct virgl_resource *res)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   const struct pipe_resource *templ = &res->b;
   unsigned vbind, vflags;
   struct virgl_hw_res *hw_res;

   vbind = pipe_to_virgl_bind(vs, templ->bind);
   vflags = pipe_to_virgl_flags(vs, templ->flags);

   int alloc_size = res->use_staging ? 1 : res->metadata.total_size;

   hw_res = vs->vws->resource_create(vs->vws,
                                     templ->target,
                                     NULL,
                                     templ->format,
                                     vbind,
                                     templ->width0,
                                     templ->height0,
                                     templ->depth0,
                                     templ->array_size,
                                     templ->last_level,
                                     templ->nr_samples,
                                     vflags,
                                     alloc_size);
   if (!hw_res)
      return false;

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   res->hw_res = hw_res;

   /* We can safely clear the range here, since it will be repopulated in the
    * following rebind operation, according to the active buffer binds.
    */
   util_range_set_empty(&res->valid_buffer_range);

   /* count toward the staging resource size limit */
   vctx->queued_staging_res_size += res->metadata.total_size;

   virgl_rebind_resource(vctx, &res->b);

   return true;
}

void *
virgl_resource_transfer_map(struct pipe_context *ctx,
                            struct pipe_resource *resource,
                            unsigned level,
                            unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **transfer)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vscreen = virgl_screen(ctx->screen);
   struct virgl_winsys *vws = vscreen->vws;
   struct virgl_resource *vres = virgl_resource(resource);
   struct virgl_transfer *trans;
   enum virgl_transfer_map_type map_type;
   void *map_addr;

   /* Multisampled resources require resolve before mapping. */
   assert(resource->nr_samples <= 1);

   /* If the virgl resource was created with persistence and coherency flags,
    * then its memory mapping can only be made in accordance with these
    * flags. We record the "usage" flags in struct virgl_transfer and
    * virgl_buffer_transfer_unmap() then uses them to differentiate
    * unmapping of a host blob resource from a guest one.
    */
   if (resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
      usage |= PIPE_MAP_PERSISTENT;

   if (resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
      usage |= PIPE_MAP_COHERENT;

   bool is_blob = usage & (PIPE_MAP_COHERENT | PIPE_MAP_PERSISTENT);

   trans = virgl_resource_create_transfer(vctx, resource,
                                          &vres->metadata, level, usage, box);

   map_type = virgl_resource_transfer_prepare(vctx, trans, is_blob);
   switch (map_type) {
   case VIRGL_TRANSFER_MAP_REALLOC:
      if (!virgl_resource_realloc(vctx, vres)) {
         map_addr = NULL;
         break;
      }
      vws->resource_reference(vws, &trans->hw_res, vres->hw_res);
      FALLTHROUGH;
   case VIRGL_TRANSFER_MAP_HW_RES:
      trans->hw_res_map = vws->resource_map(vws, vres->hw_res);
      if (trans->hw_res_map)
         map_addr = (uint8_t *)trans->hw_res_map + trans->offset;
      else
         map_addr = NULL;
      break;
   case VIRGL_TRANSFER_MAP_WRITE_TO_STAGING:
      map_addr = virgl_staging_map(vctx, trans);
      /* Copy transfers don't make use of hw_res_map at the moment. */
      trans->hw_res_map = NULL;
      trans->direction = VIRGL_TRANSFER_TO_HOST;
      break;
   case VIRGL_TRANSFER_MAP_READ_FROM_STAGING:
      map_addr = virgl_staging_read_map(vctx, trans);
      /* Copy transfers don't make use of hw_res_map at the moment. */
      trans->hw_res_map = NULL;
      break;
   case VIRGL_TRANSFER_MAP_WRITE_TO_STAGING_WITH_READBACK:
      map_addr = virgl_staging_read_map(vctx, trans);
      /* Copy transfers don't make use of hw_res_map at the moment. */
      trans->hw_res_map = NULL;
      trans->direction = VIRGL_TRANSFER_TO_HOST;
      break;
   case VIRGL_TRANSFER_MAP_ERROR:
   default:
      trans->hw_res_map = NULL;
      map_addr = NULL;
      break;
   }

   if (!map_addr) {
      virgl_resource_destroy_transfer(vctx, trans);
      return NULL;
   }

   if (vres->b.target == PIPE_BUFFER) {
      /* For the checks below to be able to use 'usage', we assume that
       * transfer preparation doesn't affect the usage.
       */
      assert(usage == trans->base.usage);

      /* If we are doing a whole resource discard with a hw_res map, the buffer
       * storage can now be considered unused and we don't care about previous
       * contents.  We can thus mark the storage as uninitialized, but only if
       * the buffer is not host writable (in which case we can't clear the
       * valid range, since that would result in missed readbacks in future
       * transfers).  We only do this for VIRGL_TRANSFER_MAP_HW_RES, since for
       * VIRGL_TRANSFER_MAP_REALLOC we already take care of the buffer range
       * when reallocating and rebinding, and VIRGL_TRANSFER_MAP_STAGING is not
       * currently used for whole resource discards.
       */
      if (map_type == VIRGL_TRANSFER_MAP_HW_RES &&
          (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) &&
          (vres->clean_mask & 1)) {
         util_range_set_empty(&vres->valid_buffer_range);
      }

      if (usage & PIPE_MAP_WRITE)
          util_range_add(&vres->b, &vres->valid_buffer_range, box->x, box->x + box->width);
   }

   *transfer = &trans->base;
   return map_addr;
}
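
/* Compute the per-level strides and offsets of the guest backing store and
 * the total size it needs, following the winsys-provided stride when one is
 * given.
 */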
static void virgl_resource_layout(struct pipe_resource *pt,
                                  struct virgl_resource_metadata *metadata,
                                  uint32_t plane,
                                  uint32_t winsys_stride,
                                  uint32_t plane_offset,
                                  uint64_t modifier)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = winsys_stride ? winsys_stride :
                                util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   metadata->plane = plane;
   metadata->plane_offset = plane_offset;
   metadata->modifier = modifier;
   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

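/* Create a resource and its guest backing store. The regular resource_create
 * path calls this with map_front_private == NULL.
 */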
static struct pipe_resource *virgl_resource_create_front(struct pipe_screen *screen,
                                                         const struct pipe_resource *templ,
                                                         const void *map_front_private)
{
   unsigned vbind, vflags;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   uint32_t alloc_size;

   res->b = *templ;
   res->b.screen = &vs->base;
   pipe_reference_init(&res->b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind);
   vflags = pipe_to_virgl_flags(vs, templ->flags);
   virgl_resource_layout(&res->b, &res->metadata, 0, 0, 0, 0);

   if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT) &&
       vs->tweak_gles_emulate_bgra &&
       (templ->format == PIPE_FORMAT_B8G8R8A8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8A8_UNORM ||
        templ->format == PIPE_FORMAT_B8G8R8X8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8X8_UNORM)) {
      vbind |= VIRGL_BIND_PREFER_EMULATED_BGRA;
   }

   // If the renderer supports copy transfers from the host, textures go
   // through the staging path, so only a minimum-size bo is allocated for
   // the guest shadow. This size is not passed to the host.
   res->use_staging = virgl_can_copy_transfer_from_host(vs, res, vbind);

   if (res->use_staging)
      alloc_size = 1;
   else
      alloc_size = res->metadata.total_size;

   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          map_front_private,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          vflags,
                                          alloc_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      virgl_buffer_init(res);
   } else {
      virgl_texture_init(res);
   }

   return &res->b;
}

static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   return virgl_resource_create_front(screen, templ, NULL);
}

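/* Import a resource from a winsys handle (e.g. a dma-buf). Buffers cannot be
 * imported; blob-memory imports may carry plane, stride and modifier
 * information from the winsys.
 */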
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   uint32_t winsys_stride, plane_offset, plane;
   uint64_t modifier;
   uint32_t storage_size;

   struct virgl_screen *vs = virgl_screen(screen);
   if (templ && templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   if (templ)
      res->b = *templ;
   res->b.screen = &vs->base;
   pipe_reference_init(&res->b.reference, 1);

   plane = winsys_stride = plane_offset = modifier = 0;
   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle,
                                                      &res->b,
                                                      &plane,
                                                      &winsys_stride,
                                                      &plane_offset,
                                                      &modifier,
                                                      &res->blob_mem);

   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   /* do not use winsys returns for guest storage info of classic resource */
   if (!res->blob_mem) {
      winsys_stride = 0;
      plane_offset = 0;
      modifier = 0;
   }

   virgl_resource_layout(&res->b, &res->metadata, plane, winsys_stride,
                         plane_offset, modifier);

   /*
    * If the overall resource is larger than a single page in size, we can
    * compare it with the amount of memory allocated on the guest to determine
    * if we should be using the staging path.
    *
    * If not, the decision is not as clear. However, since the resource can
    * fit within a single page, the import will function correctly.
    */
   storage_size = vs->vws->resource_get_storage_size(vs->vws, res->hw_res);

   if (res->metadata.total_size > storage_size)
      res->use_staging = 1;

   /* assign blob resource a type in case it was created untyped */
   if (res->blob_mem && plane == 0 &&
       (vs->caps.caps.v2.host_feature_check_version >= 18 ||
        (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_UNTYPED_RESOURCE))) {
      uint32_t plane_strides[VIRGL_MAX_PLANE_COUNT];
      uint32_t plane_offsets[VIRGL_MAX_PLANE_COUNT];
      uint32_t plane_count = 0;
      struct pipe_resource *iter = &res->b;

      do {
         struct virgl_resource *plane = virgl_resource(iter);

         /* must be a plain 2D texture sharing the same hw_res */
         if (plane->b.target != PIPE_TEXTURE_2D ||
             plane->b.depth0 != 1 ||
             plane->b.array_size != 1 ||
             plane->b.last_level != 0 ||
             plane->b.nr_samples > 1 ||
             plane->hw_res != res->hw_res ||
             plane_count >= VIRGL_MAX_PLANE_COUNT) {
            vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
            FREE(res);
            return NULL;
         }

         plane_strides[plane_count] = plane->metadata.stride[0];
         plane_offsets[plane_count] = plane->metadata.plane_offset;
         plane_count++;
         iter = iter->next;
      } while (iter);

      vs->vws->resource_set_type(vs->vws,
                                 res->hw_res,
                                 pipe_to_virgl_format(res->b.format),
                                 pipe_to_virgl_bind(vs, res->b.bind),
                                 res->b.width0,
                                 res->b.height0,
                                 usage,
                                 res->metadata.modifier,
                                 plane_count,
                                 plane_strides,
                                 plane_offsets);
   }

   virgl_texture_init(res);

   return &res->b;
}

static bool
virgl_resource_get_param(struct pipe_screen *screen,
                         struct pipe_context *context,
                         struct pipe_resource *resource,
                         unsigned plane,
                         unsigned layer,
                         unsigned level,
                         enum pipe_resource_param param,
                         unsigned handle_usage,
                         uint64_t *value)
{
   struct virgl_resource *res = virgl_resource(resource);

   switch(param) {
   case PIPE_RESOURCE_PARAM_MODIFIER:
      *value = res->metadata.modifier;
      return true;
   default:
      return false;
   }
}

void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
    screen->resource_create_front = virgl_resource_create_front;
    screen->resource_create = virgl_resource_create;
    screen->resource_from_handle = virgl_resource_from_handle;
    screen->resource_get_handle = virgl_resource_get_handle;
    screen->resource_destroy = virgl_resource_destroy;
    screen->resource_get_param = virgl_resource_get_param;
}

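/* pipe_context::buffer_subdata hook: try to extend an already queued transfer
 * when no flush, readback or wait is required, and fall back to the generic
 * map-based path otherwise.
 */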
static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct virgl_context *vctx = virgl_context(pipe);
   struct virgl_resource *vbuf = virgl_resource(resource);

   /* We can try virgl_transfer_queue_extend_buffer when there is no
    * flush/readback/wait required.  Based on virgl_resource_transfer_prepare,
    * the simplest way to make sure that is the case is to check the valid
    * buffer range.
    */
   if (!util_ranges_intersect(&vbuf->valid_buffer_range,
                              offset, offset + size) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER)) &&
       virgl_transfer_queue_extend_buffer(&vctx->queue,
                                          vbuf->hw_res, offset, size, data)) {
      util_range_add(&vbuf->b, &vbuf->valid_buffer_range, offset, offset + size);
      return;
   }

   u_default_buffer_subdata(pipe, resource, usage, offset, size, data);
}

void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
    ctx->buffer_map = virgl_resource_transfer_map;
    ctx->texture_map = virgl_texture_transfer_map;
    ctx->transfer_flush_region = virgl_buffer_transfer_flush_region;
    ctx->buffer_unmap = virgl_buffer_transfer_unmap;
    ctx->texture_unmap = virgl_texture_transfer_unmap;
    ctx->buffer_subdata = virgl_buffer_subdata;
    ctx->texture_subdata = u_default_texture_subdata;
}


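/* Allocate a virgl_transfer from the context's transfer pool and compute the
 * byte offset of the requested box within the resource's guest layout.
 */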
struct virgl_transfer *
virgl_resource_create_transfer(struct virgl_context *vctx,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->plane_offset + metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   }
   else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_zalloc(&vctx->transfer_pool);
   if (!trans)
      return NULL;

   pipe_resource_reference(&trans->base.resource, pres);
   vws->resource_reference(vws, &trans->hw_res, virgl_resource(pres)->hw_res);

   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

void virgl_resource_destroy_transfer(struct virgl_context *vctx,
                                     struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;

   vws->resource_reference(vws, &trans->copy_src_hw_res, NULL);

   util_range_destroy(&trans->range);
   vws->resource_reference(vws, &trans->hw_res, NULL);
   pipe_resource_reference(&trans->base.resource, NULL);
   slab_free(&vctx->transfer_pool, trans);
}

void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->b.target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   FREE(res);
}

bool virgl_resource_get_handle(struct pipe_screen *screen,
                               struct pipe_context *context,
                               struct pipe_resource *resource,
                               struct winsys_handle *whandle,
                               unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->b.target == PIPE_BUFFER)
      return false;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

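/* Clear the clean bit for the given level (buffers only track a single bit),
 * so that later transfers know the guest shadow no longer matches the host
 * storage and may need a readback.
 */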
void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}
995