1 /*
2  * Copyright 2022 Google LLC
3  * SPDX-License-Identifier: MIT
4  */
5 
6 #include <errno.h>
7 #include <fcntl.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 
14 #include <xf86drm.h>
15 
16 #include "virgl_context.h"
17 #include "virgl_util.h"
18 #include "virglrenderer.h"
19 
20 #include "util/anon_file.h"
21 #include "util/hash_table.h"
22 #include "util/macros.h"
23 #include "util/os_file.h"
24 #include "util/u_atomic.h"
25 #include "util/u_thread.h"
26 
27 #include "drm_fence.h"
28 
29 #include "msm_drm.h"
30 #include "msm_proto.h"
31 #include "msm_renderer.h"
32 
33 static unsigned nr_timelines;
34 
35 /**
36  * A single context (from the PoV of the virtio-gpu protocol) maps to
37  * a single drm device open.  Other drm/msm constructs (ie. submitqueue)
38  * are opaque to the protocol.
39  *
40  * Typically each guest process will open a single virtio-gpu "context".
41  * The single drm device open maps to an individual msm_gem_address_space
42  * on the kernel side, providing GPU address space isolation between
43  * guest processes.
44  *
45  * GEM buffer objects are tracked via one of two id's:
46  *  - resource-id:  global, assigned by guest kernel
47  *  - blob-id:      context specific, assigned by guest userspace
48  *
49  * The blob-id is used to link the bo created via MSM_CCMD_GEM_NEW and
50  * the get_blob() cb.  It is unused in the case of a bo that is imported
51  * from another context.  An object is added to the blob table in GEM_NEW
52  * and removed in ctx->get_blob() (where it is added to resource_table).
53  * By avoiding having an obj in both tables, we can safely free remaining
54  * entries in either hashtable at context teardown.
55  */
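
/*
 * An illustrative lifecycle for a guest-allocated buffer, in terms of the
 * tables described above (the ordering is an illustration, not a spec):
 *
 *   1. Guest userspace picks a blob_id and sends MSM_CCMD_GEM_NEW;
 *      msm_ccmd_gem_new() allocates the GEM bo and inserts the msm_object
 *      into blob_table, keyed by blob_id.
 *   2. The guest kernel creates the blob resource; msm_renderer_get_blob()
 *      removes the object from blob_table and inserts it into
 *      resource_table, keyed by res_id.
 *   3. On detach, msm_renderer_detach_resource() removes the object from
 *      resource_table, unmaps and closes the bo, and frees it.
 */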
56 struct msm_context {
57    struct virgl_context base;
58 
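   /*
    * Shared memory between guest and host: the msm_shmem header lives at
    * offset zero of the shmem blob (blob_id==0), rsp_mem points just past
    * it (at shmem->rsp_mem_offset), and per-ccmd responses are written
    * there at each request's rsp_off.  See msm_renderer_get_blob() and
    * msm_context_rsp().
    */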
59    struct msm_shmem *shmem;
60    uint8_t *rsp_mem;
61    uint32_t rsp_mem_sz;
62 
63    struct msm_ccmd_rsp *current_rsp;
64 
65    int fd;
66 
67    struct hash_table *blob_table;
68    struct hash_table *resource_table;
69 
70    /**
71     * Maps submitqueue id to priority, ie. ring_idx-1
72     */
73    struct hash_table *sq_to_ring_idx_table;
74 
75    int eventfd;
76 
77    /**
78     * Indexed by ring_idx-1 (ring_idx is the submitqueue priority plus one).
79     * On the kernel side, there is a drm_sched_entity per {drm_file, prio}
80     * tuple, and the sched entity determines the fence timeline, ie. submits
81     * against a single sched entity complete in fifo order.
82     */
83    struct drm_timeline timelines[];
84 };
85 DEFINE_CAST(virgl_context, msm_context)
86 
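/*
 * Check that a request's variable-length payload fits within the size
 * declared in its common header (hdr.len covers the fixed struct plus the
 * payload), so the guest can't make us read past the end of the request:
 */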
87 #define valid_payload_len(req) ((req)->len <= ((req)->hdr.len - sizeof(*(req))))
88 
89 static struct hash_entry *
90 table_search(struct hash_table *ht, uint32_t key)
91 {
92    /* zero is not a valid key for u32_keys hashtable: */
93    if (!key)
94       return NULL;
95    return _mesa_hash_table_search(ht, (void *)(uintptr_t)key);
96 }
97 
98 static int
99 gem_info(struct msm_context *mctx, uint32_t handle, uint32_t param, uint64_t *val)
100 {
101    struct drm_msm_gem_info args = {
102       .handle = handle,
103       .info = param,
104       .value = *val,
105    };
106    int ret;
107 
108    ret = drmCommandWriteRead(mctx->fd, DRM_MSM_GEM_INFO, &args, sizeof(args));
109    if (ret)
110       return ret;
111 
112    *val = args.value;
113    return 0;
114 }
115 
116 static int
117 gem_close(int fd, uint32_t handle)
118 {
119    struct drm_gem_close close_req = {
120       .handle = handle,
121    };
122    return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close_req);
123 }
124 
125 struct msm_object {
126    uint32_t blob_id;
127    uint32_t res_id;
128    uint32_t handle;
129    uint32_t flags;
130    uint32_t size;
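   /* 'exported' is set once the bo has been handed out as a blob resource
    * (a bo can back at most one resource); 'exportable' gates dmabuf
    * export in msm_renderer_export_opaque_handle():
    */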
131    bool exported   : 1;
132    bool exportable : 1;
133    struct virgl_resource *res;
134    void *map;
135 };
136 
137 static struct msm_object *
138 msm_object_create(uint32_t handle, uint32_t flags, uint32_t size)
139 {
140    struct msm_object *obj = calloc(1, sizeof(*obj));
141 
142    if (!obj)
143       return NULL;
144 
145    obj->handle = handle;
146    obj->flags = flags;
147    obj->size = size;
148 
149    return obj;
150 }
151 
152 static bool
153 valid_blob_id(struct msm_context *mctx, uint32_t blob_id)
154 {
155    /* must be non-zero: */
156    if (blob_id == 0)
157       return false;
158 
159    /* must not already be in-use: */
160    if (table_search(mctx->blob_table, blob_id))
161       return false;
162 
163    return true;
164 }
165 
166 static void
167 msm_object_set_blob_id(struct msm_context *mctx, struct msm_object *obj, uint32_t blob_id)
168 {
169    assert(valid_blob_id(mctx, blob_id));
170 
171    obj->blob_id = blob_id;
172    _mesa_hash_table_insert(mctx->blob_table, (void *)(uintptr_t)obj->blob_id, obj);
173 }
174 
175 static bool
176 valid_res_id(struct msm_context *mctx, uint32_t res_id)
177 {
178    return !table_search(mctx->resource_table, res_id);
179 }
180 
181 static void
182 msm_object_set_res_id(struct msm_context *mctx, struct msm_object *obj, uint32_t res_id)
183 {
184    assert(valid_res_id(mctx, res_id));
185 
186    obj->res_id = res_id;
187    _mesa_hash_table_insert(mctx->resource_table, (void *)(uintptr_t)obj->res_id, obj);
188 }
189 
190 static void
191 msm_remove_object(struct msm_context *mctx, struct msm_object *obj)
192 {
193    drm_dbg("obj=%p, blob_id=%u, res_id=%u", obj, obj->blob_id, obj->res_id);
194    _mesa_hash_table_remove_key(mctx->resource_table, (void *)(uintptr_t)obj->res_id);
195 }
196 
197 static struct msm_object *
198 msm_retrieve_object_from_blob_id(struct msm_context *mctx, uint64_t blob_id)
199 {
200    assert((blob_id >> 32) == 0);
201    uint32_t id = blob_id;
202    struct hash_entry *entry = table_search(mctx->blob_table, id);
203    if (!entry)
204       return NULL;
205    struct msm_object *obj = entry->data;
206    _mesa_hash_table_remove(mctx->blob_table, entry);
207    return obj;
208 }
209 
210 static struct msm_object *
211 msm_get_object_from_res_id(struct msm_context *mctx, uint32_t res_id)
212 {
213    const struct hash_entry *entry = table_search(mctx->resource_table, res_id);
214    return likely(entry) ? entry->data : NULL;
215 }
216 
217 static uint32_t
218 handle_from_res_id(struct msm_context *mctx, uint32_t res_id)
219 {
220    struct msm_object *obj = msm_get_object_from_res_id(mctx, res_id);
221    if (!obj)
222       return 0;    /* zero is an invalid GEM handle */
223    return obj->handle;
224 }
225 
226 static bool
227 has_cached_coherent(int fd)
228 {
229    struct drm_msm_gem_new new_req = {
230       .size = 0x1000,
231       .flags = MSM_BO_CACHED_COHERENT,
232    };
233 
234    /* Do a test allocation to see if cached-coherent is supported: */
235    if (!drmCommandWriteRead(fd, DRM_MSM_GEM_NEW, &new_req, sizeof(new_req))) {
236       gem_close(fd, new_req.handle);
237       return true;
238    }
239 
240    return false;
241 }
242 
243 static int
244 get_param64(int fd, uint32_t param, uint64_t *value)
245 {
246    struct drm_msm_param req = {
247       .pipe = MSM_PIPE_3D0,
248       .param = param,
249    };
250    int ret;
251 
252    *value = 0;
253 
254    ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
255    if (ret)
256       return ret;
257 
258    *value = req.value;
259 
260    return 0;
261 }
262 
263 static int
264 get_param32(int fd, uint32_t param, uint32_t *value)
265 {
266    uint64_t v64;
267    int ret = get_param64(fd, param, &v64);
268    *value = v64;
269    return ret;
270 }
271 
272 /**
273  * Probe capset params.
274  */
275 int
276 msm_renderer_probe(int fd, struct virgl_renderer_capset_drm *capset)
277 {
278    drm_log("");
279 
280    /* Require MSM_SUBMIT_FENCE_SN_IN: */
281    if (capset->version_minor < 9) {
282       drm_log("Host kernel too old");
283       return -ENOTSUP;
284    }
285 
286    capset->wire_format_version = 2;
287    capset->u.msm.has_cached_coherent = has_cached_coherent(fd);
288 
289    get_param32(fd, MSM_PARAM_PRIORITIES, &capset->u.msm.priorities);
290    get_param64(fd, MSM_PARAM_VA_START,   &capset->u.msm.va_start);
291    get_param64(fd, MSM_PARAM_VA_SIZE,    &capset->u.msm.va_size);
292    get_param32(fd, MSM_PARAM_GPU_ID,     &capset->u.msm.gpu_id);
293    get_param32(fd, MSM_PARAM_GMEM_SIZE,  &capset->u.msm.gmem_size);
294    get_param64(fd, MSM_PARAM_GMEM_BASE,  &capset->u.msm.gmem_base);
295    get_param64(fd, MSM_PARAM_CHIP_ID,    &capset->u.msm.chip_id);
296    get_param32(fd, MSM_PARAM_MAX_FREQ,   &capset->u.msm.max_freq);
297 
298    nr_timelines = capset->u.msm.priorities;
299 
300    drm_log("wire_format_version: %u", capset->wire_format_version);
301    drm_log("version_major:       %u", capset->version_major);
302    drm_log("version_minor:       %u", capset->version_minor);
303    drm_log("version_patchlevel:  %u", capset->version_patchlevel);
304    drm_log("has_cached_coherent: %u", capset->u.msm.has_cached_coherent);
305    drm_log("priorities:          %u", capset->u.msm.priorities);
306    drm_log("va_start:            0x%0" PRIx64, capset->u.msm.va_start);
307    drm_log("va_size:             0x%0" PRIx64, capset->u.msm.va_size);
308    drm_log("gpu_id:              %u", capset->u.msm.gpu_id);
309    drm_log("gmem_size:           %u", capset->u.msm.gmem_size);
310    drm_log("gmem_base:           0x%0" PRIx64, capset->u.msm.gmem_base);
311    drm_log("chip_id:             0x%0" PRIx64, capset->u.msm.chip_id);
312    drm_log("max_freq:            %u", capset->u.msm.max_freq);
313 
314    if (!capset->u.msm.va_size) {
315       drm_log("Host kernel does not support userspace allocated IOVA");
316       return -ENOTSUP;
317    }
318 
319    return 0;
320 }
321 
322 static void
323 resource_delete_fxn(struct hash_entry *entry)
324 {
325    free((void *)entry->data);
326 }
327 
328 static void
329 msm_renderer_destroy(struct virgl_context *vctx)
330 {
331    struct msm_context *mctx = to_msm_context(vctx);
332 
333    for (unsigned i = 0; i < nr_timelines; i++)
334       drm_timeline_fini(&mctx->timelines[i]);
335 
336    close(mctx->eventfd);
337 
338    if (mctx->shmem)
339       munmap(mctx->shmem, sizeof(*mctx->shmem));
340 
341    _mesa_hash_table_destroy(mctx->resource_table, resource_delete_fxn);
342    _mesa_hash_table_destroy(mctx->blob_table, resource_delete_fxn);
343    _mesa_hash_table_destroy(mctx->sq_to_ring_idx_table, NULL);
344 
345    close(mctx->fd);
346    free(mctx);
347 }
348 
349 static void
350 msm_renderer_attach_resource(struct virgl_context *vctx, struct virgl_resource *res)
351 {
352    struct msm_context *mctx = to_msm_context(vctx);
353    struct msm_object *obj = msm_get_object_from_res_id(mctx, res->res_id);
354 
355    drm_dbg("obj=%p, res_id=%u", obj, res->res_id);
356 
357    if (!obj) {
358       int fd;
359       enum virgl_resource_fd_type fd_type = virgl_resource_export_fd(res, &fd);
360 
361       /* If the dmabuf resource was created by another context (or
362        * externally), import it to create a gem obj handle in our
363        * context:
364        */
365       if (fd_type == VIRGL_RESOURCE_FD_DMABUF) {
366          uint32_t handle;
367          int ret;
368 
369          ret = drmPrimeFDToHandle(mctx->fd, fd, &handle);
370          if (ret) {
371             drm_log("Could not import: %s", strerror(errno));
372             close(fd);
373             return;
374          }
375 
376          /* lseek() to get bo size */
377          int size = lseek(fd, 0, SEEK_END);
378          if (size < 0)
379             drm_log("lseek failed: %d (%s)", size, strerror(errno));
380          close(fd);
381 
382          obj = msm_object_create(handle, 0, size);
383          if (!obj)
384             return;
385 
386          msm_object_set_res_id(mctx, obj, res->res_id);
387 
388          drm_dbg("obj=%p, res_id=%u, handle=%u", obj, obj->res_id, obj->handle);
389       } else {
390          if (fd_type != VIRGL_RESOURCE_FD_INVALID)
391             close(fd);
392          return;
393       }
394    }
395 
396    obj->res = res;
397 }
398 
399 static void
400 msm_renderer_detach_resource(struct virgl_context *vctx, struct virgl_resource *res)
401 {
402    struct msm_context *mctx = to_msm_context(vctx);
403    struct msm_object *obj = msm_get_object_from_res_id(mctx, res->res_id);
404 
405    drm_dbg("obj=%p, res_id=%u", obj, res->res_id);
406 
407    if (!obj || (obj->res != res))
408       return;
409 
410    if (res->fd_type == VIRGL_RESOURCE_FD_SHM) {
411       munmap(mctx->shmem, sizeof(*mctx->shmem));
412 
413       mctx->shmem = NULL;
414       mctx->rsp_mem = NULL;
415       mctx->rsp_mem_sz = 0;
416 
417       /* shmem resources don't have a backing host GEM bo, so bail now: */
418       return;
419    }
420 
421    msm_remove_object(mctx, obj);
422 
423    if (obj->map)
424       munmap(obj->map, obj->size);
425 
426    gem_close(mctx->fd, obj->handle);
427 
428    free(obj);
429 }
430 
431 static enum virgl_resource_fd_type
432 msm_renderer_export_opaque_handle(struct virgl_context *vctx, struct virgl_resource *res,
433                                   int *out_fd)
434 {
435    struct msm_context *mctx = to_msm_context(vctx);
436    struct msm_object *obj = msm_get_object_from_res_id(mctx, res->res_id);
437    int ret;
438 
439    drm_dbg("obj=%p, res_id=%u", obj, res->res_id);
440 
441    if (!obj) {
442       drm_log("invalid res_id %u", res->res_id);
443       return VIRGL_RESOURCE_FD_INVALID;
444    }
445 
446    if (!obj->exportable) {
447       /* crosvm seems to like to export things it doesn't actually need an
448        * fd for.. don't let it spam our fd table!
449        */
450       return VIRGL_RESOURCE_FD_INVALID;
451    }
452 
453    ret = drmPrimeHandleToFD(mctx->fd, obj->handle, DRM_CLOEXEC | DRM_RDWR, out_fd);
454    if (ret) {
455       drm_log("failed to get dmabuf fd: %s", strerror(errno));
456       return VIRGL_RESOURCE_FD_INVALID;
457    }
458 
459    return VIRGL_RESOURCE_FD_DMABUF;
460 }
461 
462 static int
463 msm_renderer_transfer_3d(UNUSED struct virgl_context *vctx,
464                          UNUSED struct virgl_resource *res,
465                          UNUSED const struct vrend_transfer_info *info,
466                          UNUSED int transfer_mode)
467 {
468    drm_log("unsupported");
469    return -1;
470 }
471 
472 static int
473 msm_renderer_get_blob(struct virgl_context *vctx, uint32_t res_id, uint64_t blob_id,
474                       uint64_t blob_size, uint32_t blob_flags,
475                       struct virgl_context_blob *blob)
476 {
477    struct msm_context *mctx = to_msm_context(vctx);
478 
479    drm_dbg("blob_id=%" PRIu64 ", res_id=%u, blob_size=%" PRIu64 ", blob_flags=0x%x",
480            blob_id, res_id, blob_size, blob_flags);
481 
482    if ((blob_id >> 32) != 0) {
483       drm_log("invalid blob_id: %" PRIu64, blob_id);
484       return -EINVAL;
485    }
486 
487    /* blob_id of zero is reserved for the shmem buffer: */
488    if (blob_id == 0) {
489       int fd;
490 
491       if (blob_flags != VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE) {
492          drm_log("invalid blob_flags: 0x%x", blob_flags);
493          return -EINVAL;
494       }
495 
496       if (mctx->shmem) {
497          drm_log("There can be only one!");
498          return -EINVAL;
499       }
500 
501       fd = os_create_anonymous_file(blob_size, "msm-shmem");
502       if (fd < 0) {
503          drm_log("Failed to create shmem file: %s", strerror(errno));
504          return -ENOMEM;
505       }
506 
507       int ret = fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_SHRINK | F_SEAL_GROW);
508       if (ret) {
509          drm_log("fcntl failed: %s", strerror(errno));
510          close(fd);
511          return -ENOMEM;
512       }
513 
514       mctx->shmem = mmap(NULL, blob_size, PROT_WRITE | PROT_READ, MAP_SHARED, fd, 0);
515       if (mctx->shmem == MAP_FAILED) {
516          drm_log("shmem mmap failed: %s", strerror(errno));
517          close(fd);
518          return -ENOMEM;
519       }
520 
521       mctx->shmem->rsp_mem_offset = sizeof(*mctx->shmem);
522 
523       uint8_t *ptr = (uint8_t *)mctx->shmem;
524       mctx->rsp_mem = &ptr[mctx->shmem->rsp_mem_offset];
525       mctx->rsp_mem_sz = blob_size - mctx->shmem->rsp_mem_offset;
526 
527       blob->type = VIRGL_RESOURCE_FD_SHM;
528       blob->u.fd = fd;
529       blob->map_info = VIRGL_RENDERER_MAP_CACHE_CACHED;
530 
531       return 0;
532    }
533 
534    if (!valid_res_id(mctx, res_id)) {
535       drm_log("Invalid res_id %u", res_id);
536       return -EINVAL;
537    }
538 
539    struct msm_object *obj = msm_retrieve_object_from_blob_id(mctx, blob_id);
540 
541    /* If GEM_NEW fails, we can end up here without a backing obj: */
542    if (!obj) {
543       drm_log("No object");
544       return -ENOENT;
545    }
546 
547    /* A bo can only be exported once; we don't want two resources to point
548     * to the same storage.
549     */
550    if (obj->exported) {
551       drm_log("Already exported!");
552       return -EINVAL;
553    }
554 
555    msm_object_set_res_id(mctx, obj, res_id);
556 
557    if (blob_flags & VIRGL_RENDERER_BLOB_FLAG_USE_SHAREABLE) {
558       int fd, ret;
559 
560       ret = drmPrimeHandleToFD(mctx->fd, obj->handle, DRM_CLOEXEC | DRM_RDWR, &fd);
561       if (ret) {
562          drm_log("Export to fd failed");
563          return -EINVAL;
564       }
565 
566       blob->type = VIRGL_RESOURCE_FD_DMABUF;
567       blob->u.fd = fd;
568    } else {
569       blob->type = VIRGL_RESOURCE_OPAQUE_HANDLE;
570       blob->u.opaque_handle = obj->handle;
571    }
572 
573    if (obj->flags & MSM_BO_CACHED_COHERENT) {
574       blob->map_info = VIRGL_RENDERER_MAP_CACHE_CACHED;
575    } else {
576       blob->map_info = VIRGL_RENDERER_MAP_CACHE_WC;
577    }
578 
579    obj->exported = true;
580    obj->exportable = !!(blob_flags & VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE);
581 
582    return 0;
583 }
584 
585 static void *
586 msm_context_rsp_noshadow(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
587 {
588    return &mctx->rsp_mem[hdr->rsp_off];
589 }
590 
591 static void *
592 msm_context_rsp(struct msm_context *mctx, const struct msm_ccmd_req *hdr, unsigned len)
593 {
594    unsigned rsp_mem_sz = mctx->rsp_mem_sz;
595    unsigned off = hdr->rsp_off;
596 
597    if ((off > rsp_mem_sz) || (len > rsp_mem_sz - off)) {
598       drm_log("invalid shm offset: off=%u, len=%u (shmem_size=%u)", off, len, rsp_mem_sz);
599       return NULL;
600    }
601 
602    struct msm_ccmd_rsp *rsp = msm_context_rsp_noshadow(mctx, hdr);
603 
604    assert(len >= sizeof(*rsp));
605 
606    /* With newer host and older guest, we could end up wanting a larger rsp struct
607     * than guest expects, so allocate a shadow buffer in this case rather than
608     * having to deal with this in all the different ccmd handlers.  This is similar
609     * in a way to what drm_ioctl() does.
610     */
611    if (len > rsp->len) {
612       rsp = malloc(len);
613       if (!rsp)
614          return NULL;
615       rsp->len = len;
616    }
617 
618    mctx->current_rsp = rsp;
619 
620    return rsp;
621 }
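
/*
 * Typical usage in the ccmd handlers below (a sketch; msm_ccmd_foo_rsp is
 * a placeholder name):
 *
 *    struct msm_ccmd_foo_rsp *rsp = msm_context_rsp(mctx, hdr, sizeof(*rsp));
 *    if (!rsp)
 *       return -ENOMEM;
 *    rsp->ret = drmCommandWriteRead(mctx->fd, ...);
 *    return 0;
 *
 * Any shadow copy allocated here is copied back into the guest-visible
 * response buffer by submit_cmd_dispatch().
 */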
622 
623 static int
624 msm_ccmd_nop(UNUSED struct msm_context *mctx, UNUSED const struct msm_ccmd_req *hdr)
625 {
626    return 0;
627 }
628 
629 static int
630 msm_ccmd_ioctl_simple(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
631 {
632    const struct msm_ccmd_ioctl_simple_req *req = to_msm_ccmd_ioctl_simple_req(hdr);
633    unsigned payload_len = _IOC_SIZE(req->cmd);
634    unsigned req_len = size_add(sizeof(*req), payload_len);
635 
636    if (hdr->len != req_len) {
637       drm_log("%u != %u", hdr->len, req_len);
638       return -EINVAL;
639    }
640 
641    /* Apply a reasonable upper bound on ioctl size: */
642    if (payload_len > 128) {
643       drm_log("invalid ioctl payload length: %u", payload_len);
644       return -EINVAL;
645    }
646 
647    /* Allow-list of supported ioctls: */
648    unsigned iocnr = _IOC_NR(req->cmd) - DRM_COMMAND_BASE;
649    switch (iocnr) {
650    case DRM_MSM_GET_PARAM:
651    case DRM_MSM_SUBMITQUEUE_NEW:
652    case DRM_MSM_SUBMITQUEUE_CLOSE:
653       break;
654    default:
655       drm_log("invalid ioctl: %08x (%u)", req->cmd, iocnr);
656       return -EINVAL;
657    }
658 
659    struct msm_ccmd_ioctl_simple_rsp *rsp;
660    unsigned rsp_len = sizeof(*rsp);
661 
662    if (req->cmd & IOC_OUT)
663       rsp_len = size_add(rsp_len, payload_len);
664 
665    rsp = msm_context_rsp(mctx, hdr, rsp_len);
666 
667    if (!rsp)
668       return -ENOMEM;
669 
670    /* Copy the payload because the kernel can write (if IOC_OUT bit
671     * is set) and to avoid casting away the const:
672     */
673    char payload[payload_len];
674    memcpy(payload, req->payload, payload_len);
675 
676    rsp->ret = drmIoctl(mctx->fd, req->cmd, payload);
677 
678    if (req->cmd & IOC_OUT)
679       memcpy(rsp->payload, payload, payload_len);
680 
681    if (iocnr == DRM_MSM_SUBMITQUEUE_NEW && !rsp->ret) {
682       struct drm_msm_submitqueue *args = (void *)payload;
683 
684       drm_dbg("submitqueue %u, prio %u", args->id, args->prio);
685 
686       _mesa_hash_table_insert(mctx->sq_to_ring_idx_table, (void *)(uintptr_t)args->id,
687                               (void *)(uintptr_t)args->prio);
688    }
689 
690    return 0;
691 }
692 
693 static int
694 msm_ccmd_gem_new(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
695 {
696    const struct msm_ccmd_gem_new_req *req = to_msm_ccmd_gem_new_req(hdr);
697    int ret = 0;
698 
699    if (!valid_blob_id(mctx, req->blob_id)) {
700       drm_log("Invalid blob_id %u", req->blob_id);
701       ret = -EINVAL;
702       goto out_error;
703    }
704 
705    /*
706     * First part, allocate the GEM bo:
707     */
708    struct drm_msm_gem_new gem_new = {
709       .size = req->size,
710       .flags = req->flags,
711    };
712 
713    ret = drmCommandWriteRead(mctx->fd, DRM_MSM_GEM_NEW, &gem_new, sizeof(gem_new));
714    if (ret) {
715       drm_log("GEM_NEW failed: %d (%s)", ret, strerror(errno));
716       goto out_error;
717    }
718 
719    /*
720     * Second part, set the iova:
721     */
722    uint64_t iova = req->iova;
723    ret = gem_info(mctx, gem_new.handle, MSM_INFO_SET_IOVA, &iova);
724    if (ret) {
725       drm_log("SET_IOVA failed: %d (%s)", ret, strerror(errno));
726       goto out_close;
727    }
728 
729    /*
730     * And then finally create our msm_object for tracking the resource,
731     * and add to blob table:
732     */
733    struct msm_object *obj = msm_object_create(gem_new.handle, req->flags, req->size);
734 
735    if (!obj) {
736       ret = -ENOMEM;
737       goto out_close;
738    }
739 
740    msm_object_set_blob_id(mctx, obj, req->blob_id);
741 
742    drm_dbg("obj=%p, blob_id=%u, handle=%u, iova=%" PRIx64, obj, obj->blob_id,
743            obj->handle, iova);
744 
745    return 0;
746 
747 out_close:
748    gem_close(mctx->fd, gem_new.handle);
749 out_error:
750    if (mctx->shmem)
751       mctx->shmem->async_error++;
752    return ret;
753 }
754 
755 static int
756 msm_ccmd_gem_set_iova(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
757 {
758    const struct msm_ccmd_gem_set_iova_req *req = to_msm_ccmd_gem_set_iova_req(hdr);
759    struct msm_object *obj = msm_get_object_from_res_id(mctx, req->res_id);
760    int ret = 0;
761 
762    if (!obj) {
763       drm_log("Could not lookup obj: res_id=%u", req->res_id);
764       ret = -ENOENT;
765       goto out_error;
766    }
767 
768    uint64_t iova = req->iova;
769    if (iova) {
770       TRACE_SCOPE_BEGIN("SET_IOVA");
771       ret = gem_info(mctx, obj->handle, MSM_INFO_SET_IOVA, &iova);
772       TRACE_SCOPE_END("SET_IOVA");
773    } else {
774       TRACE_SCOPE_BEGIN("CLEAR_IOVA");
775       ret = gem_info(mctx, obj->handle, MSM_INFO_SET_IOVA, &iova);
776       TRACE_SCOPE_END("CLEAR_IOVA");
777    }
778    if (ret) {
779       drm_log("SET_IOVA failed: %d (%s)", ret, strerror(errno));
780       goto out_error;
781    }
782 
783    drm_dbg("obj=%p, blob_id=%u, handle=%u, iova=%" PRIx64, obj, obj->blob_id,
784            obj->handle, iova);
785 
786    return 0;
787 
788 out_error:
789    if (mctx->shmem)
790       mctx->shmem->async_error++;
791    return 0;
792 }
793 
794 static int
795 msm_ccmd_gem_cpu_prep(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
796 {
797    const struct msm_ccmd_gem_cpu_prep_req *req = to_msm_ccmd_gem_cpu_prep_req(hdr);
798    struct msm_ccmd_gem_cpu_prep_rsp *rsp = msm_context_rsp(mctx, hdr, sizeof(*rsp));
799 
800    if (!rsp)
801       return -ENOMEM;
802 
803    struct drm_msm_gem_cpu_prep args = {
804       .handle = handle_from_res_id(mctx, req->res_id),
805       .op = req->op | MSM_PREP_NOSYNC,
806    };
807 
808    rsp->ret = drmCommandWrite(mctx->fd, DRM_MSM_GEM_CPU_PREP, &args, sizeof(args));
809 
810    return 0;
811 }
812 
813 static int
814 msm_ccmd_gem_set_name(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
815 {
816    const struct msm_ccmd_gem_set_name_req *req = to_msm_ccmd_gem_set_name_req(hdr);
817 
818    struct drm_msm_gem_info args = {
819       .handle = handle_from_res_id(mctx, req->res_id),
820       .info = MSM_INFO_SET_NAME,
821       .value = VOID2U64(req->payload),
822       .len = req->len,
823    };
824 
825    if (!valid_payload_len(req))
826       return -EINVAL;
827 
828    int ret = drmCommandWrite(mctx->fd, DRM_MSM_GEM_INFO, &args, sizeof(args));
829    if (ret)
830       drm_log("ret=%d, len=%u, name=%.*s", ret, req->len, req->len, req->payload);
831 
832    return 0;
833 }
834 
835 static void
836 msm_dump_submit(struct drm_msm_gem_submit *req)
837 {
838 #ifndef NDEBUG
839    drm_log("  flags=0x%x, queueid=%u", req->flags, req->queueid);
840    for (unsigned i = 0; i < req->nr_bos; i++) {
841       struct drm_msm_gem_submit_bo *bos = U642VOID(req->bos);
842       struct drm_msm_gem_submit_bo *bo = &bos[i];
843       drm_log("  bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
844    }
845    for (unsigned i = 0; i < req->nr_cmds; i++) {
846       struct drm_msm_gem_submit_cmd *cmds = U642VOID(req->cmds);
847       struct drm_msm_gem_submit_cmd *cmd = &cmds[i];
848       drm_log("  cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u", i,
849               cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
850    }
851 #else
852    (void)req;
853 #endif
854 }
855 
856 static int
857 msm_ccmd_gem_submit(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
858 {
859    const struct msm_ccmd_gem_submit_req *req = to_msm_ccmd_gem_submit_req(hdr);
860 
861    size_t sz = sizeof(*req);
862    sz = size_add(sz, size_mul(req->nr_bos,  sizeof(struct drm_msm_gem_submit_bo)));
863    sz = size_add(sz, size_mul(req->nr_cmds, sizeof(struct drm_msm_gem_submit_cmd)));
864 
865    /* Normally the kernel would validate out-of-bounds situations and return -EFAULT,
866     * but since we are copying the bo handles, we need to validate that the
867     * guest can't trigger us to make an out of bounds memory access:
868     */
869    if (sz > hdr->len) {
870       drm_log("out of bounds: nr_bos=%u, nr_cmds=%u", req->nr_bos, req->nr_cmds);
871       return -ENOSPC;
872    }
873 
874    const unsigned bo_limit = 8192 / sizeof(struct drm_msm_gem_submit_bo);
875    bool bos_on_stack = req->nr_bos < bo_limit;
876    struct drm_msm_gem_submit_bo _bos[bos_on_stack ? req->nr_bos : 0];
877    struct drm_msm_gem_submit_bo *bos;
878 
879    if (bos_on_stack) {
880       bos = _bos;
881    } else {
882       bos = malloc(req->nr_bos * sizeof(bos[0]));
883       if (!bos)
884          return -ENOMEM;
885    }
886 
887    memcpy(bos, req->payload, req->nr_bos * sizeof(bos[0]));
888 
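   /* The guest encodes res-ids in bos[].handle; translate them to host GEM
    * handles (zero, an invalid handle, if the res-id is unknown):
    */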
889    for (uint32_t i = 0; i < req->nr_bos; i++)
890       bos[i].handle = handle_from_res_id(mctx, bos[i].handle);
891 
892    struct drm_msm_gem_submit args = {
893       .flags = req->flags | MSM_SUBMIT_FENCE_FD_OUT | MSM_SUBMIT_FENCE_SN_IN,
894       .fence = req->fence,
895       .nr_bos = req->nr_bos,
896       .nr_cmds = req->nr_cmds,
897       .bos = VOID2U64(bos),
898       .cmds = VOID2U64(&req->payload[req->nr_bos * sizeof(struct drm_msm_gem_submit_bo)]),
899       .queueid = req->queue_id,
900    };
901 
902    int ret = drmCommandWriteRead(mctx->fd, DRM_MSM_GEM_SUBMIT, &args, sizeof(args));
903    drm_dbg("fence=%u, ret=%d", args.fence, ret);
904 
905    if (unlikely(ret)) {
906       drm_log("submit failed: %s", strerror(errno));
907       msm_dump_submit(&args);
908       if (mctx->shmem)
909          mctx->shmem->async_error++;
910    } else {
911       const struct hash_entry *entry =
912             table_search(mctx->sq_to_ring_idx_table, args.queueid);
913 
914       if (!entry) {
915          drm_log("unknown submitqueue: %u", args.queueid);
916          goto out;
917       }
918 
919       unsigned prio = (uintptr_t)entry->data;
920 
921       drm_timeline_set_last_fence_fd(&mctx->timelines[prio], args.fence_fd);
922    }
923 
924 out:
925    if (!bos_on_stack)
926       free(bos);
927    return 0;
928 }
929 
930 static int
931 map_object(struct msm_context *mctx, struct msm_object *obj)
932 {
933    uint64_t offset;
934    int ret;
935 
936    if (obj->map)
937       return 0;
938 
939    uint32_t handle = handle_from_res_id(mctx, obj->res_id);
940    ret = gem_info(mctx, handle, MSM_INFO_GET_OFFSET, &offset);
941    if (ret) {
942       drm_log("alloc failed: %s", strerror(errno));
943       return ret;
944    }
945 
946    uint8_t *map =
947       mmap(0, obj->size, PROT_READ | PROT_WRITE, MAP_SHARED, mctx->fd, offset);
948    if (map == MAP_FAILED) {
949       drm_log("mmap failed: %s", strerror(errno));
950       return -ENOMEM;
951    }
952 
953    obj->map = map;
954 
955    return 0;
956 }
957 
958 static int
959 msm_ccmd_gem_upload(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
960 {
961    const struct msm_ccmd_gem_upload_req *req = to_msm_ccmd_gem_upload_req(hdr);
962    int ret;
963 
964    if (req->pad || !valid_payload_len(req)) {
965       drm_log("Invalid upload ccmd");
966       return -EINVAL;
967    }
968 
969    struct msm_object *obj = msm_get_object_from_res_id(mctx, req->res_id);
970    if (!obj) {
971       drm_log("No obj: res_id=%u", req->res_id);
972       return -ENOENT;
973    }
974 
975    ret = map_object(mctx, obj);
976    if (ret)
977       return ret;
978 
979    memcpy(&obj->map[req->off], req->payload, req->len);
980 
981    return 0;
982 }
983 
984 static int
985 msm_ccmd_submitqueue_query(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
986 {
987    const struct msm_ccmd_submitqueue_query_req *req =
988       to_msm_ccmd_submitqueue_query_req(hdr);
989    struct msm_ccmd_submitqueue_query_rsp *rsp =
990       msm_context_rsp(mctx, hdr, size_add(sizeof(*rsp), req->len));
991 
992    if (!rsp)
993       return -ENOMEM;
994 
995    struct drm_msm_submitqueue_query args = {
996       .data = VOID2U64(rsp->payload),
997       .id = req->queue_id,
998       .param = req->param,
999       .len = req->len,
1000    };
1001 
1002    rsp->ret =
1003       drmCommandWriteRead(mctx->fd, DRM_MSM_SUBMITQUEUE_QUERY, &args, sizeof(args));
1004 
1005    rsp->out_len = args.len;
1006 
1007    return 0;
1008 }
1009 
1010 static int
1011 msm_ccmd_wait_fence(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
1012 {
1013    const struct msm_ccmd_wait_fence_req *req = to_msm_ccmd_wait_fence_req(hdr);
1014    struct msm_ccmd_wait_fence_rsp *rsp = msm_context_rsp(mctx, hdr, sizeof(*rsp));
1015 
1016    if (!rsp)
1017       return -ENOMEM;
1018 
1019    struct timespec t;
1020 
1021    /* Use current time as timeout, to avoid blocking: */
1022    clock_gettime(CLOCK_MONOTONIC, &t);
1023 
1024    struct drm_msm_wait_fence args = {
1025       .fence = req->fence,
1026       .queueid = req->queue_id,
1027       .timeout =
1028          {
1029             .tv_sec = t.tv_sec,
1030             .tv_nsec = t.tv_nsec,
1031          },
1032    };
1033 
1034    rsp->ret = drmCommandWrite(mctx->fd, DRM_MSM_WAIT_FENCE, &args, sizeof(args));
1035 
1036    return 0;
1037 }
1038 
1039 static int
1040 msm_ccmd_set_debuginfo(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
1041 {
1042    const struct msm_ccmd_set_debuginfo_req *req = to_msm_ccmd_set_debuginfo_req(hdr);
1043 
1044    size_t sz = sizeof(*req);
1045    sz = size_add(sz, req->comm_len);
1046    sz = size_add(sz, req->cmdline_len);
1047 
1048    if (sz > hdr->len) {
1049       drm_log("out of bounds: comm_len=%u, cmdline_len=%u", req->comm_len, req->cmdline_len);
1050       return -ENOSPC;
1051    }
1052 
1053    struct drm_msm_param set_comm = {
1054       .pipe = MSM_PIPE_3D0,
1055       .param = MSM_PARAM_COMM,
1056       .value = VOID2U64(&req->payload[0]),
1057       .len = req->comm_len,
1058    };
1059 
1060    drmCommandWriteRead(mctx->fd, DRM_MSM_SET_PARAM, &set_comm, sizeof(set_comm));
1061 
1062    struct drm_msm_param set_cmdline = {
1063       .pipe = MSM_PIPE_3D0,
1064       .param = MSM_PARAM_CMDLINE,
1065       .value = VOID2U64(&req->payload[req->comm_len]),
1066       .len = req->cmdline_len,
1067    };
1068 
1069    drmCommandWriteRead(mctx->fd, DRM_MSM_SET_PARAM, &set_cmdline, sizeof(set_cmdline));
1070 
1071    return 0;
1072 }
1073 
1074 static const struct ccmd {
1075    const char *name;
1076    int (*handler)(struct msm_context *mctx, const struct msm_ccmd_req *hdr);
1077    size_t size;
1078 } ccmd_dispatch[] = {
1079 #define HANDLER(N, n)                                                                    \
1080    [MSM_CCMD_##N] = {#N, msm_ccmd_##n, sizeof(struct msm_ccmd_##n##_req)}
1081    HANDLER(NOP, nop),
1082    HANDLER(IOCTL_SIMPLE, ioctl_simple),
1083    HANDLER(GEM_NEW, gem_new),
1084    HANDLER(GEM_SET_IOVA, gem_set_iova),
1085    HANDLER(GEM_CPU_PREP, gem_cpu_prep),
1086    HANDLER(GEM_SET_NAME, gem_set_name),
1087    HANDLER(GEM_SUBMIT, gem_submit),
1088    HANDLER(GEM_UPLOAD, gem_upload),
1089    HANDLER(SUBMITQUEUE_QUERY, submitqueue_query),
1090    HANDLER(WAIT_FENCE, wait_fence),
1091    HANDLER(SET_DEBUGINFO, set_debuginfo),
1092 };
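
/*
 * Each entry's 'size' is the request size this host build expects;
 * submit_cmd_dispatch() uses it to zero-extend shorter requests from an
 * older guest before calling the handler.
 */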
1093 
1094 static int
1095 submit_cmd_dispatch(struct msm_context *mctx, const struct msm_ccmd_req *hdr)
1096 {
1097    int ret;
1098 
1099    if (hdr->cmd >= ARRAY_SIZE(ccmd_dispatch)) {
1100       drm_log("invalid cmd: %u", hdr->cmd);
1101       return -EINVAL;
1102    }
1103 
1104    const struct ccmd *ccmd = &ccmd_dispatch[hdr->cmd];
1105 
1106    if (!ccmd->handler) {
1107       drm_log("no handler: %u", hdr->cmd);
1108       return -EINVAL;
1109    }
1110 
1111    drm_dbg("%s: hdr={cmd=%u, len=%u, seqno=%u, rsp_off=0x%x)", ccmd->name, hdr->cmd,
1112            hdr->len, hdr->seqno, hdr->rsp_off);
1113 
1114    TRACE_SCOPE_BEGIN(ccmd->name);
1115 
1116    /* If the request length from the guest is smaller than the expected
1117     * size, ie. newer host and older guest, we need to make a copy of
1118     * the request with the new fields at the end zero initialized.
1119     */
1120    if (ccmd->size > hdr->len) {
1121       uint8_t buf[ccmd->size];
1122 
1123       memcpy(&buf[0], hdr, hdr->len);
1124       memset(&buf[hdr->len], 0, ccmd->size - hdr->len);
1125 
1126       ret = ccmd->handler(mctx, (struct msm_ccmd_req *)buf);
1127    } else {
1128       ret = ccmd->handler(mctx, hdr);
1129    }
1130 
1131    TRACE_SCOPE_END(ccmd->name);
1132 
1133    if (ret) {
1134       drm_log("%s: dispatch failed: %d (%s)", ccmd->name, ret, strerror(errno));
1135       return ret;
1136    }
1137 
1138    /* If the response length from the guest is smaller than the
1139     * expected size, ie. newer host and older guest, then a shadow
1140     * copy is used, and we need to copy back to the actual rsp
1141     * buffer.
1142     */
1143    struct msm_ccmd_rsp *rsp = msm_context_rsp_noshadow(mctx, hdr);
1144    if (mctx->current_rsp && (mctx->current_rsp != rsp)) {
1145       unsigned len = rsp->len;
1146       memcpy(rsp, mctx->current_rsp, len);
1147       rsp->len = len;
1148       free(mctx->current_rsp);
1149    }
1150    mctx->current_rsp = NULL;
1151 
1152    /* Note that commands with no response, like SET_DEBUGINFO, could
1153     * be sent before the shmem buffer is allocated:
1154     */
1155    if (mctx->shmem) {
1156       /* TODO better way to do this?  We need ACQ_REL semantics (AFAIU)
1157        * to ensure that writes to response buffer are visible to the
1158        * guest process before the update of the seqno.  Otherwise we
1159        * could just use p_atomic_set.
1160        */
1161       uint32_t seqno = hdr->seqno;
1162       p_atomic_xchg(&mctx->shmem->seqno, seqno);
1163    }
1164 
1165    return 0;
1166 }
1167 
1168 static int
1169 msm_renderer_submit_cmd(struct virgl_context *vctx, const void *_buffer, size_t size)
1170 {
1171    struct msm_context *mctx = to_msm_context(vctx);
1172    const uint8_t *buffer = _buffer;
1173 
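   /* The command stream is a sequence of variable-length msm_ccmd_req
    * packets; each hdr->len covers the header plus payload and must be a
    * multiple of four bytes:
    */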
1174    while (size >= sizeof(struct msm_ccmd_req)) {
1175       const struct msm_ccmd_req *hdr = (const struct msm_ccmd_req *)buffer;
1176 
1177       /* Sanity check first: */
1178       if ((hdr->len > size) || (hdr->len < sizeof(*hdr)) || (hdr->len % 4)) {
1179          drm_log("bad size, %u vs %zu (%u)", hdr->len, size, hdr->cmd);
1180          return -EINVAL;
1181       }
1182 
1183       if (hdr->rsp_off % 4) {
1184          drm_log("bad rsp_off, %u", hdr->rsp_off);
1185          return -EINVAL;
1186       }
1187 
1188       int ret = submit_cmd_dispatch(mctx, hdr);
1189       if (ret) {
1190          drm_log("dispatch failed: %d (%u)", ret, hdr->cmd);
1191          return ret;
1192       }
1193 
1194       buffer += hdr->len;
1195       size -= hdr->len;
1196    }
1197 
1198    if (size > 0) {
1199       drm_log("bad size, %zu trailing bytes", size);
1200       return -EINVAL;
1201    }
1202 
1203    return 0;
1204 }
1205 
1206 static int
1207 msm_renderer_get_fencing_fd(struct virgl_context *vctx)
1208 {
1209    struct msm_context *mctx = to_msm_context(vctx);
1210    return mctx->eventfd;
1211 }
1212 
1213 static void
1214 msm_renderer_retire_fences(UNUSED struct virgl_context *vctx)
1215 {
1216    /* No-op as VIRGL_RENDERER_ASYNC_FENCE_CB is required */
1217 }
1218 
1219 static int
1220 msm_renderer_submit_fence(struct virgl_context *vctx, uint32_t flags, uint32_t ring_idx,
1221                           uint64_t fence_id)
1222 {
1223    struct msm_context *mctx = to_msm_context(vctx);
1224 
1225    drm_dbg("flags=0x%x, ring_idx=%" PRIu32 ", fence_id=%" PRIu64, flags,
1226            ring_idx, fence_id);
1227 
1228    /* timeline is ring_idx-1 (because ring_idx 0 is host CPU timeline) */
1229    if (ring_idx > nr_timelines) {
1230       drm_log("invalid ring_idx: %" PRIu32, ring_idx);
1231       return -EINVAL;
1232    }
1233 
1234    /* ring_idx zero is used for the guest to synchronize with host CPU,
1235     * meaning that by the time ->submit_fence() is called the fence has
1236     * already passed, so just signal it immediately:
1237     */
1238    if (ring_idx == 0) {
1239       vctx->fence_retire(vctx, ring_idx, fence_id);
1240       return 0;
1241    }
1242 
1243    return drm_timeline_submit_fence(&mctx->timelines[ring_idx - 1], flags, fence_id);
1244 }
1245 
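/*
 * One msm_context is created per virtio-gpu context.  It takes ownership
 * of the drm device fd passed in (closed in msm_renderer_destroy()) and
 * creates one drm_timeline per submitqueue priority reported by the probe.
 */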
1246 struct virgl_context *
1247 msm_renderer_create(int fd)
1248 {
1249    struct msm_context *mctx;
1250 
1251    drm_log("");
1252 
1253    mctx = calloc(1, sizeof(*mctx) + (nr_timelines * sizeof(mctx->timelines[0])));
1254    if (!mctx)
1255       return NULL;
1256 
1257    mctx->fd = fd;
1258 
1259    /* Indexed by blob_id, but only lower 32b of blob_id are used: */
1260    mctx->blob_table = _mesa_hash_table_create_u32_keys(NULL);
1261    /* Indexed by res_id: */
1262    mctx->resource_table = _mesa_hash_table_create_u32_keys(NULL);
1263    /* Indexed by submitqueue-id: */
1264    mctx->sq_to_ring_idx_table = _mesa_hash_table_create_u32_keys(NULL);
1265 
1266    mctx->eventfd = create_eventfd(0);
1267 
1268    for (unsigned i = 0; i < nr_timelines; i++) {
1269       unsigned ring_idx = i + 1; /* ring_idx 0 is host CPU */
1270       drm_timeline_init(&mctx->timelines[i], &mctx->base, "msm-sync", mctx->eventfd,
1271                         ring_idx);
1272    }
1273 
1274    mctx->base.destroy = msm_renderer_destroy;
1275    mctx->base.attach_resource = msm_renderer_attach_resource;
1276    mctx->base.detach_resource = msm_renderer_detach_resource;
1277    mctx->base.export_opaque_handle = msm_renderer_export_opaque_handle;
1278    mctx->base.transfer_3d = msm_renderer_transfer_3d;
1279    mctx->base.get_blob = msm_renderer_get_blob;
1280    mctx->base.submit_cmd = msm_renderer_submit_cmd;
1281    mctx->base.get_fencing_fd = msm_renderer_get_fencing_fd;
1282    mctx->base.retire_fences = msm_renderer_retire_fences;
1283    mctx->base.submit_fence = msm_renderer_submit_fence;
1284 
1285    return &mctx->base;
1286 }
1287