/*
 * Copyright 2020 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include "vkr_context.h"

#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

#include "pipe/p_state.h"
#include "util/anon_file.h"
#include "venus-protocol/vn_protocol_renderer_dispatches.h"

#define XXH_INLINE_ALL
#include "util/xxhash.h"

#include "vkr_buffer.h"
#include "vkr_command_buffer.h"
#include "vkr_context.h"
#include "vkr_cs.h"
#include "vkr_descriptor_set.h"
#include "vkr_device.h"
#include "vkr_device_memory.h"
#include "vkr_image.h"
#include "vkr_instance.h"
#include "vkr_physical_device.h"
#include "vkr_pipeline.h"
#include "vkr_query_pool.h"
#include "vkr_queue.h"
#include "vkr_render_pass.h"
#include "vkr_ring.h"
#include "vkr_transport.h"

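/* Registers a newly created VkInstance with the context: the instance is
 * added to the context's object table and remembered as the context's single
 * instance, optionally along with an instance name used for logging.
 */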
void
vkr_context_add_instance(struct vkr_context *ctx,
                         struct vkr_instance *instance,
                         const char *name)
{
   vkr_context_add_object(ctx, &instance->base);

   assert(!ctx->instance);
   ctx->instance = instance;

   if (name && name[0] != '\0') {
      assert(!ctx->instance_name);
      ctx->instance_name = strdup(name);
   }
}

void
vkr_context_remove_instance(struct vkr_context *ctx, struct vkr_instance *instance)
{
   assert(ctx->instance && ctx->instance == instance);
   ctx->instance = NULL;

   if (ctx->instance_name) {
      free(ctx->instance_name);
      ctx->instance_name = NULL;
   }

   vkr_context_remove_object(ctx, &instance->base);
}

static void
vkr_dispatch_debug_log(UNUSED struct vn_dispatch_context *dispatch, const char *msg)
{
   vkr_log(msg);
}

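/* Wires up the vn_dispatch_context: the shared encoder/decoder pair plus the
 * per-object-type command handlers (instance, device, queue, memory, image,
 * descriptor, pipeline, command buffer, etc.) that vn_dispatch_command()
 * routes decoded Venus commands to.
 */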
static void
vkr_context_init_dispatch(struct vkr_context *ctx)
{
   struct vn_dispatch_context *dispatch = &ctx->dispatch;

   dispatch->data = ctx;
   dispatch->debug_log = vkr_dispatch_debug_log;

   dispatch->encoder = (struct vn_cs_encoder *)&ctx->encoder;
   dispatch->decoder = (struct vn_cs_decoder *)&ctx->decoder;

   vkr_context_init_transport_dispatch(ctx);

   vkr_context_init_instance_dispatch(ctx);
   vkr_context_init_physical_device_dispatch(ctx);
   vkr_context_init_device_dispatch(ctx);

   vkr_context_init_queue_dispatch(ctx);
   vkr_context_init_fence_dispatch(ctx);
   vkr_context_init_semaphore_dispatch(ctx);
   vkr_context_init_event_dispatch(ctx);

   vkr_context_init_device_memory_dispatch(ctx);

   vkr_context_init_buffer_dispatch(ctx);
   vkr_context_init_buffer_view_dispatch(ctx);

   vkr_context_init_image_dispatch(ctx);
   vkr_context_init_image_view_dispatch(ctx);
   vkr_context_init_sampler_dispatch(ctx);
   vkr_context_init_sampler_ycbcr_conversion_dispatch(ctx);

   vkr_context_init_descriptor_set_layout_dispatch(ctx);
   vkr_context_init_descriptor_pool_dispatch(ctx);
   vkr_context_init_descriptor_set_dispatch(ctx);
   vkr_context_init_descriptor_update_template_dispatch(ctx);

   vkr_context_init_render_pass_dispatch(ctx);
   vkr_context_init_framebuffer_dispatch(ctx);

   vkr_context_init_query_pool_dispatch(ctx);

   vkr_context_init_shader_module_dispatch(ctx);
   vkr_context_init_pipeline_layout_dispatch(ctx);
   vkr_context_init_pipeline_cache_dispatch(ctx);
   vkr_context_init_pipeline_dispatch(ctx);

   vkr_context_init_command_pool_dispatch(ctx);
   vkr_context_init_command_buffer_dispatch(ctx);
}

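/* Allocates a sync object for the CPU timeline (ring_idx 0).  It is not
 * backed by a VkFence; the submit path treats it as already signaled and only
 * queues it for the next fence retirement.
 */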
static struct vkr_cpu_sync *
vkr_alloc_cpu_sync(uint32_t flags, uint32_t ring_idx, uint64_t fence_id)
{
   struct vkr_cpu_sync *sync;
   sync = malloc(sizeof(*sync));
   if (!sync)
      return NULL;

   sync->flags = flags;
   sync->fence_id = fence_id;
   sync->ring_idx = ring_idx;
   list_inithead(&sync->head);

   return sync;
}

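/* Creates a fence on the given timeline.  ring_idx 0 is the CPU timeline and
 * signals immediately (either through the async fence callback or by queuing
 * a vkr_cpu_sync for the next retire_fences); other ring_idx values map to
 * per-queue timelines, where an empty vkQueueSubmit with a VkFence is used so
 * the fence signals once previously submitted work completes.
 */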
static int
vkr_context_submit_fence_locked(struct virgl_context *base,
                                uint32_t flags,
                                uint32_t ring_idx,
                                uint64_t fence_id)
{
   struct vkr_context *ctx = (struct vkr_context *)base;
   VkResult result;

   if (ring_idx >= ARRAY_SIZE(ctx->sync_queues)) {
      vkr_log("invalid sync ring_idx %u", ring_idx);
      return -EINVAL;
   }

   if (ring_idx == 0) {
      if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) {
         ctx->base.fence_retire(&ctx->base, ring_idx, fence_id);
      } else {
         struct vkr_cpu_sync *sync = vkr_alloc_cpu_sync(flags, ring_idx, fence_id);
         if (!sync)
            return -ENOMEM;

         list_addtail(&sync->head, &ctx->signaled_cpu_syncs);
      }
      return 0;
   } else if (!ctx->sync_queues[ring_idx]) {
      vkr_log("invalid ring_idx %u", ring_idx);
      return -EINVAL;
   }

   struct vkr_queue *queue = ctx->sync_queues[ring_idx];
   struct vkr_device *dev = queue->device;
   struct vn_device_proc_table *vk = &dev->proc_table;

   struct vkr_queue_sync *sync =
      vkr_device_alloc_queue_sync(dev, flags, ring_idx, fence_id);
   if (!sync)
      return -ENOMEM;

   result = vk->QueueSubmit(queue->base.handle.queue, 0, NULL, sync->fence);
   if (result == VK_ERROR_DEVICE_LOST) {
      sync->device_lost = true;
   } else if (result != VK_SUCCESS) {
      vkr_device_free_queue_sync(dev, sync);
      return -1;
   }

   if (vkr_renderer_flags & VKR_RENDERER_THREAD_SYNC) {
      mtx_lock(&queue->mutex);
      list_addtail(&sync->head, &queue->pending_syncs);
      mtx_unlock(&queue->mutex);
      cnd_signal(&queue->cond);
   } else {
      list_addtail(&sync->head, &queue->pending_syncs);
   }

   if (LIST_IS_EMPTY(&queue->busy_head))
      list_addtail(&queue->busy_head, &ctx->busy_queues);

   return 0;
}

static int
vkr_context_submit_fence(struct virgl_context *base,
                         uint32_t flags,
                         uint32_t ring_idx,
                         uint64_t fence_id)
{
   struct vkr_context *ctx = (struct vkr_context *)base;
   int ret;

   /* always merge fences */
   assert(!(flags & ~VIRGL_RENDERER_FENCE_FLAG_MERGEABLE));
   flags = VIRGL_RENDERER_FENCE_FLAG_MERGEABLE;

   mtx_lock(&ctx->mutex);
   ret = vkr_context_submit_fence_locked(base, flags, ring_idx, fence_id);
   mtx_unlock(&ctx->mutex);
   return ret;
}

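/* Retires all signaled fences: syncs left behind by destroyed devices, syncs
 * on the CPU timeline, and per-queue syncs whose VkFence has signaled.  Each
 * retirement invokes the frontend's fence_retire callback before the sync is
 * freed, and queues with no remaining pending syncs are removed from the busy
 * list.
 */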
static void
vkr_context_retire_fences_locked(struct virgl_context *base)
{
   struct vkr_context *ctx = (struct vkr_context *)base;
   struct vkr_queue_sync *sync, *sync_tmp;
   struct vkr_queue *queue, *queue_tmp;

   assert(!(vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB));

   /* retire syncs from destroyed devices */
   LIST_FOR_EACH_ENTRY_SAFE (sync, sync_tmp, &ctx->signaled_syncs, head) {
      /* ring_idx might have already been reused, but it is opaque to the clients */
      ctx->base.fence_retire(&ctx->base, sync->ring_idx, sync->fence_id);
      free(sync);
   }
   list_inithead(&ctx->signaled_syncs);

   /* retire syncs from the CPU timeline */
   struct vkr_cpu_sync *cpu_sync, *cpu_sync_tmp;
   LIST_FOR_EACH_ENTRY_SAFE (cpu_sync, cpu_sync_tmp, &ctx->signaled_cpu_syncs, head) {
      ctx->base.fence_retire(&ctx->base, cpu_sync->ring_idx, cpu_sync->fence_id);
      free(cpu_sync);
   }
   list_inithead(&ctx->signaled_cpu_syncs);

   /* flush the eventfd first and only once, because the per-queue sync
    * threads might write to it at any time
    */
   if (ctx->fence_eventfd >= 0)
      flush_eventfd(ctx->fence_eventfd);

   LIST_FOR_EACH_ENTRY_SAFE (queue, queue_tmp, &ctx->busy_queues, busy_head) {
      struct vkr_device *dev = queue->device;
      struct list_head retired_syncs;
      bool queue_empty;

      vkr_queue_get_signaled_syncs(queue, &retired_syncs, &queue_empty);

      LIST_FOR_EACH_ENTRY_SAFE (sync, sync_tmp, &retired_syncs, head) {
         ctx->base.fence_retire(&ctx->base, sync->ring_idx, sync->fence_id);
         vkr_device_free_queue_sync(dev, sync);
      }

      if (queue_empty)
         list_delinit(&queue->busy_head);
   }
}

static void
vkr_context_retire_fences(struct virgl_context *base)
{
   struct vkr_context *ctx = (struct vkr_context *)base;

   if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)
      return;

   mtx_lock(&ctx->mutex);
   vkr_context_retire_fences_locked(base);
   mtx_unlock(&ctx->mutex);
}

static int
vkr_context_get_fencing_fd(struct virgl_context *base)
{
   struct vkr_context *ctx = (struct vkr_context *)base;
   return ctx->fence_eventfd;
}

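/* Executes a guest command stream: decodes and dispatches commands one at a
 * time until the stream is exhausted or the decoder hits a fatal error, in
 * which case the submission fails with -EINVAL.
 */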
static int
vkr_context_submit_cmd(struct virgl_context *base, const void *buffer, size_t size)
{
   struct vkr_context *ctx = (struct vkr_context *)base;
   int ret = 0;

   mtx_lock(&ctx->mutex);

   /* CS error is considered fatal (destroy the context?) */
   if (vkr_cs_decoder_get_fatal(&ctx->decoder)) {
      mtx_unlock(&ctx->mutex);
      return -EINVAL;
   }

   vkr_cs_decoder_set_stream(&ctx->decoder, buffer, size);

   while (vkr_cs_decoder_has_command(&ctx->decoder)) {
      vn_dispatch_command(&ctx->dispatch);
      if (vkr_cs_decoder_get_fatal(&ctx->decoder)) {
         ret = -EINVAL;
         break;
      }
   }

   vkr_cs_decoder_reset(&ctx->decoder);

   mtx_unlock(&ctx->mutex);

   return ret;
}

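/* Resolves a blob allocation request.  blob_id 0 with the MAPPABLE flag
 * creates an anonymous shared-memory file; any other blob_id must name a
 * VkDeviceMemory object, which is exported as a dma-buf or opaque fd and
 * marked exported so it cannot back more than one resource.
 */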
static int
vkr_context_get_blob_locked(struct virgl_context *base,
                            uint64_t blob_id,
                            uint64_t blob_size,
                            uint32_t flags,
                            struct virgl_context_blob *blob)
{
   struct vkr_context *ctx = (struct vkr_context *)base;
   struct vkr_device_memory *mem;
   enum virgl_resource_fd_type fd_type = VIRGL_RESOURCE_FD_INVALID;
   int fd = -1;

   /* blob_id == 0 does not refer to an existing VkDeviceMemory, but implies a
    * shm allocation.  It serves a similar purpose as iov does, but it is
    * logically contiguous and it can be exported.
    */
   if (!blob_id && flags == VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE) {
      fd = os_create_anonymous_file(blob_size, "vkr-shmem");
      if (fd < 0)
         return -ENOMEM;

      blob->type = VIRGL_RESOURCE_FD_SHM;
      blob->u.fd = fd;
      blob->map_info = VIRGL_RENDERER_MAP_CACHE_CACHED;
      return 0;
   }

   mem = vkr_context_get_object(ctx, blob_id);
   if (!mem || mem->base.type != VK_OBJECT_TYPE_DEVICE_MEMORY)
      return -EINVAL;

   /* a memory can only be exported once; we don't want two resources to point
    * to the same storage.
    */
   if (mem->exported)
      return -EINVAL;

   if (!mem->valid_fd_types)
      return -EINVAL;

   if (flags & VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE) {
      const bool host_visible = mem->property_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
      if (!host_visible)
         return -EINVAL;
   }

   if (flags & VIRGL_RENDERER_BLOB_FLAG_USE_CROSS_DEVICE) {
      if (!(mem->valid_fd_types & (1 << VIRGL_RESOURCE_FD_DMABUF)))
         return -EINVAL;

      fd_type = VIRGL_RESOURCE_FD_DMABUF;
   }

   if (fd_type == VIRGL_RESOURCE_FD_INVALID) {
      /* prefer dmabuf for easier mapping?  prefer opaque for performance? */
      if (mem->valid_fd_types & (1 << VIRGL_RESOURCE_FD_DMABUF))
         fd_type = VIRGL_RESOURCE_FD_DMABUF;
      else if (mem->valid_fd_types & (1 << VIRGL_RESOURCE_FD_OPAQUE))
         fd_type = VIRGL_RESOURCE_FD_OPAQUE;
   }

   if (fd_type != VIRGL_RESOURCE_FD_INVALID) {
      VkExternalMemoryHandleTypeFlagBits handle_type;
      int ret;

      switch (fd_type) {
      case VIRGL_RESOURCE_FD_DMABUF:
         handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
         break;
      case VIRGL_RESOURCE_FD_OPAQUE:
         handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
         assert(sizeof(blob->opaque_fd_metadata.driver_uuid) == VK_UUID_SIZE);
         memcpy(blob->opaque_fd_metadata.driver_uuid,
                mem->device->physical_device->id_properties.driverUUID, VK_UUID_SIZE);
         memcpy(blob->opaque_fd_metadata.device_uuid,
                mem->device->physical_device->id_properties.deviceUUID, VK_UUID_SIZE);
         blob->opaque_fd_metadata.allocation_size = mem->allocation_size;
         blob->opaque_fd_metadata.memory_type_index = mem->memory_type_index;
         break;
      default:
         return -EINVAL;
      }

      ret = vkr_device_memory_export_fd(mem, handle_type, &fd);
      if (ret)
         return ret;

      if (fd_type == VIRGL_RESOURCE_FD_DMABUF &&
          (uint64_t)lseek(fd, 0, SEEK_END) < blob_size) {
         close(fd);
         return -EINVAL;
      }

      mem->exported = true;
   }

   blob->type = fd_type;
   blob->u.fd = fd;

   if (flags & VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE) {
      const bool host_coherent =
         mem->property_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
      const bool host_cached = mem->property_flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT;

      /* XXX guessed */
      if (host_coherent) {
         blob->map_info =
            host_cached ? VIRGL_RENDERER_MAP_CACHE_CACHED : VIRGL_RENDERER_MAP_CACHE_WC;
      } else {
         blob->map_info = VIRGL_RENDERER_MAP_CACHE_WC;
      }
   } else {
      blob->map_info = VIRGL_RENDERER_MAP_CACHE_NONE;
   }

   return 0;
}

static int
vkr_context_get_blob(struct virgl_context *base,
                     UNUSED uint32_t res_id,
                     uint64_t blob_id,
                     uint64_t blob_size,
                     uint32_t flags,
                     struct virgl_context_blob *blob)
{
   struct vkr_context *ctx = (struct vkr_context *)base;
   int ret;

   mtx_lock(&ctx->mutex);
   ret = vkr_context_get_blob_locked(base, blob_id, blob_size, flags, blob);
   mtx_unlock(&ctx->mutex);

   return ret;
}

static int
vkr_context_transfer_3d(struct virgl_context *base,
                        struct virgl_resource *res,
                        UNUSED const struct vrend_transfer_info *info,
                        UNUSED int transfer_mode)
{
   struct vkr_context *ctx = (struct vkr_context *)base;

   vkr_log("no transfer support for ctx %d and res %d", ctx->base.ctx_id, res->res_id);
   return -1;
}

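/* Tracks a virgl resource in the context.  Shared-memory resources are mapped
 * into the renderer and exposed through a single-entry iov; other resources
 * reuse the iov array provided by the resource itself.
 */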
static void
vkr_context_attach_resource_locked(struct virgl_context *base, struct virgl_resource *res)
{
   struct vkr_context *ctx = (struct vkr_context *)base;
   struct vkr_resource_attachment *att;

   att = vkr_context_get_resource(ctx, res->res_id);
   if (att) {
      assert(att->resource == res);
      return;
   }

   att = calloc(1, sizeof(*att));
   if (!att)
      return;

   void *mmap_ptr = NULL;
   if (res->fd_type == VIRGL_RESOURCE_FD_SHM) {
      mmap_ptr =
         mmap(NULL, res->map_size, PROT_WRITE | PROT_READ, MAP_SHARED, res->fd, 0);
      if (mmap_ptr == MAP_FAILED) {
         free(att);
         return;
      }
   }

   att->resource = res;

   if (mmap_ptr) {
      att->shm_iov.iov_base = mmap_ptr;
      att->shm_iov.iov_len = res->map_size;
      att->iov = &att->shm_iov;
      att->iov_count = 1;
   } else {
      att->iov = res->iov;
      att->iov_count = res->iov_count;
   }

   vkr_context_add_resource(ctx, att);
}

static void
vkr_context_attach_resource(struct virgl_context *base, struct virgl_resource *res)
{
   struct vkr_context *ctx = (struct vkr_context *)base;
   mtx_lock(&ctx->mutex);
   vkr_context_attach_resource_locked(base, res);
   mtx_unlock(&ctx->mutex);
}

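/* Drops a virgl resource from the context: unsets the reply stream if the
 * resource backs it, stops and destroys any rings built on the resource
 * (marking the decoder fatal), unmaps shared-memory attachments, and removes
 * the attachment from the resource table.
 */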
static void
vkr_context_detach_resource(struct virgl_context *base, struct virgl_resource *res)
{
   struct vkr_context *ctx = (struct vkr_context *)base;

   mtx_lock(&ctx->mutex);

   const struct vkr_resource_attachment *att = ctx->encoder.stream.attachment;
   if (att && att->resource == res) {
      /* TODO vkSetReplyCommandStreamMESA should support res_id 0 to unset.
       * Until then, and until we can ignore older guests, treat this as
       * non-fatal.
       */
      vkr_cs_encoder_set_stream(&ctx->encoder, NULL, 0, 0);
   }

   struct vkr_ring *ring, *ring_tmp;
   LIST_FOR_EACH_ENTRY_SAFE (ring, ring_tmp, &ctx->rings, head) {
      if (ring->attachment->resource != res)
         continue;

      vkr_cs_decoder_set_fatal(&ctx->decoder);
      mtx_unlock(&ctx->mutex);

      vkr_ring_stop(ring);

      mtx_lock(&ctx->mutex);
      vkr_ring_destroy(ring);
   }

   if (res->fd_type == VIRGL_RESOURCE_FD_SHM) {
      struct vkr_resource_attachment *att = vkr_context_get_resource(ctx, res->res_id);
      if (att)
         munmap(att->shm_iov.iov_base, att->shm_iov.iov_len);
   }

   vkr_context_remove_resource(ctx, res->res_id);

   mtx_unlock(&ctx->mutex);
}

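/* Tears down the context: stops and destroys all rings, destroys the instance
 * (and with it the remaining Vulkan objects), frees the object and resource
 * tables, releases any leftover signaled syncs, and finally destroys the
 * decoder, the mutex, and the context itself.
 */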
static void
vkr_context_destroy(struct virgl_context *base)
{
   /* TODO Move the entire teardown process to a separate thread so that the main thread
    * cannot get blocked by the vkDeviceWaitIdle upon device destruction.
    */
   struct vkr_context *ctx = (struct vkr_context *)base;

   struct vkr_ring *ring, *ring_tmp;
   LIST_FOR_EACH_ENTRY_SAFE (ring, ring_tmp, &ctx->rings, head) {
      vkr_ring_stop(ring);
      vkr_ring_destroy(ring);
   }

   if (ctx->instance) {
      vkr_log("destroying context %d (%s) with a valid instance", ctx->base.ctx_id,
              vkr_context_get_name(ctx));

      vkr_instance_destroy(ctx, ctx->instance);
   }

   _mesa_hash_table_destroy(ctx->resource_table, vkr_context_free_resource);
   _mesa_hash_table_destroy(ctx->object_table, vkr_context_free_object);

   struct vkr_queue_sync *sync, *tmp;
   LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &ctx->signaled_syncs, head)
      free(sync);

   /* entries on this list are vkr_cpu_sync objects (see vkr_alloc_cpu_sync) */
   struct vkr_cpu_sync *cpu_sync, *cpu_sync_tmp;
   LIST_FOR_EACH_ENTRY_SAFE (cpu_sync, cpu_sync_tmp, &ctx->signaled_cpu_syncs, head)
      free(cpu_sync);

   if (ctx->fence_eventfd >= 0)
      close(ctx->fence_eventfd);

   vkr_cs_decoder_fini(&ctx->decoder);

   mtx_destroy(&ctx->mutex);
   free(ctx->debug_name);
   free(ctx);
}

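/* Hooks this context's implementation into the generic virgl_context
 * interface used by the virglrenderer core.
 */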
static void
vkr_context_init_base(struct vkr_context *ctx)
{
   ctx->base.destroy = vkr_context_destroy;
   ctx->base.attach_resource = vkr_context_attach_resource;
   ctx->base.detach_resource = vkr_context_detach_resource;
   ctx->base.transfer_3d = vkr_context_transfer_3d;
   ctx->base.get_blob = vkr_context_get_blob;
   ctx->base.submit_cmd = vkr_context_submit_cmd;

   ctx->base.get_fencing_fd = vkr_context_get_fencing_fd;
   ctx->base.retire_fences = vkr_context_retire_fences;
   ctx->base.submit_fence = vkr_context_submit_fence;
}

static uint32_t
vkr_hash_u64(const void *key)
{
   return XXH32(key, sizeof(uint64_t), 0);
}

static bool
vkr_key_u64_equal(const void *key1, const void *key2)
{
   return *(const uint64_t *)key1 == *(const uint64_t *)key2;
}

void
vkr_context_free_object(struct hash_entry *entry)
{
   struct vkr_object *obj = entry->data;
   free(obj);
}

void
vkr_context_free_resource(struct hash_entry *entry)
{
   struct vkr_resource_attachment *att = entry->data;
   free(att);
}

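/* Creates a Venus renderer context.  The object table is keyed by 64-bit
 * Vulkan object ids (hashed with vkr_hash_u64), the resource table by 32-bit
 * virgl resource ids, and a fence eventfd is created only when fences are
 * signaled from per-queue sync threads rather than through the async fence
 * callback.
 *
 * Illustrative usage sketch (not part of this file; how callers outside this
 * file actually drive the context may differ):
 *
 *    const char name[] = "demo";
 *    struct virgl_context *base = vkr_context_create(sizeof(name) - 1, name);
 *    if (base) {
 *       // drive base->submit_cmd / base->get_blob / base->submit_fence ...
 *       base->destroy(base);
 *    }
 */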
struct virgl_context *
vkr_context_create(size_t debug_len, const char *debug_name)
{
   struct vkr_context *ctx;

   ctx = calloc(1, sizeof(*ctx));
   if (!ctx)
      return NULL;

   ctx->debug_name = malloc(debug_len + 1);
   if (!ctx->debug_name)
      goto err_debug_name;

   memcpy(ctx->debug_name, debug_name, debug_len);
   ctx->debug_name[debug_len] = '\0';

#ifdef ENABLE_VENUS_VALIDATE
   ctx->validate_level = VKR_CONTEXT_VALIDATE_ON;
   ctx->validate_fatal = false; /* TODO set this to true */
#else
   ctx->validate_level = VKR_CONTEXT_VALIDATE_NONE;
   ctx->validate_fatal = false;
#endif
   if (VKR_DEBUG(VALIDATE))
      ctx->validate_level = VKR_CONTEXT_VALIDATE_FULL;

   if (mtx_init(&ctx->mutex, mtx_plain) != thrd_success)
      goto err_mtx_init;

   ctx->object_table = _mesa_hash_table_create(NULL, vkr_hash_u64, vkr_key_u64_equal);
   if (!ctx->object_table)
      goto err_ctx_object_table;

   ctx->resource_table =
      _mesa_hash_table_create(NULL, _mesa_hash_u32, _mesa_key_u32_equal);
   if (!ctx->resource_table)
      goto err_ctx_resource_table;

   vkr_cs_decoder_init(&ctx->decoder, ctx->object_table);
   vkr_cs_encoder_init(&ctx->encoder, &ctx->decoder.fatal_error);

   vkr_context_init_base(ctx);
   vkr_context_init_dispatch(ctx);

   if ((vkr_renderer_flags & VKR_RENDERER_THREAD_SYNC) &&
       !(vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)) {
      ctx->fence_eventfd = create_eventfd(0);
      if (ctx->fence_eventfd < 0)
         goto err_eventfd;
   } else {
      ctx->fence_eventfd = -1;
   }

   list_inithead(&ctx->rings);
   list_inithead(&ctx->busy_queues);
   list_inithead(&ctx->signaled_syncs);
   list_inithead(&ctx->signaled_cpu_syncs);

   return &ctx->base;

err_eventfd:
   _mesa_hash_table_destroy(ctx->resource_table, vkr_context_free_resource);
err_ctx_resource_table:
   _mesa_hash_table_destroy(ctx->object_table, vkr_context_free_object);
err_ctx_object_table:
   mtx_destroy(&ctx->mutex);
err_mtx_init:
   free(ctx->debug_name);
err_debug_name:
   free(ctx);
   return NULL;
}