/*
 * Copyright 2021 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include "proxy_context.h"

#include <fcntl.h>
#include <poll.h>
#include <sys/mman.h>
#include <unistd.h>

#include "server/render_protocol.h"
#include "util/anon_file.h"
#include "util/bitscan.h"

#include "proxy_client.h"

struct proxy_fence {
   uint32_t flags;
   uint32_t seqno;
   uint64_t fence_id;
   struct list_head head;
};

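/* The resource table tracks res_ids this context has already made known to
 * the render server, whether created via RENDER_CONTEXT_OP_CREATE_RESOURCE
 * or imported on attach.  Only the keys matter; no per-entry data is stored.
 */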
static inline void
proxy_context_resource_add(struct proxy_context *ctx, uint32_t res_id)
{
   assert(!_mesa_hash_table_search(ctx->resource_table, (void *)(uintptr_t)res_id));
   _mesa_hash_table_insert(ctx->resource_table, (void *)(uintptr_t)res_id, NULL);
}

static inline bool
proxy_context_resource_find(struct proxy_context *ctx, uint32_t res_id)
{
   return _mesa_hash_table_search(ctx->resource_table, (void *)(uintptr_t)res_id);
}

static inline void
proxy_context_resource_remove(struct proxy_context *ctx, uint32_t res_id)
{
   _mesa_hash_table_remove_key(ctx->resource_table, (void *)(uintptr_t)res_id);
}

static inline bool
proxy_context_resource_table_init(struct proxy_context *ctx)
{
   ctx->resource_table = _mesa_hash_table_create_u32_keys(NULL);
   return ctx->resource_table;
}

static inline void
proxy_context_resource_table_fini(struct proxy_context *ctx)
{
   _mesa_hash_table_destroy(ctx->resource_table, NULL);
}

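/* Seqnos use wrap-around (serial number) comparison: a fence is signaled
 * when it is at most INT32_MAX steps behind cur_seqno.  For example, with
 * fence->seqno == UINT32_MAX and cur_seqno == 1, d computes to 2, so the
 * fence is signaled even though the 32-bit counter has wrapped.
 */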
static bool
proxy_fence_is_signaled(const struct proxy_fence *fence, uint32_t cur_seqno)
{
   /* takes wrapping into account */
   const uint32_t d = cur_seqno - fence->seqno;
   return d < INT32_MAX;
}

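/* proxy_fence objects are recycled through a per-context free list.  The
 * list can be touched from the sync thread only when
 * VIRGL_RENDERER_ASYNC_FENCE_CB is set, which is why the locking below is
 * conditional on that flag.
 */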
static struct proxy_fence *
proxy_context_alloc_fence(struct proxy_context *ctx)
{
   struct proxy_fence *fence = NULL;

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_lock(&ctx->free_fences_mutex);

   if (!list_is_empty(&ctx->free_fences)) {
      fence = list_first_entry(&ctx->free_fences, struct proxy_fence, head);
      list_del(&fence->head);
   }

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_unlock(&ctx->free_fences_mutex);

   return fence ? fence : malloc(sizeof(*fence));
}

static void
proxy_context_free_fence(struct proxy_context *ctx, struct proxy_fence *fence)
{
   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_lock(&ctx->free_fences_mutex);

   list_add(&fence->head, &ctx->free_fences);

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_unlock(&ctx->free_fences_mutex);
}

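/* Per-timeline seqnos live in the shmem and are advanced by the render
 * server; the proxy reads them with atomic loads.
 */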
static uint32_t
proxy_context_load_timeline_seqno(struct proxy_context *ctx, uint32_t ring_idx)
{
   return atomic_load(&ctx->timeline_seqnos[ring_idx]);
}

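/* Retire signaled fences on one timeline, returning true when the timeline
 * has drained.  A prolonged stall with pending fences triggers a socket
 * check, and a disconnected socket (i.e., a crashed server) force-retires
 * all fences so waiters do not hang forever.
 */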
static bool
proxy_context_retire_timeline_fences_locked(struct proxy_context *ctx,
                                            uint32_t ring_idx,
                                            uint32_t cur_seqno)
{
   struct proxy_timeline *timeline = &ctx->timelines[ring_idx];
   bool force_retire_all = false;

   /* if no progress is made after a while, check whether the socket has been
    * disconnected (i.e., the other end has crashed)
    */
   if (timeline->cur_seqno == cur_seqno && !list_is_empty(&timeline->fences)) {
      timeline->cur_seqno_stall_count++;
      if (timeline->cur_seqno_stall_count < 100 ||
          proxy_socket_is_connected(&ctx->socket))
         return false;

      /* socket has been disconnected */
      force_retire_all = true;
   }

   timeline->cur_seqno = cur_seqno;
   timeline->cur_seqno_stall_count = 0;

   list_for_each_entry_safe (struct proxy_fence, fence, &timeline->fences, head) {
      if (!proxy_fence_is_signaled(fence, timeline->cur_seqno) && !force_retire_all)
         return false;

      ctx->base.fence_retire(&ctx->base, ring_idx, fence->fence_id);

      list_del(&fence->head);
      proxy_context_free_fence(ctx, fence);
   }

   return true;
}

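/* Scan all busy timelines and retire fences whose seqnos have been reached.
 * A timeline stays in the busy mask only while it still has pending fences.
 */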
static void
proxy_context_retire_fences_internal(struct proxy_context *ctx)
{
   if (ctx->sync_thread.fence_eventfd >= 0)
      flush_eventfd(ctx->sync_thread.fence_eventfd);

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_lock(&ctx->timeline_mutex);

   uint64_t new_busy_mask = 0;
   uint64_t old_busy_mask = ctx->timeline_busy_mask;
   while (old_busy_mask) {
      const uint32_t ring_idx = u_bit_scan64(&old_busy_mask);
      const uint32_t cur_seqno = proxy_context_load_timeline_seqno(ctx, ring_idx);
      if (!proxy_context_retire_timeline_fences_locked(ctx, ring_idx, cur_seqno))
         new_busy_mask |= 1ull << ring_idx;
   }

   ctx->timeline_busy_mask = new_busy_mask;

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_unlock(&ctx->timeline_mutex);
}

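/* Only used with VIRGL_RENDERER_ASYNC_FENCE_CB.  The eventfd entry waits for
 * seqno-update notifications from the render server; the socket entry leaves
 * .events at 0, so poll() wakes on it only for POLLHUP/POLLERR (which are
 * always reported), e.g. when the other end goes away.
 */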
static int
proxy_context_sync_thread(void *arg)
{
   struct proxy_context *ctx = arg;
   struct pollfd poll_fds[2] = {
      [0] = {
         .fd = ctx->sync_thread.fence_eventfd,
         .events = POLLIN,
      },
      [1] = {
         .fd = ctx->socket.fd,
      },
   };

   assert(proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB);

   while (!ctx->sync_thread.stop) {
      const int ret = poll(poll_fds, ARRAY_SIZE(poll_fds), -1);
      if (ret <= 0) {
         if (ret < 0 && (errno == EINTR || errno == EAGAIN))
            continue;

         proxy_log("failed to poll fence eventfd");
         break;
      }

      proxy_context_retire_fences_internal(ctx);
   }

   return 0;
}

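/* Add a fence to the per-ring timeline and send its seqno to the render
 * server.  The fence retires once the server-side seqno in shmem catches up
 * to it; on send failure, the timeline and busy mask are rolled back.
 */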
static int
proxy_context_submit_fence(struct virgl_context *base,
                           uint32_t flags,
                           uint32_t ring_idx,
                           uint64_t fence_id)
{
   struct proxy_context *ctx = (struct proxy_context *)base;
   const uint64_t old_busy_mask = ctx->timeline_busy_mask;

   if (ring_idx >= PROXY_CONTEXT_TIMELINE_COUNT)
      return -EINVAL;

   struct proxy_timeline *timeline = &ctx->timelines[ring_idx];
   struct proxy_fence *fence = proxy_context_alloc_fence(ctx);
   if (!fence)
      return -ENOMEM;

   fence->flags = flags;
   fence->seqno = timeline->next_seqno++;
   fence->fence_id = fence_id;

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_lock(&ctx->timeline_mutex);

   list_addtail(&fence->head, &timeline->fences);
   ctx->timeline_busy_mask |= 1ull << ring_idx;

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_unlock(&ctx->timeline_mutex);

   const struct render_context_op_submit_fence_request req = {
      .header.op = RENDER_CONTEXT_OP_SUBMIT_FENCE,
      .flags = flags,
      .ring_index = ring_idx,
      .seqno = fence->seqno,
   };
   if (proxy_socket_send_request(&ctx->socket, &req, sizeof(req)))
      return 0;

   /* recover timeline fences and busy_mask on submit_fence request failure */
   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_lock(&ctx->timeline_mutex);

   list_del(&fence->head);
   ctx->timeline_busy_mask = old_busy_mask;

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB)
      mtx_unlock(&ctx->timeline_mutex);

   proxy_context_free_fence(ctx, fence);
   proxy_log("failed to submit fence");
   return -1;
}

static void
proxy_context_retire_fences(struct virgl_context *base)
{
   struct proxy_context *ctx = (struct proxy_context *)base;

   assert(!(proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB));
   proxy_context_retire_fences_internal(ctx);
}

static int
proxy_context_get_fencing_fd(struct virgl_context *base)
{
   struct proxy_context *ctx = (struct proxy_context *)base;

   assert(!(proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB));
   return ctx->sync_thread.fence_eventfd;
}

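/* The first sizeof(req.cmd) bytes of the command buffer travel inline in the
 * request; any remainder is streamed as a follow-up send on the same socket.
 */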
static int
proxy_context_submit_cmd(struct virgl_context *base, const void *buffer, size_t size)
{
   struct proxy_context *ctx = (struct proxy_context *)base;

   if (!size)
      return 0;

   struct render_context_op_submit_cmd_request req = {
      .header.op = RENDER_CONTEXT_OP_SUBMIT_CMD,
      .size = size,
   };

   const size_t inlined = MIN2(size, sizeof(req.cmd));
   memcpy(req.cmd, buffer, inlined);

   if (!proxy_socket_send_request(&ctx->socket, &req, sizeof(req))) {
      proxy_log("failed to submit cmd");
      return -1;
   }

   if (size > inlined) {
      if (!proxy_socket_send_request(&ctx->socket, (const char *)buffer + inlined,
                                     size - inlined)) {
         proxy_log("failed to submit large cmd buffer");
         return -1;
      }
   }

   /* XXX this forces a roundtrip to avoid surprises; vtest requires it at
    * least
    */
   struct render_context_op_submit_cmd_reply reply;
   if (!proxy_socket_receive_reply(&ctx->socket, &reply, sizeof(reply))) {
      proxy_log("failed to get submit result");
      return -1;
   }

   return reply.ok ? 0 : -1;
}

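/* Reject F_SEAL_WRITE because the fd is expected to back writable mappings,
 * which the kernel refuses for write-sealed memfds.  Note that a failed
 * F_GET_SEALS returns -1, which also trips the blocked_seals check, so the
 * validation fails closed.
 */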
static bool
validate_resource_fd_shm(int fd, uint64_t expected_size)
{
   static const int blocked_seals = F_SEAL_WRITE;

   const int seals = fcntl(fd, F_GET_SEALS);
   if (seals & blocked_seals) {
      proxy_log("failed to validate shm seals(%d): blocked(%d)", seals, blocked_seals);
      return false;
   }

   const uint64_t size = lseek(fd, 0, SEEK_END);
   if (size != expected_size) {
      proxy_log("failed to validate shm size(%" PRIu64 ") expected(%" PRIu64 ")", size,
                expected_size);
      return false;
   }

   return true;
}

static inline int
add_required_seals_to_fd(int fd)
{
   return fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_SHRINK | F_SEAL_GROW);
}

static int
proxy_context_get_blob(struct virgl_context *base,
                       uint32_t res_id,
                       uint64_t blob_id,
                       uint64_t blob_size,
                       uint32_t blob_flags,
                       struct virgl_context_blob *blob)
{
   /* RENDER_CONTEXT_OP_CREATE_RESOURCE implies resource attach, so the proxy
    * tracks resources created here to avoid attaching the same resource again
    * when the attach_resource callback fires.
    */
   struct proxy_context *ctx = (struct proxy_context *)base;

   const struct render_context_op_create_resource_request req = {
      .header.op = RENDER_CONTEXT_OP_CREATE_RESOURCE,
      .res_id = res_id,
      .blob_id = blob_id,
      .blob_size = blob_size,
      .blob_flags = blob_flags,
   };
   if (!proxy_socket_send_request(&ctx->socket, &req, sizeof(req))) {
      proxy_log("failed to get blob %" PRIu64, blob_id);
      return -1;
   }

   struct render_context_op_create_resource_reply reply;
   int reply_fd;
   int reply_fd_count;
   if (!proxy_socket_receive_reply_with_fds(&ctx->socket, &reply, sizeof(reply),
                                            &reply_fd, 1, &reply_fd_count)) {
      proxy_log("failed to get reply of blob %" PRIu64, blob_id);
      return -1;
   }

   if (!reply_fd_count) {
      proxy_log("invalid reply for blob %" PRIu64, blob_id);
      return -1;
   }

   bool reply_fd_valid = false;
   switch (reply.fd_type) {
   case VIRGL_RESOURCE_FD_DMABUF:
      /* TODO validate the fd is dmabuf >= blob_size */
      reply_fd_valid = true;
      break;
   case VIRGL_RESOURCE_FD_OPAQUE:
      /* this will be validated when imported by the client */
      reply_fd_valid = true;
      break;
   case VIRGL_RESOURCE_FD_SHM:
      /* validate the seals and size here */
      reply_fd_valid = !add_required_seals_to_fd(reply_fd) &&
                       validate_resource_fd_shm(reply_fd, blob_size);
      break;
   default:
      break;
   }
   if (!reply_fd_valid) {
      proxy_log("invalid fd type %d for blob %" PRIu64, reply.fd_type, blob_id);
      close(reply_fd);
      return -1;
   }

   blob->type = reply.fd_type;
   blob->u.fd = reply_fd;
   blob->map_info = reply.map_info;

   proxy_context_resource_add(ctx, res_id);

   return 0;
}

static int
proxy_context_transfer_3d(struct virgl_context *base,
                          struct virgl_resource *res,
                          UNUSED const struct vrend_transfer_info *info,
                          UNUSED int transfer_mode)
{
   struct proxy_context *ctx = (struct proxy_context *)base;

   proxy_log("no transfer support for ctx %d and res %d", ctx->base.ctx_id, res->res_id);
   return -1;
}

static void
proxy_context_detach_resource(struct virgl_context *base, struct virgl_resource *res)
{
   struct proxy_context *ctx = (struct proxy_context *)base;
   const uint32_t res_id = res->res_id;

   const struct render_context_op_destroy_resource_request req = {
      .header.op = RENDER_CONTEXT_OP_DESTROY_RESOURCE,
      .res_id = res_id,
   };
   if (!proxy_socket_send_request(&ctx->socket, &req, sizeof(req)))
      proxy_log("failed to detach res %d", res_id);

   proxy_context_resource_remove(ctx, res_id);
}

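/* Make a resource known to the render server by exporting an fd and sending
 * an import request.  Resources created through get_blob are skipped because
 * RENDER_CONTEXT_OP_CREATE_RESOURCE already attached them server-side.
 */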
static void
proxy_context_attach_resource(struct virgl_context *base, struct virgl_resource *res)
{
   struct proxy_context *ctx = (struct proxy_context *)base;
   const uint32_t res_id = res->res_id;

   /* avoid importing resources created from RENDER_CONTEXT_OP_CREATE_RESOURCE */
   if (proxy_context_resource_find(ctx, res_id))
      return;

   enum virgl_resource_fd_type res_fd_type = res->fd_type;
   int res_fd = res->fd;
   bool close_res_fd = false;
   if (res_fd_type == VIRGL_RESOURCE_FD_INVALID) {
      res_fd_type = virgl_resource_export_fd(res, &res_fd);
      if (res_fd_type == VIRGL_RESOURCE_FD_INVALID) {
         proxy_log("failed to export res %d", res_id);
         return;
      }

      close_res_fd = true;
   }

   /* the proxy ignores iovs since transfer_3d is not supported */
   const struct render_context_op_import_resource_request req = {
      .header.op = RENDER_CONTEXT_OP_IMPORT_RESOURCE,
      .res_id = res_id,
      .fd_type = res_fd_type,
      .size = virgl_resource_get_size(res),
   };
   if (!proxy_socket_send_request_with_fds(&ctx->socket, &req, sizeof(req), &res_fd, 1))
      proxy_log("failed to attach res %d", res_id);

   if (res_fd >= 0 && close_res_fd)
      close(res_fd);

   proxy_context_resource_add(ctx, res_id);
}

static void
proxy_context_destroy(struct virgl_context *base)
{
   struct proxy_context *ctx = (struct proxy_context *)base;

   /* ask the server process to terminate the context process */
   if (!proxy_client_destroy_context(ctx->client, ctx->base.ctx_id))
      proxy_log("failed to destroy ctx %d", ctx->base.ctx_id);

   if (ctx->sync_thread.fence_eventfd >= 0) {
      if (ctx->sync_thread.created) {
         ctx->sync_thread.stop = true;
         write_eventfd(ctx->sync_thread.fence_eventfd, 1);
         thrd_join(ctx->sync_thread.thread, NULL);
      }

      close(ctx->sync_thread.fence_eventfd);
   }

   if (ctx->shmem.ptr)
      munmap(ctx->shmem.ptr, ctx->shmem.size);
   if (ctx->shmem.fd >= 0)
      close(ctx->shmem.fd);

   if (ctx->timeline_seqnos) {
      for (uint32_t i = 0; i < PROXY_CONTEXT_TIMELINE_COUNT; i++) {
         struct proxy_timeline *timeline = &ctx->timelines[i];
         list_for_each_entry_safe (struct proxy_fence, fence, &timeline->fences, head)
            free(fence);
      }
   }
   mtx_destroy(&ctx->timeline_mutex);

   list_for_each_entry_safe (struct proxy_fence, fence, &ctx->free_fences, head)
      free(fence);
   mtx_destroy(&ctx->free_fences_mutex);

   proxy_context_resource_table_fini(ctx);

   proxy_socket_fini(&ctx->socket);

   free(ctx);
}

static void
proxy_context_init_base(struct proxy_context *ctx)
{
   ctx->base.destroy = proxy_context_destroy;
   ctx->base.attach_resource = proxy_context_attach_resource;
   ctx->base.detach_resource = proxy_context_detach_resource;
   ctx->base.transfer_3d = proxy_context_transfer_3d;
   ctx->base.get_blob = proxy_context_get_blob;
   ctx->base.submit_cmd = proxy_context_submit_cmd;

   ctx->base.get_fencing_fd = proxy_context_get_fencing_fd;
   ctx->base.retire_fences = proxy_context_retire_fences;
   ctx->base.submit_fence = proxy_context_submit_fence;
}

static bool
proxy_context_init_fencing(struct proxy_context *ctx)
{
   /* The render server updates the shmem for the current seqnos and
    * optionally notifies using the eventfd.  That means, when only
    * VIRGL_RENDERER_THREAD_SYNC is set, we just need to set up the eventfd.
    * When VIRGL_RENDERER_ASYNC_FENCE_CB is also set, we need to create a
    * sync thread as well.
    *
    * Fence polling can always check the shmem directly.
    */
   if (!(proxy_renderer.flags & VIRGL_RENDERER_THREAD_SYNC))
      return true;

   ctx->sync_thread.fence_eventfd = create_eventfd(0);
   if (ctx->sync_thread.fence_eventfd < 0) {
      proxy_log("failed to create fence eventfd");
      return false;
   }

   if (proxy_renderer.flags & VIRGL_RENDERER_ASYNC_FENCE_CB) {
      int ret = thrd_create(&ctx->sync_thread.thread, proxy_context_sync_thread, ctx);
      if (ret != thrd_success) {
         proxy_log("failed to create sync thread");
         return false;
      }
      ctx->sync_thread.created = true;
   }

   return true;
}

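/* The shmem layout is simply one atomic_uint seqno per timeline.  The render
 * server advances these as fences complete; the proxy compares them against
 * pending fence seqnos to retire fences.
 */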
static bool
proxy_context_init_timelines(struct proxy_context *ctx)
{
   atomic_uint *timeline_seqnos = ctx->shmem.ptr;
   for (uint32_t i = 0; i < ARRAY_SIZE(ctx->timelines); i++) {
      atomic_init(&timeline_seqnos[i], 0);

      struct proxy_timeline *timeline = &ctx->timelines[i];
      timeline->cur_seqno = 0;
      timeline->next_seqno = 1;
      list_inithead(&timeline->fences);
   }

   ctx->timeline_seqnos = timeline_seqnos;

   return true;
}

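/* Allocate a memfd with the required seals applied and optionally map it.
 * Returns the fd, or -1 on failure; when out_ptr is set, *out_ptr receives
 * a shared read-write mapping.
 */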
static int
alloc_memfd(const char *name, size_t size, void **out_ptr)
{
   int fd = os_create_anonymous_file(size, name);
   if (fd < 0)
      return -1;

   int ret = add_required_seals_to_fd(fd);
   if (ret)
      goto fail;

   if (!out_ptr)
      return fd;

   void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
   if (ptr == MAP_FAILED)
      goto fail;

   *out_ptr = ptr;
   return fd;

fail:
   close(fd);
   return -1;
}

static bool
proxy_context_init_shmem(struct proxy_context *ctx)
{
   const size_t shmem_size = sizeof(*ctx->timeline_seqnos) * PROXY_CONTEXT_TIMELINE_COUNT;
   ctx->shmem.fd = alloc_memfd("proxy-ctx", shmem_size, &ctx->shmem.ptr);
   if (ctx->shmem.fd < 0)
      return false;

   ctx->shmem.size = shmem_size;

   return true;
}

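/* Complete initialization by sending RENDER_CONTEXT_OP_INIT to the context
 * process, along with the shmem fd and, if fencing set one up, the fence
 * eventfd.
 */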
static bool
proxy_context_init(struct proxy_context *ctx, uint32_t ctx_flags)
{
   if (!proxy_context_init_shmem(ctx) || !proxy_context_init_timelines(ctx) ||
       !proxy_context_init_fencing(ctx) || !proxy_context_resource_table_init(ctx))
      return false;

   const struct render_context_op_init_request req = {
      .header.op = RENDER_CONTEXT_OP_INIT,
      .flags = ctx_flags,
      .shmem_size = ctx->shmem.size,
   };
   const int req_fds[2] = { ctx->shmem.fd, ctx->sync_thread.fence_eventfd };
   const int req_fd_count = req_fds[1] >= 0 ? 2 : 1;
   if (!proxy_socket_send_request_with_fds(&ctx->socket, &req, sizeof(req), req_fds,
                                           req_fd_count)) {
      proxy_log("failed to initialize context");
      return false;
   }

   return true;
}

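/* Entry point used by the proxy renderer.  The fds are initialized to -1 up
 * front so that proxy_context_destroy can safely tear down a context that
 * failed partway through proxy_context_init.
 */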
struct virgl_context *
proxy_context_create(uint32_t ctx_id,
                     uint32_t ctx_flags,
                     size_t debug_len,
                     const char *debug_name)
{
   struct proxy_client *client = proxy_renderer.client;
   struct proxy_context *ctx;

   int ctx_fd;
   if (!proxy_client_create_context(client, ctx_id, debug_len, debug_name, &ctx_fd)) {
      proxy_log("failed to create a context");
      return NULL;
   }

   ctx = calloc(1, sizeof(*ctx));
   if (!ctx) {
      close(ctx_fd);
      return NULL;
   }

   proxy_context_init_base(ctx);
   ctx->client = client;
   proxy_socket_init(&ctx->socket, ctx_fd);
   ctx->shmem.fd = -1;
   mtx_init(&ctx->timeline_mutex, mtx_plain);
   mtx_init(&ctx->free_fences_mutex, mtx_plain);
   list_inithead(&ctx->free_fences);
   ctx->sync_thread.fence_eventfd = -1;

   if (!proxy_context_init(ctx, ctx_flags)) {
      proxy_context_destroy(&ctx->base);
      return NULL;
   }

   return &ctx->base;
}