xref: /aosp_15_r20/external/virglrenderer/server/render_context.c (revision bbecb9d118dfdb95f99bd754f8fa9be01f189df3)
1 /*
2  * Copyright 2021 Google LLC
3  * SPDX-License-Identifier: MIT
4  */
5 
6 #include "render_context.h"
7 
8 #include <sys/mman.h>
9 
10 #include "util/u_thread.h"
11 #include "virgl_util.h"
12 #include "virglrenderer.h"
13 #include "vrend_iov.h"
14 
15 #include "render_virgl.h"
16 
17 static bool
render_context_import_resource(struct render_context * ctx,const struct render_context_op_import_resource_request * req,int res_fd)18 render_context_import_resource(struct render_context *ctx,
19                                const struct render_context_op_import_resource_request *req,
20                                int res_fd)
21 {
22    const uint32_t res_id = req->res_id;
23    const enum virgl_resource_fd_type fd_type = req->fd_type;
24    const uint64_t size = req->size;
25 
26    if (fd_type == VIRGL_RESOURCE_FD_INVALID || !size) {
27       render_log("failed to attach invalid resource %d", res_id);
28       return false;
29    }
30 
31    uint32_t import_fd_type;
32    switch (fd_type) {
33    case VIRGL_RESOURCE_FD_DMABUF:
34       import_fd_type = VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF;
35       break;
36    case VIRGL_RESOURCE_FD_OPAQUE:
37       import_fd_type = VIRGL_RENDERER_BLOB_FD_TYPE_OPAQUE;
38       break;
39    case VIRGL_RESOURCE_FD_SHM:
40       import_fd_type = VIRGL_RENDERER_BLOB_FD_TYPE_SHM;
41       break;
42    default:
43       import_fd_type = 0;
44       break;
45    }
46    const struct virgl_renderer_resource_import_blob_args import_args = {
47       .res_handle = res_id,
48       .blob_mem = VIRGL_RENDERER_BLOB_MEM_HOST3D,
49       .fd_type = import_fd_type,
50       .fd = res_fd,
51       .size = size,
52    };
53 
54    int ret = virgl_renderer_resource_import_blob(&import_args);
55    if (ret) {
56       render_log("failed to import blob resource %d (%d)", res_id, ret);
57       return false;
58    }
59 
60    virgl_renderer_ctx_attach_resource(ctx->ctx_id, res_id);
61 
62    return true;
63 }
64 
65 void
render_context_update_timeline(struct render_context * ctx,uint32_t ring_idx,uint32_t seqno)66 render_context_update_timeline(struct render_context *ctx,
67                                uint32_t ring_idx,
68                                uint32_t seqno)
69 {
70    /* this can be called by the context's main thread and sync threads */
71    atomic_store(&ctx->shmem_timelines[ring_idx], seqno);
72    if (ctx->fence_eventfd >= 0)
73       write_eventfd(ctx->fence_eventfd, 1);
74 }
75 
/* Creates the renderer context and maps the shared-memory fence timelines.
 *
 * On success, ctx takes ownership of shmem_fd and fence_eventfd.  On failure
 * nothing is stored and the caller remains responsible for closing both fds.
 */
static bool
render_context_init_virgl_context(struct render_context *ctx,
                                  const struct render_context_op_init_request *req,
                                  int shmem_fd,
                                  int fence_eventfd)
{
   /* truncating division: assumes shmem_size is a multiple of the timeline
    * element size -- any remainder is simply unused (TODO confirm with the
    * proxy side)
    */
   const int timeline_count = req->shmem_size / sizeof(*ctx->shmem_timelines);

   /* PROT_WRITE only: this process only stores seqnos into the timelines
    * (see render_context_update_timeline); presumably the proxy maps the
    * same memory for reading -- confirm against the proxy code
    */
   void *shmem_ptr = mmap(NULL, req->shmem_size, PROT_WRITE, MAP_SHARED, shmem_fd, 0);
   if (shmem_ptr == MAP_FAILED)
      return false;

   int ret = virgl_renderer_context_create_with_flags(ctx->ctx_id, req->flags,
                                                      ctx->name_len, ctx->name);
   if (ret) {
      /* undo the mapping; the fds are still owned by the caller */
      munmap(shmem_ptr, req->shmem_size);
      return false;
   }

   ctx->shmem_fd = shmem_fd;
   ctx->shmem_size = req->shmem_size;
   ctx->shmem_ptr = shmem_ptr;
   ctx->shmem_timelines = shmem_ptr;

   /* start every timeline at seqno 0 */
   for (int i = 0; i < timeline_count; i++)
      atomic_store(&ctx->shmem_timelines[i], 0);

   ctx->timeline_count = timeline_count;

   ctx->fence_eventfd = fence_eventfd;

   return true;
}
109 
/* Creates a HOST3D blob resource, exports an fd for it, and attaches it to
 * the context.
 *
 * On success, *out_res_fd is a newly exported fd owned by the caller (the
 * dispatch code sends it to the proxy and closes it).
 */
static bool
render_context_create_resource(struct render_context *ctx,
                               const struct render_context_op_create_resource_request *req,
                               enum virgl_resource_fd_type *out_fd_type,
                               uint32_t *out_map_info,
                               int *out_res_fd)
{
   const uint32_t res_id = req->res_id;
   const struct virgl_renderer_resource_create_blob_args blob_args = {
      .res_handle = res_id,
      .ctx_id = ctx->ctx_id,
      .blob_mem = VIRGL_RENDERER_BLOB_MEM_HOST3D,
      .blob_flags = req->blob_flags,
      .blob_id = req->blob_id,
      .size = req->blob_size,
   };
   int ret = virgl_renderer_resource_create_blob(&blob_args);
   if (ret) {
      render_log("failed to create blob resource");
      return false;
   }

   uint32_t map_info;
   ret = virgl_renderer_resource_get_map_info(res_id, &map_info);
   if (ret) {
      /* properly set map_info when the resource has no map cache info */
      map_info = VIRGL_RENDERER_MAP_CACHE_NONE;
   }

   uint32_t fd_type;
   int res_fd;
   ret = virgl_renderer_resource_export_blob(res_id, &fd_type, &res_fd);
   if (ret) {
      /* drop the reference taken by create_blob so the resource is freed */
      virgl_renderer_resource_unref(res_id);
      return false;
   }

   /* RENDER_CONTEXT_OP_CREATE_RESOURCE implies attach and proxy will not send
    * RENDER_CONTEXT_OP_IMPORT_RESOURCE to attach the resource again.
    */
   virgl_renderer_ctx_attach_resource(ctx->ctx_id, res_id);

   /* translate the renderer fd type to the wire enum; an unknown type maps
    * to 0 (presumably VIRGL_RESOURCE_FD_INVALID -- TODO confirm the enum)
    * while the exported fd is still handed back
    */
   switch (fd_type) {
   case VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF:
      *out_fd_type = VIRGL_RESOURCE_FD_DMABUF;
      break;
   case VIRGL_RENDERER_BLOB_FD_TYPE_OPAQUE:
      *out_fd_type = VIRGL_RESOURCE_FD_OPAQUE;
      break;
   case VIRGL_RENDERER_BLOB_FD_TYPE_SHM:
      *out_fd_type = VIRGL_RESOURCE_FD_SHM;
      break;
   default:
      *out_fd_type = 0;
   }

   *out_map_info = map_info;
   *out_res_fd = res_fd;

   return true;
}
171 
172 static bool
render_context_dispatch_submit_fence(struct render_context * ctx,const union render_context_op_request * req,UNUSED const int * fds,UNUSED int fd_count)173 render_context_dispatch_submit_fence(struct render_context *ctx,
174                                      const union render_context_op_request *req,
175                                      UNUSED const int *fds,
176                                      UNUSED int fd_count)
177 {
178    /* always merge fences */
179    assert(!(req->submit_fence.flags & ~VIRGL_RENDERER_FENCE_FLAG_MERGEABLE));
180    const uint32_t flags = VIRGL_RENDERER_FENCE_FLAG_MERGEABLE;
181    const uint32_t ring_idx = req->submit_fence.ring_index;
182    const uint32_t seqno = req->submit_fence.seqno;
183 
184    assert(ring_idx < (uint32_t)ctx->timeline_count);
185    int ret = virgl_renderer_context_create_fence(ctx->ctx_id, flags, ring_idx, seqno);
186 
187    return !ret;
188 }
189 
190 static bool
render_context_dispatch_submit_cmd(struct render_context * ctx,const union render_context_op_request * req,UNUSED const int * fds,UNUSED int fd_count)191 render_context_dispatch_submit_cmd(struct render_context *ctx,
192                                    const union render_context_op_request *req,
193                                    UNUSED const int *fds,
194                                    UNUSED int fd_count)
195 {
196    const int ndw = req->submit_cmd.size / sizeof(uint32_t);
197    void *cmd = (void *)req->submit_cmd.cmd;
198    if (req->submit_cmd.size > sizeof(req->submit_cmd.cmd)) {
199       cmd = malloc(req->submit_cmd.size);
200       if (!cmd)
201          return true;
202 
203       const size_t inlined = sizeof(req->submit_cmd.cmd);
204       const size_t remain = req->submit_cmd.size - inlined;
205 
206       memcpy(cmd, req->submit_cmd.cmd, inlined);
207       if (!render_socket_receive_data(&ctx->socket, (char *)cmd + inlined, remain)) {
208          free(cmd);
209          return false;
210       }
211    }
212 
213    int ret = virgl_renderer_submit_cmd(cmd, ctx->ctx_id, ndw);
214 
215    if (cmd != req->submit_cmd.cmd)
216       free(cmd);
217 
218    const struct render_context_op_submit_cmd_reply reply = {
219       .ok = !ret,
220    };
221    if (!render_socket_send_reply(&ctx->socket, &reply, sizeof(reply)))
222       return false;
223 
224    return true;
225 }
226 
227 static bool
render_context_dispatch_create_resource(struct render_context * ctx,const union render_context_op_request * req,UNUSED const int * fds,UNUSED int fd_count)228 render_context_dispatch_create_resource(struct render_context *ctx,
229                                         const union render_context_op_request *req,
230                                         UNUSED const int *fds,
231                                         UNUSED int fd_count)
232 {
233    struct render_context_op_create_resource_reply reply = {
234       .fd_type = VIRGL_RESOURCE_FD_INVALID,
235    };
236    int res_fd;
237    bool ok = render_context_create_resource(ctx, &req->create_resource, &reply.fd_type,
238                                             &reply.map_info, &res_fd);
239    if (!ok)
240       return render_socket_send_reply(&ctx->socket, &reply, sizeof(reply));
241 
242    ok =
243       render_socket_send_reply_with_fds(&ctx->socket, &reply, sizeof(reply), &res_fd, 1);
244    close(res_fd);
245 
246    return ok;
247 }
248 
249 static bool
render_context_dispatch_destroy_resource(UNUSED struct render_context * ctx,const union render_context_op_request * req,UNUSED const int * fds,UNUSED int fd_count)250 render_context_dispatch_destroy_resource(UNUSED struct render_context *ctx,
251                                          const union render_context_op_request *req,
252                                          UNUSED const int *fds,
253                                          UNUSED int fd_count)
254 {
255    virgl_renderer_resource_unref(req->destroy_resource.res_id);
256    return true;
257 }
258 
259 static bool
render_context_dispatch_import_resource(struct render_context * ctx,const union render_context_op_request * req,const int * fds,int fd_count)260 render_context_dispatch_import_resource(struct render_context *ctx,
261                                         const union render_context_op_request *req,
262                                         const int *fds,
263                                         int fd_count)
264 {
265    if (fd_count != 1) {
266       render_log("failed to attach resource with fd_count %d", fd_count);
267       return false;
268    }
269 
270    /* classic 3d resource with valid size reuses the blob import path here */
271    return render_context_import_resource(ctx, &req->import_resource, fds[0]);
272 }
273 
274 static bool
render_context_dispatch_init(struct render_context * ctx,const union render_context_op_request * req,const int * fds,int fd_count)275 render_context_dispatch_init(struct render_context *ctx,
276                              const union render_context_op_request *req,
277                              const int *fds,
278                              int fd_count)
279 {
280    if (fd_count != 1 && fd_count != 2)
281       return false;
282 
283    const int shmem_fd = fds[0];
284    const int fence_eventfd = fd_count == 2 ? fds[1] : -1;
285    return render_context_init_virgl_context(ctx, &req->init, shmem_fd, fence_eventfd);
286 }
287 
/* Handles RENDER_CONTEXT_OP_NOP: there is nothing to do; always succeeds. */
static bool
render_context_dispatch_nop(UNUSED struct render_context *ctx,
                            UNUSED const union render_context_op_request *req,
                            UNUSED const int *fds,
                            UNUSED int fd_count)
{
   return true;
}
296 
/* Per-op dispatch metadata: the exact request size expected on the wire, the
 * maximum number of fds the op may carry, and the handler to invoke.
 */
struct render_context_dispatch_entry {
   size_t expect_size;
   int max_fd_count;
   bool (*dispatch)(struct render_context *ctx,
                    const union render_context_op_request *req,
                    const int *fds,
                    int fd_count);
};

static const struct render_context_dispatch_entry
   render_context_dispatch_table[RENDER_CONTEXT_OP_COUNT] = {
/* expands to: [RENDER_CONTEXT_OP_NAME] = { sizeof(op request), max_fd, handler } */
#define RENDER_CONTEXT_DISPATCH(NAME, name, max_fd)                                      \
   [RENDER_CONTEXT_OP_##                                                                 \
      NAME] = { .expect_size = sizeof(struct render_context_op_##name##_request),        \
                .max_fd_count = (max_fd),                                                \
                .dispatch = render_context_dispatch_##name }
      RENDER_CONTEXT_DISPATCH(NOP, nop, 0),
      RENDER_CONTEXT_DISPATCH(INIT, init, 2),
      RENDER_CONTEXT_DISPATCH(CREATE_RESOURCE, create_resource, 0),
      RENDER_CONTEXT_DISPATCH(IMPORT_RESOURCE, import_resource, 1),
      RENDER_CONTEXT_DISPATCH(DESTROY_RESOURCE, destroy_resource, 0),
      RENDER_CONTEXT_DISPATCH(SUBMIT_CMD, submit_cmd, 0),
      RENDER_CONTEXT_DISPATCH(SUBMIT_FENCE, submit_fence, 0),
#undef RENDER_CONTEXT_DISPATCH
   };
322 
/* Receives one request (plus any fds) from the proxy socket and dispatches it.
 *
 * Returns false on receive error, malformed request, or handler failure; the
 * caller then tears the context down.  On the failure paths the received fds
 * are closed here; on success the handler owns them.
 */
static bool
render_context_dispatch(struct render_context *ctx)
{
   union render_context_op_request req;
   size_t req_size;
   int req_fds[8];
   int req_fd_count;
   if (!render_socket_receive_request_with_fds(&ctx->socket, &req, sizeof(req), &req_size,
                                               req_fds, ARRAY_SIZE(req_fds),
                                               &req_fd_count))
      return false;

   assert((unsigned int)req_fd_count <= ARRAY_SIZE(req_fds));

   if (req.header.op >= RENDER_CONTEXT_OP_COUNT) {
      render_log("invalid context op %d", req.header.op);
      goto fail;
   }

   const struct render_context_dispatch_entry *entry =
      &render_context_dispatch_table[req.header.op];
   /* the wire size must match the op's request struct exactly, and the op
    * may carry at most max_fd_count fds
    */
   if (entry->expect_size != req_size || entry->max_fd_count < req_fd_count) {
      render_log("invalid request size (%zu) or fd count (%d) for context op %d",
                 req_size, req_fd_count, req.header.op);
      goto fail;
   }

   /* virglrenderer calls are serialized under the dispatch lock */
   render_virgl_lock_dispatch();
   const bool ok = entry->dispatch(ctx, &req, req_fds, req_fd_count);
   render_virgl_unlock_dispatch();
   if (!ok) {
      render_log("failed to dispatch context op %d", req.header.op);
      goto fail;
   }

   return true;

fail:
   /* NOTE(review): assumes a failing handler has not consumed any of the
    * fds -- confirm the handlers' partial-failure paths
    */
   for (int i = 0; i < req_fd_count; i++)
      close(req_fds[i]);
   return false;
}
365 
static bool
render_context_run(struct render_context *ctx)
{
   /* serve requests until a receive error or a dispatch failure */
   bool ok = true;
   while (ok)
      ok = render_context_dispatch(ctx);

   return ok;
}
376 
377 static void
render_context_fini(struct render_context * ctx)378 render_context_fini(struct render_context *ctx)
379 {
380    render_virgl_lock_dispatch();
381    /* destroy the context first to join its sync threads and ring threads */
382    virgl_renderer_context_destroy(ctx->ctx_id);
383    render_virgl_unlock_dispatch();
384 
385    render_virgl_remove_context(ctx);
386 
387    if (ctx->shmem_ptr)
388       munmap(ctx->shmem_ptr, ctx->shmem_size);
389    if (ctx->shmem_fd >= 0)
390       close(ctx->shmem_fd);
391 
392    if (ctx->fence_eventfd >= 0)
393       close(ctx->fence_eventfd);
394 
395    if (ctx->name)
396       free(ctx->name);
397 
398    render_socket_fini(&ctx->socket);
399 }
400 
static void
render_context_set_thread_name(uint32_t ctx_id, const char *ctx_name)
{
   /* the buffer is deliberately small (16 bytes, the common kernel limit for
    * thread names); snprintf truncates longer names
    */
   char name[16];
   snprintf(name, sizeof(name), "virgl-%d-%s", ctx_id, ctx_name);
   u_thread_setname(name);
}
408 
409 static bool
render_context_init_name(struct render_context * ctx,uint32_t ctx_id,const char * ctx_name)410 render_context_init_name(struct render_context *ctx,
411                          uint32_t ctx_id,
412                          const char *ctx_name)
413 {
414    ctx->name_len = strlen(ctx_name);
415    ctx->name = malloc(ctx->name_len + 1);
416    if (!ctx->name)
417       return false;
418 
419    strcpy(ctx->name, ctx_name);
420 
421    render_context_set_thread_name(ctx_id, ctx_name);
422 
423 #ifdef _GNU_SOURCE
424    /* Sets the guest app executable name used by mesa to load app-specific driver
425     * configuration. */
426    program_invocation_name = ctx->name;
427    program_invocation_short_name = ctx->name;
428 #endif
429 
430    return true;
431 }
432 
433 static bool
render_context_init(struct render_context * ctx,const struct render_context_args * args)434 render_context_init(struct render_context *ctx, const struct render_context_args *args)
435 {
436    memset(ctx, 0, sizeof(*ctx));
437    ctx->ctx_id = args->ctx_id;
438    render_socket_init(&ctx->socket, args->ctx_fd);
439    ctx->shmem_fd = -1;
440    ctx->fence_eventfd = -1;
441 
442    if (!render_context_init_name(ctx, args->ctx_id, args->ctx_name))
443       return false;
444 
445    render_virgl_add_context(ctx);
446 
447    return true;
448 }
449 
450 bool
render_context_main(const struct render_context_args * args)451 render_context_main(const struct render_context_args *args)
452 {
453    struct render_context ctx;
454 
455    assert(args->valid && args->ctx_id && args->ctx_fd >= 0);
456 
457    if (!render_virgl_init(args->init_flags)) {
458       close(args->ctx_fd);
459       return false;
460    }
461 
462    if (!render_context_init(&ctx, args)) {
463       render_virgl_fini();
464       close(args->ctx_fd);
465       return false;
466    }
467 
468    const bool ok = render_context_run(&ctx);
469    render_context_fini(&ctx);
470 
471    render_virgl_fini();
472 
473    return ok;
474 }
475