xref: /aosp_15_r20/external/virglrenderer/vtest/vtest_renderer.c (revision bbecb9d118dfdb95f99bd754f8fa9be01f189df3)
1 /**************************************************************************
2  *
3  * Copyright (C) 2015 Red Hat Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included
13  * in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  **************************************************************************/
24 
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
28 
29 #include <stdlib.h>
30 #include <stdio.h>
31 #include <string.h>
32 #include <time.h>
33 #include <unistd.h>
34 #include <fcntl.h>
35 #include <limits.h>
36 
37 #include "virgl_hw.h"
38 #include "virglrenderer.h"
39 
40 #include <sys/uio.h>
41 #include <sys/socket.h>
42 #include <sys/mman.h>
43 #ifdef HAVE_EVENTFD_H
44 #include <sys/eventfd.h>
45 #endif
46 
47 #include "vtest.h"
48 #include "vtest_shm.h"
49 #include "vtest_protocol.h"
50 
51 #include "util.h"
52 #include "util/u_debug.h"
53 #include "util/u_double_list.h"
54 #include "util/u_math.h"
55 #include "util/u_memory.h"
56 #include "util/u_pointer.h"
57 #include "util/u_hash_table.h"
58 
59 #define VTEST_MAX_TIMELINE_COUNT 64
60 
/* A server-side resource, optionally backed by a shared-memory mapping. */
struct vtest_resource {
   struct list_head head;     /* link in renderer.free_resources while recycled */

   uint32_t server_res_id;    /* server-allocated id; stable across reuse */
   uint32_t res_id;           /* id used with virglrenderer (client's or server's) */

   struct iovec iov;          /* mmap'ed shm backing; iov_base is NULL when absent */
};
69 
/* A refcounted sync object carrying a 64-bit value; recycled via
 * renderer.free_syncs once the refcount drops to zero.
 */
struct vtest_sync {
   struct list_head head;   /* link in renderer.free_syncs while recycled */

   int sync_id;             /* server-assigned id; stable across reuse */
   int refcount;            /* zero only while parked on the free list */

   uint64_t value;
};
78 
/* A per-context timeline: list of pending vtest_timeline_submit entries. */
struct vtest_timeline {
   struct list_head submits;
};
82 
/* A batch of (sync, value) pairs queued on a timeline.  Each syncs[] entry
 * holds a reference that vtest_free_timeline_submit() drops.
 */
struct vtest_timeline_submit {
   struct list_head head;             /* link in timeline->submits */

   struct vtest_timeline *timeline;   /* back-pointer to the owning timeline */

   uint32_t count;                    /* entries in syncs[] and values[] */
   struct vtest_sync **syncs;
   uint64_t *values;
};
92 
/* A pending client wait on a set of (sync, value) pairs. */
struct vtest_sync_wait {
   struct list_head head;       /* link in ctx->sync_waits */

   int fd;                      /* signaling fd, closed by vtest_free_sync_wait() */

   uint32_t flags;
   uint64_t valid_before;

   uint32_t count;              /* entries in syncs[] and values[] */
   struct vtest_sync **syncs;   /* entries may be NULL once handled (see
                                 * vtest_free_sync_wait) — the wait path that
                                 * clears them is outside this chunk */
   uint64_t *values;

   uint32_t signaled_count;     /* how many entries have signaled so far */
};
107 
/* Per-client state: one vtest context per connected command stream. */
struct vtest_context {
   struct list_head head;            /* link in active_contexts or free_contexts */

   int ctx_id;                       /* virglrenderer context id (starts at 1) */

   struct vtest_input *input;        /* command input stream */
   int out_fd;                       /* reply output fd */

   char *debug_name;                 /* client-supplied name; heap-owned */

   unsigned protocol_version;        /* negotiated via VCMD_PROTOCOL_VERSION */
   unsigned capset_id;               /* set by VCMD_CONTEXT_INIT; 0 = none */
   bool context_initialized;         /* virgl context has been created */

   struct util_hash_table *resource_table;  /* handle -> vtest_resource */
   struct util_hash_table *sync_table;      /* id -> vtest_sync */

   struct vtest_timeline timelines[VTEST_MAX_TIMELINE_COUNT];

   struct list_head sync_waits;      /* pending vtest_sync_wait entries */
};
129 
/* Process-wide server state; there is a single instance (`renderer` below). */
struct vtest_renderer {
   const char *rendernode_name;   /* optional DRM rendernode path to open */
   bool multi_clients;            /* if set, clients must speak protocol >= 3 */
   uint32_t ctx_flags;            /* flags passed to virgl_renderer_init() */

   uint32_t max_length;           /* cap on client-supplied lengths; UINT_MAX by default */

   /* implicit (non-timeline) fencing: last submitted / last completed id */
   int implicit_fence_submitted;
   int implicit_fence_completed;

   struct list_head active_contexts;
   struct list_head free_contexts;   /* recycled vtest_context objects */
   int next_context_id;              /* id allocators start at 1; 0 stays invalid */

   struct list_head free_resources;  /* recycled vtest_resource objects */
   int next_resource_id;

   struct list_head free_syncs;      /* recycled vtest_sync objects */
   int next_sync_id;

   struct vtest_context *current_context;  /* target of dispatched vtest commands */
};
152 
/*
 * VCMD_RESOURCE_BUSY_WAIT is used to wait for GPU work (VCMD_SUBMIT_CMD) or
 * CPU work (VCMD_TRANSFER_GET2).  A fence is needed only for GPU work.
 */
vtest_create_implicit_fence(struct vtest_renderer * renderer)157 static void vtest_create_implicit_fence(struct vtest_renderer *renderer)
158 {
159    virgl_renderer_create_fence(++renderer->implicit_fence_submitted, 0);
160 }
161 
static void vtest_write_implicit_fence(UNUSED void *cookie, uint32_t fence_id_in)
{
   /* virglrenderer fence callback: remember the newest completed fence id. */
   struct vtest_renderer *r = (struct vtest_renderer *)cookie;

   r->implicit_fence_completed = fence_id_in;
}
167 
168 static void vtest_signal_timeline(struct vtest_timeline *timeline,
169                                   struct vtest_timeline_submit *to_submit);
170 
/* virglrenderer per-context fence callback.  The 64-bit fence_id is not a
 * number here: the submit path stores a pointer to the completed
 * vtest_timeline_submit in it, so decode it and signal its timeline.
 */
static void vtest_write_context_fence(UNUSED void *cookie,
                                      UNUSED uint32_t ctx_id,
                                      UNUSED uint32_t ring_idx,
                                      uint64_t fence_id)
{
   struct vtest_timeline_submit *submit = (void*)(uintptr_t)fence_id;
   vtest_signal_timeline(submit->timeline, submit);
}
179 
vtest_get_drm_fd(void * cookie)180 static int vtest_get_drm_fd(void *cookie)
181 {
182    int fd = -1;
183    struct vtest_renderer *renderer = (struct vtest_renderer*)cookie;
184    if (!renderer->rendernode_name)
185       return -1;
186    fd = open(renderer->rendernode_name, O_RDWR | O_CLOEXEC | O_NOCTTY | O_NONBLOCK);
187    if (fd == -1)
188       fprintf(stderr, "Unable to open rendernode '%s' falling back to default search\n",
189               renderer->rendernode_name);
190    return fd;
191 }
192 
/* Callback table handed to virgl_renderer_init(); the cookie is &renderer. */
static struct virgl_renderer_callbacks renderer_cbs = {
   .version = VIRGL_RENDERER_CALLBACKS_VERSION,
   .write_fence = vtest_write_implicit_fence,
   .get_drm_fd = vtest_get_drm_fd,
   .write_context_fence = vtest_write_context_fence,
};
199 
200 
/* The single global server instance.  Id allocators start at 1 so that 0
 * remains an invalid/sentinel id.
 */
static struct vtest_renderer renderer = {
   .max_length = UINT_MAX,
   .next_context_id = 1,
   .next_resource_id = 1,
   .next_sync_id = 1,
};
207 
vtest_new_resource(uint32_t client_res_id)208 static struct vtest_resource *vtest_new_resource(uint32_t client_res_id)
209 {
210    struct vtest_resource *res;
211 
212    if (LIST_IS_EMPTY(&renderer.free_resources)) {
213       res = malloc(sizeof(*res));
214       if (!res) {
215          return NULL;
216       }
217 
218       res->server_res_id = renderer.next_resource_id++;
219    } else {
220       res = LIST_ENTRY(struct vtest_resource, renderer.free_resources.next, head);
221       list_del(&res->head);
222    }
223 
224    res->res_id = client_res_id ? client_res_id : res->server_res_id;
225    res->iov.iov_base = NULL;
226    res->iov.iov_len = 0;
227 
228    return res;
229 }
230 
/* Release a resource: drop the virgl reference, unmap any shm backing, and
 * park the struct on the free list for reuse (its server_res_id survives).
 */
static void vtest_unref_resource(struct vtest_resource *res)
{
   /* virgl_renderer_ctx_detach_resource and virgl_renderer_resource_detach_iov
    * are implied
    */
   virgl_renderer_resource_unref(res->res_id);

   if (res->iov.iov_base)
      munmap(res->iov.iov_base, res->iov.iov_len);

   list_add(&res->head, &renderer.free_resources);
}
243 
vtest_new_sync(uint64_t value)244 static struct vtest_sync *vtest_new_sync(uint64_t value)
245 {
246    struct vtest_sync *sync;
247 
248    if (LIST_IS_EMPTY(&renderer.free_syncs)) {
249       sync = malloc(sizeof(*sync));
250       if (!sync) {
251          return NULL;
252       }
253 
254       sync->sync_id = renderer.next_sync_id++;
255    } else {
256       sync = LIST_ENTRY(struct vtest_sync, renderer.free_syncs.next, head);
257       list_del(&sync->head);
258    }
259 
260    sync->refcount = 1;
261    sync->value = value;
262 
263    return sync;
264 }
265 
vtest_ref_sync(struct vtest_sync * sync)266 static struct vtest_sync *vtest_ref_sync(struct vtest_sync *sync)
267 {
268    sync->refcount++;
269    return sync;
270 }
271 
vtest_unref_sync(struct vtest_sync * sync)272 static void vtest_unref_sync(struct vtest_sync *sync)
273 {
274    assert(sync->refcount);
275    sync->refcount--;
276    if (sync->refcount)
277       return;
278 
279    list_add(&sync->head, &renderer.free_syncs);
280 }
281 
vtest_free_timeline_submit(struct vtest_timeline_submit * submit)282 static void vtest_free_timeline_submit(struct vtest_timeline_submit *submit)
283 {
284    uint32_t i;
285    for (i = 0; i < submit->count; i++)
286       vtest_unref_sync(submit->syncs[i]);
287    free(submit);
288 }
289 
vtest_free_sync_wait(struct vtest_sync_wait * wait)290 static void vtest_free_sync_wait(struct vtest_sync_wait *wait)
291 {
292    uint32_t i;
293 
294    for (i = 0; i < wait->count; i++) {
295       if (wait->syncs[i])
296          vtest_unref_sync(wait->syncs[i]);
297    }
298    close(wait->fd);
299    free(wait);
300 }
301 
static uint32_t
u32_hash_func(const void *key)
{
   /* Keys are small integers encoded directly in the pointer value, so the
    * low 32 bits are the hash.
    */
   return (uint32_t)(pointer_to_intptr(key) & 0xffffffff);
}
308 
static bool
u32_equal_func(const void *key1, const void *key2)
{
   /* Pointer-encoded integer keys are equal exactly when the pointers are. */
   return key1 == key2;
}
314 
static void
resource_destroy_func(void *value)
{
   /* Hash-table destroy callback: drop the table's resource reference. */
   vtest_unref_resource((struct vtest_resource *)value);
}
321 
static void
sync_destroy_func(void *value)
{
   /* Hash-table destroy callback: drop the table's sync reference. */
   vtest_unref_sync((struct vtest_sync *)value);
}
328 
/* Write exactly `size` bytes from `buf` to `fd`, looping over short writes.
 * Interrupted writes (EINTR) are retried instead of being reported as
 * failures (the old code returned -EINTR, aborting the whole reply).
 * Returns `size` on success or -errno on error.
 */
static int vtest_block_write(int fd, void *buf, int size)
{
   char *ptr = buf;
   int left = size;
   int ret;

   while (left) {
      ret = write(fd, ptr, left);
      if (ret < 0) {
         if (errno == EINTR)
            continue;   /* interrupted by a signal: retry */
         return -errno;
      }

      left -= ret;
      ptr += ret;
   }

   return size;
}
348 
vtest_block_read(struct vtest_input * input,void * buf,int size)349 int vtest_block_read(struct vtest_input *input, void *buf, int size)
350 {
351    int fd = input->data.fd;
352    char *ptr = buf;
353    int left;
354    int ret;
355    static int savefd = -1;
356 
357    left = size;
358    do {
359       ret = read(fd, ptr, left);
360       if (ret <= 0) {
361          return ret == -1 ? -errno : 0;
362       }
363 
364       left -= ret;
365       ptr += ret;
366    } while (left);
367 
368    if (getenv("VTEST_SAVE")) {
369       if (savefd == -1) {
370          savefd = open(getenv("VTEST_SAVE"),
371                        O_CLOEXEC|O_CREAT|O_WRONLY|O_TRUNC|O_DSYNC, S_IRUSR|S_IWUSR);
372          if (savefd == -1) {
373             perror("error opening save file");
374             exit(1);
375          }
376       }
377       if (write(savefd, buf, size) != size) {
378          perror("failed to save");
379          exit(1);
380       }
381    }
382 
383    return size;
384 }
385 
static int vtest_send_fd(int socket_fd, int fd)
{
   /* Pass `fd` to the peer over the unix socket as SCM_RIGHTS ancillary
    * data, carried alongside a single dummy payload byte (sendmsg requires
    * at least one byte of real data).
    */
   char payload = 0;
   char cmsg_buf[CMSG_SPACE(sizeof(int))];
   struct iovec iov;
   struct msghdr msgh = { 0 };

   memset(cmsg_buf, 0, sizeof(cmsg_buf));

   iov.iov_base = &payload;
   iov.iov_len = sizeof(payload);

   msgh.msg_iov = &iov;
   msgh.msg_iovlen = 1;
   msgh.msg_control = cmsg_buf;
   msgh.msg_controllen = sizeof(cmsg_buf);

   struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msgh);
   cmsg->cmsg_level = SOL_SOCKET;
   cmsg->cmsg_type = SCM_RIGHTS;
   cmsg->cmsg_len = CMSG_LEN(sizeof(int));
   *((int *) CMSG_DATA(cmsg)) = fd;

   if (sendmsg(socket_fd, &msgh, 0) < 0)
      return report_failure("Failed to send fd", -EINVAL);

   return 0;
}
419 
vtest_buf_read(struct vtest_input * input,void * buf,int size)420 int vtest_buf_read(struct vtest_input *input, void *buf, int size)
421 {
422    struct vtest_buffer *inbuf = input->data.buffer;
423    if (size > inbuf->size) {
424       return 0;
425    }
426 
427    memcpy(buf, inbuf->buffer, size);
428    inbuf->buffer += size;
429    inbuf->size -= size;
430 
431    return size;
432 }
433 
/* Initialize the global server state and virglrenderer itself.
 * multi_clients later forces every client to negotiate protocol >= 3;
 * render_device optionally pins the DRM rendernode (see vtest_get_drm_fd).
 * Returns 0 on success, -1 if virgl_renderer_init fails.
 */
int vtest_init_renderer(bool multi_clients,
                        int ctx_flags,
                        const char *render_device)
{
   int ret;

   renderer.rendernode_name = render_device;
   list_inithead(&renderer.active_contexts);
   list_inithead(&renderer.free_contexts);
   list_inithead(&renderer.free_resources);
   list_inithead(&renderer.free_syncs);

   /* always fence on a sync thread and allow external blob resources */
   ctx_flags |= VIRGL_RENDERER_THREAD_SYNC |
                VIRGL_RENDERER_USE_EXTERNAL_BLOB;
   ret = virgl_renderer_init(&renderer, ctx_flags, &renderer_cbs);
   if (ret) {
      fprintf(stderr, "failed to initialise renderer.\n");
      return -1;
   }

   renderer.multi_clients = multi_clients;
   renderer.ctx_flags = ctx_flags;

   return 0;
}
459 
460 static void vtest_free_context(struct vtest_context *ctx, bool cleanup);
461 
/* Tear down everything created since vtest_init_renderer().  The
 * next_*_id > 1 checks skip pools that were never used.  Active contexts
 * are destroyed first — that moves them (and their resources/syncs) onto
 * the free lists, which are then drained for real.
 */
void vtest_cleanup_renderer(void)
{
   if (renderer.next_context_id > 1) {
      struct vtest_context *ctx, *tmp;

      LIST_FOR_EACH_ENTRY_SAFE(ctx, tmp, &renderer.active_contexts, head) {
         vtest_destroy_context(ctx);
      }
      LIST_FOR_EACH_ENTRY_SAFE(ctx, tmp, &renderer.free_contexts, head) {
         vtest_free_context(ctx, true);
      }
      list_inithead(&renderer.active_contexts);
      list_inithead(&renderer.free_contexts);

      renderer.next_context_id = 1;
      renderer.current_context = NULL;
   }

   if (renderer.next_resource_id > 1) {
      struct vtest_resource *res, *tmp;

      LIST_FOR_EACH_ENTRY_SAFE(res, tmp, &renderer.free_resources, head) {
         free(res);
      }
      list_inithead(&renderer.free_resources);

      renderer.next_resource_id = 1;
   }

   if (renderer.next_sync_id > 1) {
      struct vtest_sync *sync, *tmp;

      LIST_FOR_EACH_ENTRY_SAFE(sync, tmp, &renderer.free_syncs, head) {
         /* every sync must have been unreffed back to the pool by now */
         assert(!sync->refcount);
         free(sync);
      }
      list_inithead(&renderer.free_syncs);

      renderer.next_sync_id = 1;
   }

   virgl_renderer_cleanup(&renderer);
}
505 
/* Obtain a context object: recycle one from the free list, or allocate and
 * fully initialize a new one (hash tables, timelines, wait list, fresh
 * ctx_id).  Per-connection fields are reset in both cases.  Returns NULL
 * on allocation failure.
 */
static struct vtest_context *vtest_new_context(struct vtest_input *input,
                                               int out_fd)
{
   struct vtest_context *ctx;

   if (LIST_IS_EMPTY(&renderer.free_contexts)) {
      uint32_t i;

      ctx = malloc(sizeof(*ctx));
      if (!ctx) {
         return NULL;
      }

      ctx->resource_table = util_hash_table_create(u32_hash_func,
                                                   u32_equal_func,
                                                   resource_destroy_func);
      if (!ctx->resource_table) {
         free(ctx);
         return NULL;
      }

      ctx->sync_table = util_hash_table_create(u32_hash_func,
                                               u32_equal_func,
                                               sync_destroy_func);
      if (!ctx->sync_table) {
         util_hash_table_destroy(ctx->resource_table);
         free(ctx);
         return NULL;
      }

      for (i = 0; i < VTEST_MAX_TIMELINE_COUNT; i++) {
         struct vtest_timeline *timeline = &ctx->timelines[i];
         list_inithead(&timeline->submits);
      }

      list_inithead(&ctx->sync_waits);

      ctx->ctx_id = renderer.next_context_id++;
   } else {
      /* recycled contexts keep their ctx_id, tables and list heads */
      ctx = LIST_ENTRY(struct vtest_context, renderer.free_contexts.next, head);
      list_del(&ctx->head);
   }

   ctx->input = input;
   ctx->out_fd = out_fd;

   ctx->debug_name = NULL;
   /* By default we support version 0 unless VCMD_PROTOCOL_VERSION is sent */
   ctx->protocol_version = 0;
   ctx->capset_id = 0;
   ctx->context_initialized = false;

   return ctx;
}
560 
vtest_free_context(struct vtest_context * ctx,bool cleanup)561 static void vtest_free_context(struct vtest_context *ctx, bool cleanup)
562 {
563    if (cleanup) {
564       util_hash_table_destroy(ctx->resource_table);
565       util_hash_table_destroy(ctx->sync_table);
566       free(ctx);
567    } else {
568       list_add(&ctx->head, &renderer.free_contexts);
569    }
570 }
571 
/* Create a context for a new client: allocate it, read the client's debug
 * name (`length` bytes, capped at 1 MiB), and register it as active.
 * On success sets *out_ctx and returns 0; on failure the context goes back
 * to the free list and -1 is returned.
 */
int vtest_create_context(struct vtest_input *input, int out_fd,
                         uint32_t length, struct vtest_context **out_ctx)
{
   struct vtest_context *ctx;
   char *vtestname;
   int ret;

   /* reject absurd name lengths before allocating */
   if (length > 1024 * 1024) {
      return -1;
   }

   ctx = vtest_new_context(input, out_fd);
   if (!ctx) {
      return -1;
   }

   /* calloc + the extra byte guarantee NUL termination */
   vtestname = calloc(1, length + 1);
   if (!vtestname) {
      ret = -1;
      goto err;
   }

   ret = ctx->input->read(ctx->input, vtestname, length);
   if (ret != (int)length) {
      ret = -1;
      goto err;
   }

   ctx->debug_name = vtestname;

   list_addtail(&ctx->head, &renderer.active_contexts);
   *out_ctx = ctx;

   return 0;

err:
   free(vtestname);
   vtest_free_context(ctx, false);
   return ret;
}
612 
/* Create the virglrenderer context on first use.  Uses the capset-aware
 * entry point when VCMD_CONTEXT_INIT supplied a capset id, the legacy one
 * otherwise.  Idempotent.  Returns 0 on success or a negative error code.
 */
int vtest_lazy_init_context(struct vtest_context *ctx)
{
   int ret;

   if (ctx->context_initialized)
      return 0;

   /* multi-client servers require protocol >= 3 (see vtest_protocol_version) */
   if (renderer.multi_clients && ctx->protocol_version < 3)
      return report_failed_call("protocol version too low", -EINVAL);

   if (ctx->capset_id) {
      ret = virgl_renderer_context_create_with_flags(ctx->ctx_id,
                                                     ctx->capset_id,
                                                     strlen(ctx->debug_name),
                                                     ctx->debug_name);
   } else {
      ret = virgl_renderer_context_create(ctx->ctx_id,
                                          strlen(ctx->debug_name),
                                          ctx->debug_name);
   }
   ctx->context_initialized = (ret == 0);

   return ret;
}
637 
/* Tear down an active context: drop pending timeline submits and sync
 * waits, free the debug name, destroy the virgl context, then empty (but
 * keep) the resource/sync tables so the context object can be recycled.
 */
void vtest_destroy_context(struct vtest_context *ctx)
{
   struct vtest_sync_wait *wait, *wait_tmp;
   uint32_t i;

   if (renderer.current_context == ctx) {
      renderer.current_context = NULL;
   }
   list_del(&ctx->head);

   /* free all submits still queued on any timeline */
   for (i = 0; i < VTEST_MAX_TIMELINE_COUNT; i++) {
      struct vtest_timeline *timeline = &ctx->timelines[i];
      struct vtest_timeline_submit *submit, *submit_tmp;

      LIST_FOR_EACH_ENTRY_SAFE(submit, submit_tmp, &timeline->submits, head)
         vtest_free_timeline_submit(submit);
      list_inithead(&timeline->submits);
   }

   /* free all outstanding waits (this also closes their fds) */
   LIST_FOR_EACH_ENTRY_SAFE(wait, wait_tmp, &ctx->sync_waits, head) {
      list_del(&wait->head);
      vtest_free_sync_wait(wait);
   }
   list_inithead(&ctx->sync_waits);

   free(ctx->debug_name);
   if (ctx->context_initialized)
      virgl_renderer_context_destroy(ctx->ctx_id);
   /* clear, not destroy: the tables are reused when the context is */
   util_hash_table_clear(ctx->resource_table);
   util_hash_table_clear(ctx->sync_table);
   vtest_free_context(ctx, false);
}
670 
/* Let virglrenderer process pending per-context events. */
void vtest_poll_context(struct vtest_context *ctx)
{
   virgl_renderer_context_poll(ctx->ctx_id);
}
675 
/* Return virglrenderer's pollable fd for this context's events. */
int vtest_get_context_poll_fd(struct vtest_context *ctx)
{
   return virgl_renderer_context_get_poll_fd(ctx->ctx_id);
}
680 
/* Select the context that subsequently dispatched vtest commands act on. */
void vtest_set_current_context(struct vtest_context *ctx)
{
   renderer.current_context = ctx;
}
685 
/* The context selected by the last vtest_set_current_context() call. */
static struct vtest_context *vtest_get_current_context(void)
{
   return renderer.current_context;
}
690 
int vtest_ping_protocol_version(UNUSED uint32_t length_dw)
{
   /* Reply with a bare VCMD_PING_PROTOCOL_VERSION header so the client can
    * probe whether the server understands protocol negotiation at all.
    */
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t hdr_buf[VTEST_HDR_SIZE];
   int ret;

   hdr_buf[VTEST_CMD_LEN] = VCMD_PING_PROTOCOL_VERSION_SIZE;
   hdr_buf[VTEST_CMD_ID] = VCMD_PING_PROTOCOL_VERSION;

   ret = vtest_block_write(ctx->out_fd, hdr_buf, sizeof(hdr_buf));

   return ret < 0 ? ret : 0;
}
706 
/* VCMD_PROTOCOL_VERSION: negotiate the protocol with the client — take the
 * minimum of the client's and server's versions, apply the downgrade rules
 * below, record the result on the context, and echo it back.
 */
int vtest_protocol_version(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t hdr_buf[VTEST_HDR_SIZE];
   uint32_t version_buf[VCMD_PROTOCOL_VERSION_SIZE];
   unsigned version;
   int ret;

   ret = ctx->input->read(ctx->input, &version_buf, sizeof(version_buf));
   if (ret != sizeof(version_buf))
      return -1;

   version = MIN2(version_buf[VCMD_PROTOCOL_VERSION_VERSION],
                  VTEST_PROTOCOL_VERSION);

   /*
    * We've deprecated protocol version 1.  All of its call sites are being
    * moved to protocol version 2.  If the server supports version 2 and the
    * guest supports version 1, fall back to version 0.
    */
   if (version == 1) {
      printf("Older guest Mesa detected, fallbacking to protocol version 0\n");
      version = 0;
   }

   /* Protocol version 2 requires shm support. */
   if (!vtest_shm_check()) {
      printf("Shared memory not supported, fallbacking to protocol version 0\n");
      version = 0;
   }

   /* multi-client mode cannot work with the pre-v3 global-id protocols */
   if (renderer.multi_clients && version < 3)
      return report_failed_call("protocol version too low", -EINVAL);

   ctx->protocol_version = version;

   hdr_buf[VTEST_CMD_LEN] = VCMD_PROTOCOL_VERSION_SIZE;
   hdr_buf[VTEST_CMD_ID] = VCMD_PROTOCOL_VERSION;

   version_buf[VCMD_PROTOCOL_VERSION_VERSION] = ctx->protocol_version;

   ret = vtest_block_write(ctx->out_fd, hdr_buf, sizeof(hdr_buf));
   if (ret < 0) {
      return ret;
   }

   ret = vtest_block_write(ctx->out_fd, version_buf, sizeof(version_buf));
   if (ret < 0) {
      return ret;
   }

   return 0;
}
760 
/* VCMD_GET_PARAM: reply with a {supported, value} pair for the queried
 * parameter; unknown parameters get {false, 0}.
 */
int vtest_get_param(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t get_param_buf[VCMD_GET_PARAM_SIZE];
   uint32_t resp_buf[VTEST_HDR_SIZE + 2];
   uint32_t param;
   uint32_t *resp;
   int ret;

   ret = ctx->input->read(ctx->input, get_param_buf, sizeof(get_param_buf));
   if (ret != sizeof(get_param_buf))
      return -1;

   param = get_param_buf[VCMD_GET_PARAM_PARAM];

   resp_buf[VTEST_CMD_LEN] = 2;
   resp_buf[VTEST_CMD_ID] = VCMD_GET_PARAM;
   resp = &resp_buf[VTEST_CMD_DATA_START];
   switch (param) {
   case VCMD_PARAM_MAX_TIMELINE_COUNT:
      resp[0] = true;
      /* TODO until we have a timerfd */
#ifdef HAVE_EVENTFD_H
      /* timelines need eventfd; VIRGL_DISABLE_MT turns them off at runtime */
      if (!getenv("VIRGL_DISABLE_MT"))
         resp[1] = VTEST_MAX_TIMELINE_COUNT;
      else
         resp[1] = 0;
#else
      resp[1] = 0;
#endif
      break;
   default:
      resp[0] = false;
      resp[1] = 0;
      break;
   }

   ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
   if (ret < 0)
      return -1;

   return 0;
}
804 
/* VCMD_GET_CAPSET: reply with a validity flag, followed by the capset
 * contents when (id, version) is supported.  Returns 0 on success or a
 * negative error code.  (Previously the unsupported-capset path leaked the
 * positive byte count from vtest_block_write as its "success" value,
 * inconsistent with every other return path — now normalized to 0.)
 */
int vtest_get_capset(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t get_capset_buf[VCMD_GET_CAPSET_SIZE];
   uint32_t resp_buf[VTEST_HDR_SIZE + 1];
   uint32_t id;
   uint32_t version;
   uint32_t max_version;
   uint32_t max_size;
   void *caps;
   int ret;

   ret = ctx->input->read(ctx->input, get_capset_buf, sizeof(get_capset_buf));
   if (ret != sizeof(get_capset_buf))
      return -1;

   id = get_capset_buf[VCMD_GET_CAPSET_ID];
   version = get_capset_buf[VCMD_GET_CAPSET_VERSION];

   virgl_renderer_get_cap_set(id, &max_version, &max_size);

   /* unsupported id or version: reply with valid == false */
   if ((!max_version && !max_size) || version > max_version) {
      resp_buf[VTEST_CMD_LEN] = 1;
      resp_buf[VTEST_CMD_ID] = VCMD_GET_CAPSET;
      resp_buf[VTEST_CMD_DATA_START] = false;
      ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
      return ret >= 0 ? 0 : ret;
   }

   /* the reply length is expressed in dwords */
   if (max_size % 4)
      return -EINVAL;

   caps = malloc(max_size);
   if (!caps)
      return -ENOMEM;

   virgl_renderer_fill_caps(id, version, caps);

   resp_buf[VTEST_CMD_LEN] = 1 + max_size / 4;
   resp_buf[VTEST_CMD_ID] = VCMD_GET_CAPSET;
   resp_buf[VTEST_CMD_DATA_START] = true;
   ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
   if (ret >= 0)
      ret = vtest_block_write(ctx->out_fd, caps, max_size);

   free(caps);
   return ret >= 0 ? 0 : ret;
}
853 
/* VCMD_CONTEXT_INIT: record the client's capset id and create the virgl
 * context.  A repeated call is a no-op when it names the same capset and
 * -EINVAL otherwise.
 */
int vtest_context_init(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t context_init_buf[VCMD_CONTEXT_INIT_SIZE];
   uint32_t capset_id;
   int ret;

   ret = ctx->input->read(ctx->input, context_init_buf, sizeof(context_init_buf));
   if (ret != sizeof(context_init_buf))
      return -1;

   capset_id = context_init_buf[VCMD_CONTEXT_INIT_CAPSET_ID];
   if (!capset_id)
      return -EINVAL;

   if (ctx->context_initialized) {
      return ctx->capset_id == capset_id ? 0 : -EINVAL;
   }

   ctx->capset_id = capset_id;

   return vtest_lazy_init_context(ctx);
}
877 
/* Legacy VCMD_GET_CAPS2: send a {size+1, 2} dword header followed by the
 * raw capset 2 blob.  Returns 0 on success or a negative error code.
 * (Previously the function always returned 0, silently swallowing write
 * failures — now the last write error is propagated.)
 */
int vtest_send_caps2(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t hdr_buf[2];
   void *caps_buf;
   int ret;
   uint32_t max_ver, max_size;

   virgl_renderer_get_cap_set(2, &max_ver, &max_size);

   if (max_size == 0) {
      return -1;
   }

   caps_buf = malloc(max_size);
   if (!caps_buf) {
      return -1;
   }

   virgl_renderer_fill_caps(2, 1, caps_buf);

   hdr_buf[0] = max_size + 1;
   hdr_buf[1] = 2;
   ret = vtest_block_write(ctx->out_fd, hdr_buf, sizeof(hdr_buf));
   if (ret < 0) {
      goto end;
   }

   ret = vtest_block_write(ctx->out_fd, caps_buf, max_size);

end:
   free(caps_buf);
   return ret < 0 ? ret : 0;
}
915 
/* Legacy VCMD_GET_CAPS: send a {size+1, 1} dword header followed by the raw
 * capset 1 blob.  Returns 0 on success or a negative error code.
 * Fixes vs. the old version: write failures are no longer silently
 * swallowed (it always returned 0), and an empty/unsupported capset is
 * rejected up front, matching vtest_send_caps2.
 */
int vtest_send_caps(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t  max_ver, max_size;
   void *caps_buf;
   uint32_t hdr_buf[2];
   int ret;

   virgl_renderer_get_cap_set(1, &max_ver, &max_size);

   if (max_size == 0) {
      return -1;
   }

   caps_buf = malloc(max_size);
   if (!caps_buf) {
      return -1;
   }

   virgl_renderer_fill_caps(1, 1, caps_buf);

   hdr_buf[0] = max_size + 1;
   hdr_buf[1] = 1;
   ret = vtest_block_write(ctx->out_fd, hdr_buf, sizeof(hdr_buf));
   if (ret < 0) {
      goto end;
   }

   ret = vtest_block_write(ctx->out_fd, caps_buf, max_size);

end:
   free(caps_buf);
   return ret < 0 ? ret : 0;
}
949 
/* Read a v1 VCMD_RESOURCE_CREATE payload from the input stream and unpack
 * it into `args`.  args->flags is always forced to 0 (the v1 wire format
 * carries none).  Returns 0 on success, -1 on a short read.
 */
static int vtest_create_resource_decode_args(struct vtest_context *ctx,
                                             struct virgl_renderer_resource_create_args *args)
{
   uint32_t res_create_buf[VCMD_RES_CREATE_SIZE];
   int ret;

   ret = ctx->input->read(ctx->input, &res_create_buf,
                          sizeof(res_create_buf));
   if (ret != sizeof(res_create_buf)) {
      return -1;
   }

   args->handle = res_create_buf[VCMD_RES_CREATE_RES_HANDLE];
   args->target = res_create_buf[VCMD_RES_CREATE_TARGET];
   args->format = res_create_buf[VCMD_RES_CREATE_FORMAT];
   args->bind = res_create_buf[VCMD_RES_CREATE_BIND];

   args->width = res_create_buf[VCMD_RES_CREATE_WIDTH];
   args->height = res_create_buf[VCMD_RES_CREATE_HEIGHT];
   args->depth = res_create_buf[VCMD_RES_CREATE_DEPTH];
   args->array_size = res_create_buf[VCMD_RES_CREATE_ARRAY_SIZE];
   args->last_level = res_create_buf[VCMD_RES_CREATE_LAST_LEVEL];
   args->nr_samples = res_create_buf[VCMD_RES_CREATE_NR_SAMPLES];
   args->flags = 0;

   return 0;
}
977 
/* Read a VCMD_RESOURCE_CREATE2 payload and unpack it into `args`, plus the
 * requested shared-memory size into *shm_size.  args->flags is always
 * forced to 0.  Returns 0 on success, -1 on a short read.
 */
static int vtest_create_resource_decode_args2(struct vtest_context *ctx,
                                              struct virgl_renderer_resource_create_args *args,
                                              size_t *shm_size)
{
   uint32_t res_create_buf[VCMD_RES_CREATE2_SIZE];
   int ret;

   ret = ctx->input->read(ctx->input, &res_create_buf,
                          sizeof(res_create_buf));
   if (ret != sizeof(res_create_buf)) {
      return -1;
   }

   args->handle = res_create_buf[VCMD_RES_CREATE2_RES_HANDLE];
   args->target = res_create_buf[VCMD_RES_CREATE2_TARGET];
   args->format = res_create_buf[VCMD_RES_CREATE2_FORMAT];
   args->bind = res_create_buf[VCMD_RES_CREATE2_BIND];

   args->width = res_create_buf[VCMD_RES_CREATE2_WIDTH];
   args->height = res_create_buf[VCMD_RES_CREATE2_HEIGHT];
   args->depth = res_create_buf[VCMD_RES_CREATE2_DEPTH];
   args->array_size = res_create_buf[VCMD_RES_CREATE2_ARRAY_SIZE];
   args->last_level = res_create_buf[VCMD_RES_CREATE2_LAST_LEVEL];
   args->nr_samples = res_create_buf[VCMD_RES_CREATE2_NR_SAMPLES];
   args->flags = 0;

   *shm_size = res_create_buf[VCMD_RES_CREATE2_DATA_SIZE];

   return 0;
}
1008 
/* Create a shared-memory backing of `size` bytes for `res`, map it into the
 * server, and record the mapping in res->iov.  Returns the shm fd (the
 * caller owns it and must close it after handing it to the client) or a
 * negative error.  The mapping itself is released in vtest_unref_resource().
 */
static int vtest_create_resource_setup_shm(struct vtest_resource *res,
                                           size_t size)
{
   int fd;
   void *ptr;

   fd = vtest_new_shm(res->res_id, size);
   if (fd < 0)
      return report_failed_call("vtest_new_shm", fd);

   ptr = mmap(NULL, size, PROT_WRITE | PROT_READ, MAP_SHARED, fd, 0);
   if (ptr == MAP_FAILED) {
      close(fd);
      return -1;
   }

   res->iov.iov_base = ptr;
   res->iov.iov_len = size;

   return fd;
}
1030 
/* Common path for VCMD_RESOURCE_CREATE and VCMD_RESOURCE_CREATE2.
 *
 * Creates a renderer resource from the decoded args, attaches it to the
 * calling context, optionally (shm_size != 0) backs it with a shared
 * memory region whose fd is sent to the client, and finally records the
 * resource in the context's resource table.  Returns 0 on success or a
 * negative errno; on failure the partially created resource is unref'd.
 */
static int vtest_create_resource_internal(struct vtest_context *ctx,
                                          uint32_t cmd_id,
                                          struct virgl_renderer_resource_create_args *args,
                                          size_t shm_size)
{
   struct vtest_resource *res;
   int ret;

   if (ctx->protocol_version >= 3) {
      /* from protocol v3 on the server allocates the resource id and
       * replies with it, so the client must pass a zero handle */
      if (args->handle)
         return -EINVAL;
   } else {
      // Check that the handle doesn't already exist.
      if (util_hash_table_get(ctx->resource_table, intptr_to_pointer(args->handle))) {
         return -EEXIST;
      }
   }

   res = vtest_new_resource(args->handle);
   if (!res)
      return -ENOMEM;
   /* use the server-side id from here on (for v3 a fresh one was allocated) */
   args->handle = res->res_id;

   ret = virgl_renderer_resource_create(args, NULL, 0);
   if (ret) {
      vtest_unref_resource(res);
      return report_failed_call("virgl_renderer_resource_create", ret);
   }

   virgl_renderer_ctx_attach_resource(ctx->ctx_id, res->res_id);

   if (ctx->protocol_version >= 3) {
      /* reply with the server-allocated resource id */
      uint32_t resp_buf[VTEST_HDR_SIZE + 1] = {
         [VTEST_CMD_LEN] = 1,
         [VTEST_CMD_ID] = cmd_id,
         [VTEST_CMD_DATA_START] = res->res_id,
      };
      ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
      if (ret < 0) {
         vtest_unref_resource(res);
         return ret;
      }
   }

   /* no shm for v1 resources or v2 multi-sample resources */
   if (shm_size) {
      int fd;

      fd = vtest_create_resource_setup_shm(res, shm_size);
      if (fd < 0) {
         vtest_unref_resource(res);
         return -ENOMEM;
      }

      /* hand the shm fd to the client over the socket */
      ret = vtest_send_fd(ctx->out_fd, fd);
      if (ret < 0) {
         close(fd);
         vtest_unref_resource(res);
         return report_failed_call("vtest_send_fd", ret);
      }

      /* Closing the file descriptor does not unmap the region. */
      close(fd);

      virgl_renderer_resource_attach_iov(res->res_id, &res->iov, 1);
   }

   util_hash_table_set(ctx->resource_table, intptr_to_pointer(res->res_id), res);

   return 0;
}
1102 
/* Handle VCMD_RESOURCE_CREATE: decode v1 args and create the resource.
 * Protocol v1 resources never get shared-memory backing (shm_size 0). */
int vtest_create_resource(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct virgl_renderer_resource_create_args args;
   int ret = vtest_create_resource_decode_args(ctx, &args);

   if (ret < 0)
      return ret;

   return vtest_create_resource_internal(ctx, VCMD_RESOURCE_CREATE, &args, 0);
}
1116 
/* Handle VCMD_RESOURCE_CREATE2: decode v2 args (which include a shm
 * backing size) and create the resource. */
int vtest_create_resource2(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct virgl_renderer_resource_create_args args;
   size_t shm_size;
   int ret = vtest_create_resource_decode_args2(ctx, &args, &shm_size);

   if (ret < 0)
      return ret;

   return vtest_create_resource_internal(ctx, VCMD_RESOURCE_CREATE2, &args, shm_size);
}
1131 
/* VCMD_RESOURCE_CREATE_BLOB: create a blob resource.
 *
 * GUEST / HOST3D_GUEST blobs are backed by a freshly created shm region;
 * HOST3D blobs are created first and then exported from the renderer as
 * a dmabuf or shm fd.  In all cases the resulting fd is sent to the
 * client together with a reply carrying the server-allocated resource id.
 */
int vtest_resource_create_blob(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t res_create_blob_buf[VCMD_RES_CREATE_BLOB_SIZE];
   uint32_t resp_buf[VTEST_HDR_SIZE + 1];
   struct virgl_renderer_resource_create_blob_args args;
   struct vtest_resource *res;
   int fd;
   int ret;

   ret = ctx->input->read(ctx->input, res_create_blob_buf,
                          sizeof(res_create_blob_buf));
   if (ret != sizeof(res_create_blob_buf))
      return -1;

   memset(&args, 0, sizeof(args));
   args.blob_mem = res_create_blob_buf[VCMD_RES_CREATE_BLOB_TYPE];
   args.blob_flags = res_create_blob_buf[VCMD_RES_CREATE_BLOB_FLAGS];
   /* 64-bit size and blob id arrive as lo/hi dword pairs */
   args.size = res_create_blob_buf[VCMD_RES_CREATE_BLOB_SIZE_LO];
   args.size |= (uint64_t)res_create_blob_buf[VCMD_RES_CREATE_BLOB_SIZE_HI] << 32;
   args.blob_id = res_create_blob_buf[VCMD_RES_CREATE_BLOB_ID_LO];
   args.blob_id |= (uint64_t)res_create_blob_buf[VCMD_RES_CREATE_BLOB_ID_HI] << 32;

   /* blob resource ids are always server-allocated (handle 0) */
   res = vtest_new_resource(0);
   if (!res)
      return -ENOMEM;

   args.res_handle = res->res_id;
   args.ctx_id = ctx->ctx_id;

   switch (args.blob_mem) {
   case VIRGL_RENDERER_BLOB_MEM_GUEST:
   case VIRGL_RENDERER_BLOB_MEM_HOST3D_GUEST:
      /* guest-memory blobs are backed by shm we create here */
      fd = vtest_create_resource_setup_shm(res, args.size);
      if (fd < 0) {
         vtest_unref_resource(res);
         return -ENOMEM;
      }

      args.iovecs = &res->iov;
      args.num_iovs = 1;
      break;
   case VIRGL_RENDERER_BLOB_MEM_HOST3D:
      /* fd is produced by the export-blob call below */
      fd = -1;
      break;
   default:
      vtest_unref_resource(res);
      return -EINVAL;
   }

   ret = virgl_renderer_resource_create_blob(&args);
   if (ret) {
      if (fd >= 0)
         close(fd);
      vtest_unref_resource(res);
      return report_failed_call("virgl_renderer_resource_create_blob", ret);
   }

   /* export blob */
   if (args.blob_mem == VIRGL_RENDERER_BLOB_MEM_HOST3D) {
      uint32_t fd_type;
      ret = virgl_renderer_resource_export_blob(res->res_id, &fd_type, &fd);
      if (ret) {
         vtest_unref_resource(res);
         return report_failed_call("virgl_renderer_resource_export_blob", ret);
      }
      /* only fd types the client can map or import are acceptable */
      if (fd_type != VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF &&
          fd_type != VIRGL_RENDERER_BLOB_FD_TYPE_SHM) {
         close(fd);
         vtest_unref_resource(res);
         return report_failed_call("virgl_renderer_resource_export_blob", -EINVAL);
      }
   }

   virgl_renderer_ctx_attach_resource(ctx->ctx_id, res->res_id);

   resp_buf[VTEST_CMD_LEN] = 1;
   resp_buf[VTEST_CMD_ID] = VCMD_RESOURCE_CREATE_BLOB;
   resp_buf[VTEST_CMD_DATA_START] = res->res_id;
   ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
   if (ret < 0) {
      close(fd);
      vtest_unref_resource(res);
      return ret;
   }

   ret = vtest_send_fd(ctx->out_fd, fd);
   if (ret < 0) {
      close(fd);
      vtest_unref_resource(res);
      return report_failed_call("vtest_send_fd", ret);
   }

   /* Closing the file descriptor does not unmap the region. */
   close(fd);

   util_hash_table_set(ctx->resource_table, intptr_to_pointer(res->res_id), res);

   return 0;
}
1232 
/* VCMD_RESOURCE_UNREF: remove a resource from this context's table.
 * The table's removal callback drops the reference. */
int vtest_resource_unref(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t buf[VCMD_RES_UNREF_SIZE];
   int ret;

   ret = ctx->input->read(ctx->input, &buf, sizeof(buf));
   if (ret != sizeof(buf))
      return -1;

   util_hash_table_remove(ctx->resource_table,
                          intptr_to_pointer(buf[VCMD_RES_UNREF_RES_HANDLE]));

   return 0;
}
1251 
vtest_submit_cmd(uint32_t length_dw)1252 int vtest_submit_cmd(uint32_t length_dw)
1253 {
1254    struct vtest_context *ctx = vtest_get_current_context();
1255    uint32_t *cbuf;
1256    int ret;
1257 
1258    if (length_dw > renderer.max_length / 4) {
1259       return -1;
1260    }
1261 
1262    cbuf = malloc(length_dw * 4);
1263    if (!cbuf) {
1264       return -1;
1265    }
1266 
1267    ret = ctx->input->read(ctx->input, cbuf, length_dw * 4);
1268    if (ret != (int)length_dw * 4) {
1269       free(cbuf);
1270       return -1;
1271    }
1272 
1273    ret = virgl_renderer_submit_cmd(cbuf, ctx->ctx_id, length_dw);
1274 
1275    free(cbuf);
1276    if (ret)
1277       return -1;
1278 
1279    vtest_create_implicit_fence(&renderer);
1280    return 0;
1281 }
1282 
/* Decoded arguments shared by the transfer get/put paths.  Filled in by
 * vtest_transfer_decode_args() (v1) or vtest_transfer_decode_args2() (v2). */
struct vtest_transfer_args {
   uint32_t handle;        /* client resource handle (resource_table key) */
   uint32_t level;         /* mip level of the transfer */
   uint32_t stride;        /* row stride in bytes; 0 for v2 transfers */
   uint32_t layer_stride;  /* layer stride in bytes; 0 for v2 transfers */
   struct virgl_box box;   /* x/y/z offset and w/h/d extent of the region */
   uint32_t offset;        /* byte offset into the backing iov; 0 for v1 */
};
1291 
vtest_transfer_decode_args(struct vtest_context * ctx,struct vtest_transfer_args * args,uint32_t * data_size)1292 static int vtest_transfer_decode_args(struct vtest_context *ctx,
1293                                       struct vtest_transfer_args *args,
1294                                       uint32_t *data_size)
1295 {
1296    uint32_t thdr_buf[VCMD_TRANSFER_HDR_SIZE];
1297    int ret;
1298 
1299    ret = ctx->input->read(ctx->input, thdr_buf, sizeof(thdr_buf));
1300    if (ret != sizeof(thdr_buf)) {
1301       return -1;
1302    }
1303 
1304    args->handle = thdr_buf[VCMD_TRANSFER_RES_HANDLE];
1305    args->level = thdr_buf[VCMD_TRANSFER_LEVEL];
1306    args->stride = thdr_buf[VCMD_TRANSFER_STRIDE];
1307    args->layer_stride = thdr_buf[VCMD_TRANSFER_LAYER_STRIDE];
1308    args->box.x = thdr_buf[VCMD_TRANSFER_X];
1309    args->box.y = thdr_buf[VCMD_TRANSFER_Y];
1310    args->box.z = thdr_buf[VCMD_TRANSFER_Z];
1311    args->box.w = thdr_buf[VCMD_TRANSFER_WIDTH];
1312    args->box.h = thdr_buf[VCMD_TRANSFER_HEIGHT];
1313    args->box.d = thdr_buf[VCMD_TRANSFER_DEPTH];
1314    args->offset = 0;
1315 
1316    *data_size = thdr_buf[VCMD_TRANSFER_DATA_SIZE];
1317 
1318    if (*data_size > renderer.max_length) {
1319       return -ENOMEM;
1320    }
1321 
1322    return 0;
1323 }
1324 
vtest_transfer_decode_args2(struct vtest_context * ctx,struct vtest_transfer_args * args)1325 static int vtest_transfer_decode_args2(struct vtest_context *ctx,
1326                                        struct vtest_transfer_args *args)
1327 {
1328    uint32_t thdr_buf[VCMD_TRANSFER2_HDR_SIZE];
1329    int ret;
1330 
1331    ret = ctx->input->read(ctx->input, thdr_buf, sizeof(thdr_buf));
1332    if (ret != sizeof(thdr_buf)) {
1333       return -1;
1334    }
1335 
1336    args->handle = thdr_buf[VCMD_TRANSFER2_RES_HANDLE];
1337    args->level = thdr_buf[VCMD_TRANSFER2_LEVEL];
1338    args->stride = 0;
1339    args->layer_stride = 0;
1340    args->box.x = thdr_buf[VCMD_TRANSFER2_X];
1341    args->box.y = thdr_buf[VCMD_TRANSFER2_Y];
1342    args->box.z = thdr_buf[VCMD_TRANSFER2_Z];
1343    args->box.w = thdr_buf[VCMD_TRANSFER2_WIDTH];
1344    args->box.h = thdr_buf[VCMD_TRANSFER2_HEIGHT];
1345    args->box.d = thdr_buf[VCMD_TRANSFER2_DEPTH];
1346    args->offset = thdr_buf[VCMD_TRANSFER2_OFFSET];
1347 
1348    return 0;
1349 }
1350 
vtest_transfer_get_internal(struct vtest_context * ctx,struct vtest_transfer_args * args,uint32_t data_size,bool do_transfer)1351 static int vtest_transfer_get_internal(struct vtest_context *ctx,
1352                                        struct vtest_transfer_args *args,
1353                                        uint32_t data_size,
1354                                        bool do_transfer)
1355 {
1356    struct vtest_resource *res;
1357    struct iovec data_iov;
1358    int ret = 0;
1359 
1360    res = util_hash_table_get(ctx->resource_table,
1361                              intptr_to_pointer(args->handle));
1362    if (!res) {
1363       return report_failed_call("util_hash_table_get", -ESRCH);
1364    }
1365 
1366    if (data_size) {
1367       data_iov.iov_len = data_size;
1368       data_iov.iov_base = malloc(data_size);
1369       if (!data_iov.iov_base) {
1370          return -ENOMEM;
1371       }
1372    } else {
1373       if (args->offset >= res->iov.iov_len) {
1374          return report_failure("offset larger then length of backing store", -EFAULT);
1375       }
1376    }
1377 
1378    if (do_transfer) {
1379       ret = virgl_renderer_transfer_read_iov(res->res_id,
1380                                              ctx->ctx_id,
1381                                              args->level,
1382                                              args->stride,
1383                                              args->layer_stride,
1384                                              &args->box,
1385                                              args->offset,
1386                                              data_size ? &data_iov : NULL,
1387                                              data_size ? 1 : 0);
1388       if (ret) {
1389          report_failed_call("virgl_renderer_transfer_read_iov", ret);
1390       }
1391    } else if (data_size) {
1392       memset(data_iov.iov_base, 0, data_iov.iov_len);
1393    }
1394 
1395    if (data_size) {
1396       ret = vtest_block_write(ctx->out_fd, data_iov.iov_base, data_iov.iov_len);
1397       if (ret > 0)
1398          ret = 0;
1399 
1400       free(data_iov.iov_base);
1401    }
1402 
1403    return ret;
1404 }
1405 
vtest_transfer_put_internal(struct vtest_context * ctx,struct vtest_transfer_args * args,uint32_t data_size,bool do_transfer)1406 static int vtest_transfer_put_internal(struct vtest_context *ctx,
1407                                        struct vtest_transfer_args *args,
1408                                        uint32_t data_size,
1409                                        bool do_transfer)
1410 {
1411    struct vtest_resource *res;
1412    struct iovec data_iov;
1413    int ret = 0;
1414 
1415    res = util_hash_table_get(ctx->resource_table,
1416                              intptr_to_pointer(args->handle));
1417    if (!res) {
1418       return report_failed_call("util_hash_table_get", -ESRCH);
1419    }
1420 
1421    if (data_size) {
1422       data_iov.iov_len = data_size;
1423       data_iov.iov_base = malloc(data_size);
1424       if (!data_iov.iov_base) {
1425          return -ENOMEM;
1426       }
1427 
1428       ret = ctx->input->read(ctx->input, data_iov.iov_base, data_iov.iov_len);
1429       if (ret < 0) {
1430          return ret;
1431       }
1432    }
1433 
1434    if (do_transfer) {
1435       ret = virgl_renderer_transfer_write_iov(res->res_id,
1436                                               ctx->ctx_id,
1437                                               args->level,
1438                                               args->stride,
1439                                               args->layer_stride,
1440                                               &args->box,
1441                                               args->offset,
1442                                               data_size ? &data_iov : NULL,
1443                                               data_size ? 1 : 0);
1444       if (ret) {
1445          report_failed_call("virgl_renderer_transfer_write_iov", ret);
1446       }
1447    }
1448 
1449    if (data_size) {
1450       free(data_iov.iov_base);
1451    }
1452 
1453    return ret;
1454 }
1455 
/* VCMD_TRANSFER_GET: v1 readback with an inline reply payload. */
int vtest_transfer_get(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   uint32_t data_size;
   int ret = vtest_transfer_decode_args(ctx, &args, &data_size);

   if (ret < 0)
      return ret;

   return vtest_transfer_get_internal(ctx, &args, data_size, true);
}
1470 
/* VCMD_TRANSFER_GET nop variant: reply with zeroed data, no readback. */
int vtest_transfer_get_nop(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   uint32_t data_size;
   int ret = vtest_transfer_decode_args(ctx, &args, &data_size);

   if (ret < 0)
      return ret;

   return vtest_transfer_get_internal(ctx, &args, data_size, false);
}
1485 
/* VCMD_TRANSFER_PUT: v1 upload with an inline payload. */
int vtest_transfer_put(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   uint32_t data_size;
   int ret = vtest_transfer_decode_args(ctx, &args, &data_size);

   if (ret < 0)
      return ret;

   return vtest_transfer_put_internal(ctx, &args, data_size, true);
}
1500 
/* VCMD_TRANSFER_PUT nop variant: consume the payload, skip the upload. */
int vtest_transfer_put_nop(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   uint32_t data_size;
   int ret = vtest_transfer_decode_args(ctx, &args, &data_size);

   if (ret < 0)
      return ret;

   return vtest_transfer_put_internal(ctx, &args, data_size, false);
}
1515 
/* VCMD_TRANSFER_GET2: v2 readback straight into the shm backing. */
int vtest_transfer_get2(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   int ret = vtest_transfer_decode_args2(ctx, &args);

   if (ret < 0)
      return ret;

   return vtest_transfer_get_internal(ctx, &args, 0, true);
}
1529 
/* VCMD_TRANSFER_GET2 nop variant: decode and validate only. */
int vtest_transfer_get2_nop(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   int ret = vtest_transfer_decode_args2(ctx, &args);

   if (ret < 0)
      return ret;

   return vtest_transfer_get_internal(ctx, &args, 0, false);
}
1543 
/* VCMD_TRANSFER_PUT2: v2 upload from the shm backing. */
int vtest_transfer_put2(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   int ret = vtest_transfer_decode_args2(ctx, &args);

   if (ret < 0)
      return ret;

   return vtest_transfer_put_internal(ctx, &args, 0, true);
}
1557 
/* VCMD_TRANSFER_PUT2 nop variant: decode and validate only. */
int vtest_transfer_put2_nop(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   int ret = vtest_transfer_decode_args2(ctx, &args);

   if (ret < 0)
      return ret;

   return vtest_transfer_put_internal(ctx, &args, 0, false);
}
1571 
/* VCMD_RESOURCE_BUSY_WAIT: report whether implicit fences are pending,
 * optionally blocking (VCMD_BUSY_WAIT_FLAG_WAIT) until they complete.
 * Replies with a header followed by a single busy/not-busy dword. */
int vtest_resource_busy_wait(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t bw_buf[VCMD_BUSY_WAIT_SIZE];
   int ret, fd;
   int flags;
   uint32_t hdr_buf[VTEST_HDR_SIZE];
   uint32_t reply_buf[1];
   bool busy = false;

   ret = ctx->input->read(ctx->input, &bw_buf, sizeof(bw_buf));
   if (ret != sizeof(bw_buf)) {
      return -1;
   }

   /* clients often send VCMD_PING_PROTOCOL_VERSION followed by
    * VCMD_RESOURCE_BUSY_WAIT with handle 0 to figure out if
    * VCMD_PING_PROTOCOL_VERSION is supported.  We need to make a special case
    * for that.
    */
   if (!ctx->context_initialized && bw_buf[VCMD_BUSY_WAIT_HANDLE])
      return -1;

   /*  handle = bw_buf[VCMD_BUSY_WAIT_HANDLE]; unused as of now */
   flags = bw_buf[VCMD_BUSY_WAIT_FLAGS];

   do {
      /* busy while any submitted implicit fence has not completed yet */
      busy = renderer.implicit_fence_completed !=
             renderer.implicit_fence_submitted;
      if (!busy || !(flags & VCMD_BUSY_WAIT_FLAG_WAIT))
         break;

      /* TODO this is bad when there are multiple clients */
      fd = virgl_renderer_get_poll_fd();
      if (fd != -1) {
         /* sleep until the renderer signals progress, then re-poll */
         vtest_wait_for_fd_read(fd);
      }
      virgl_renderer_poll();
   } while (true);

   hdr_buf[VTEST_CMD_LEN] = 1;
   hdr_buf[VTEST_CMD_ID] = VCMD_RESOURCE_BUSY_WAIT;
   reply_buf[0] = busy ? 1 : 0;

   ret = vtest_block_write(ctx->out_fd, hdr_buf, sizeof(hdr_buf));
   if (ret < 0) {
      return ret;
   }

   ret = vtest_block_write(ctx->out_fd, reply_buf, sizeof(reply_buf));
   if (ret < 0) {
      return ret;
   }

   return 0;
}
1628 
/* VCMD_RESOURCE_BUSY_WAIT nop variant: consume the request and always
 * answer "not busy". */
int vtest_resource_busy_wait_nop(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t bw_buf[VCMD_BUSY_WAIT_SIZE];
   uint32_t resp[VTEST_HDR_SIZE + 1];
   int ret;

   ret = ctx->input->read(ctx->input, &bw_buf, sizeof(bw_buf));
   if (ret != sizeof(bw_buf))
      return -1;

   resp[VTEST_CMD_LEN] = 1;
   resp[VTEST_CMD_ID] = VCMD_RESOURCE_BUSY_WAIT;
   resp[VTEST_CMD_DATA_START] = 0;   /* never busy */

   ret = vtest_block_write(ctx->out_fd, resp, sizeof(resp));
   return ret < 0 ? ret : 0;
}
1652 
/* Advance implicit fences by polling the renderer; called from the main
 * loop so vtest_resource_busy_wait() can observe completions. */
void vtest_poll_resource_busy_wait(void)
{
   /* poll the implicit fences */
   virgl_renderer_poll();
}
1658 
/* Return the current CLOCK_MONOTONIC time plus offset_ms, in nanoseconds.
 * Offsets above INT32_MAX ms are treated as "wait forever" and map to
 * UINT64_MAX (used as an unreachable deadline). */
static uint64_t vtest_gettime(uint32_t offset_ms)
{
   const uint64_t ns_per_ms = 1000000;
   struct timespec ts;

   if (offset_ms > INT32_MAX)
      return UINT64_MAX;

   clock_gettime(CLOCK_MONOTONIC, &ts);

   return (uint64_t)ts.tv_sec * ns_per_ms * 1000 + (uint64_t)ts.tv_nsec +
          ns_per_ms * offset_ms;
}
1674 
/* Poke a wait fd (eventfd) with a count of one to wake its reader.
 * Failure is deliberately ignored; capturing the result into a voided
 * variable silences -Wunused-result without the pragma dance. */
static inline void write_ready(int fd)
{
   static const uint64_t one = 1;
   ssize_t written = write(fd, &one, sizeof(one));
   (void)written;
}
1687 
1688 
/* TODO this is slow */
/* Store a new value on `sync` and wake any waiters whose condition is
 * now met.  When the value does not advance (sync->value >= value) it is
 * still stored but no waiters are scanned.  Otherwise every pending wait
 * in every active context is walked: expired waits are garbage
 * collected, matching sync/value pairs are marked signaled, and
 * fully-satisfied waits get their fd poked via write_ready(). */
static void vtest_signal_sync(struct vtest_sync *sync, uint64_t value)
{
   struct vtest_context *ctx;
   uint64_t now;

   /* NOTE(review): this path deliberately allows moving the value
    * backwards without signaling -- confirm against VCMD_SYNC_WRITE
    * semantics before changing */
   if (sync->value >= value) {
      sync->value = value;
      return;
   }
   sync->value = value;

   now = vtest_gettime(0);

   LIST_FOR_EACH_ENTRY(ctx, &renderer.active_contexts, head) {
      struct vtest_sync_wait *wait, *tmp;
      LIST_FOR_EACH_ENTRY_SAFE(wait, tmp, &ctx->sync_waits, head) {
         bool is_ready = false;
         uint32_t i;

         /* garbage collect */
         if (wait->valid_before < now) {
            list_del(&wait->head);
            vtest_free_sync_wait(wait);
            continue;
         }

         for (i = 0; i < wait->count; i++) {
            if (wait->syncs[i] != sync || wait->values[i] > value)
               continue;

            /* this pair is satisfied: drop the wait's reference */
            vtest_unref_sync(wait->syncs[i]);
            wait->syncs[i] = NULL;

            wait->signaled_count++;
            if (wait->signaled_count == wait->count ||
                (wait->flags & VCMD_SYNC_WAIT_FLAG_ANY)) {
               is_ready = true;
               break;
            }
         }

         if (is_ready) {
            list_del(&wait->head);
            write_ready(wait->fd);
            vtest_free_sync_wait(wait);
         }
      }
   }
}
1739 
/* Retire timeline submits in order, up to and including to_submit,
 * signaling each submit's sync objects and releasing their references. */
static void vtest_signal_timeline(struct vtest_timeline *timeline,
                                  struct vtest_timeline_submit *to_submit)
{
   struct vtest_timeline_submit *submit, *tmp;

   LIST_FOR_EACH_ENTRY_SAFE(submit, tmp, &timeline->submits, head) {
      uint32_t i;

      list_del(&submit->head);

      for (i = 0; i < submit->count; i++) {
         vtest_signal_sync(submit->syncs[i], submit->values[i]);
         vtest_unref_sync(submit->syncs[i]);
      }
      free(submit);

      /* earlier submits retire too; stop once to_submit has been handled */
      if (submit == to_submit)
         break;
   }
}
1760 
/* VCMD_SYNC_CREATE: create a timeline sync with an initial 64-bit value
 * and reply with the server-allocated sync id. */
int vtest_sync_create(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t recv_buf[VCMD_SYNC_CREATE_SIZE];
   uint32_t resp_buf[VTEST_HDR_SIZE + 1];
   struct vtest_sync *sync;
   uint64_t initial_value;
   int ret;

   ret = ctx->input->read(ctx->input, recv_buf, sizeof(recv_buf));
   if (ret != sizeof(recv_buf))
      return -1;

   /* value arrives as a lo/hi dword pair */
   initial_value = (uint64_t)recv_buf[VCMD_SYNC_CREATE_VALUE_HI] << 32 |
                   recv_buf[VCMD_SYNC_CREATE_VALUE_LO];

   sync = vtest_new_sync(initial_value);
   if (!sync)
      return -ENOMEM;

   resp_buf[VTEST_CMD_LEN] = 1;
   resp_buf[VTEST_CMD_ID] = VCMD_SYNC_CREATE;
   resp_buf[VTEST_CMD_DATA_START] = sync->sync_id;

   ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
   if (ret < 0) {
      vtest_unref_sync(sync);
      return ret;
   }

   util_hash_table_set(ctx->sync_table, intptr_to_pointer(sync->sync_id), sync);

   return 0;
}
1794 
/* VCMD_SYNC_UNREF: drop a sync from this context's table; the table's
 * removal callback releases the reference. */
int vtest_sync_unref(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t buf[VCMD_SYNC_UNREF_SIZE];
   int ret;

   ret = ctx->input->read(ctx->input, &buf, sizeof(buf));
   if (ret != sizeof(buf))
      return -1;

   util_hash_table_remove(ctx->sync_table,
                          intptr_to_pointer(buf[VCMD_SYNC_UNREF_ID]));

   return 0;
}
1813 
/* VCMD_SYNC_READ: reply with the current 64-bit value of a sync,
 * encoded as lo dword then hi dword. */
int vtest_sync_read(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t recv_buf[VCMD_SYNC_READ_SIZE];
   uint32_t resp_buf[VTEST_HDR_SIZE + 2];
   struct vtest_sync *sync;
   int ret;

   ret = ctx->input->read(ctx->input, &recv_buf, sizeof(recv_buf));
   if (ret != sizeof(recv_buf))
      return -1;

   sync = util_hash_table_get(ctx->sync_table,
                              intptr_to_pointer(recv_buf[VCMD_SYNC_READ_ID]));
   if (!sync)
      return -EEXIST;   /* NOTE(review): -ENOENT would read better, kept for compat */

   resp_buf[VTEST_CMD_LEN] = 2;
   resp_buf[VTEST_CMD_ID] = VCMD_SYNC_READ;
   resp_buf[VTEST_CMD_DATA_START] = (uint32_t)sync->value;
   resp_buf[VTEST_CMD_DATA_START + 1] = (uint32_t)(sync->value >> 32);

   ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
   return ret < 0 ? ret : 0;
}
1846 
/* Decode entry `index` from an array of {sync_id, value_lo, value_hi}
 * dword triplets; returns the sync id and stores the 64-bit value. */
static uint32_t vtest_sync_decode_id_and_value(const uint32_t *data,
                                               uint32_t index,
                                               uint64_t *value)
{
   const uint32_t *entry = data + index * 3;

   *value = (uint64_t)entry[2] << 32 | entry[1];
   return entry[0];
}
1858 
/* VCMD_SYNC_WRITE: set a sync's 64-bit value and signal any waiters
 * whose target value is now reached. */
int vtest_sync_write(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t recv_buf[VCMD_SYNC_WRITE_SIZE];
   struct vtest_sync *sync;
   uint32_t sync_id;
   uint64_t value;
   int ret;

   ret = ctx->input->read(ctx->input, &recv_buf, sizeof(recv_buf));
   if (ret != sizeof(recv_buf))
      return -1;

   sync_id = vtest_sync_decode_id_and_value(recv_buf, 0, &value);

   sync = util_hash_table_get(ctx->sync_table, intptr_to_pointer(sync_id));
   if (!sync)
      return -EEXIST;

   vtest_signal_sync(sync, value);

   return 0;
}
1884 
vtest_sync_wait_init(struct vtest_sync_wait * wait,struct vtest_context * ctx,uint32_t flags,uint32_t timeout,const uint32_t * syncs,uint32_t sync_count)1885 static int vtest_sync_wait_init(struct vtest_sync_wait *wait,
1886                                 struct vtest_context *ctx,
1887                                 uint32_t flags,
1888                                 uint32_t timeout,
1889                                 const uint32_t *syncs,
1890                                 uint32_t sync_count)
1891 {
1892    uint32_t i;
1893 
1894 #ifdef HAVE_EVENTFD_H
1895    wait->fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
1896 #else
1897    /* TODO pipe */
1898    wait->fd = -1;
1899 #endif
1900    if (wait->fd < 0)
1901       return -ENODEV;
1902 
1903    wait->flags = flags;
1904    wait->valid_before = vtest_gettime(timeout);
1905 
1906    wait->count = 0;
1907    wait->signaled_count = 0;
1908    for (i = 0; i < sync_count; i++) {
1909       struct vtest_sync *sync;
1910       uint32_t sync_id;
1911       uint64_t value;
1912 
1913       sync_id = vtest_sync_decode_id_and_value(syncs, i, &value);
1914 
1915       sync = util_hash_table_get(ctx->sync_table, intptr_to_pointer(sync_id));
1916       if (!sync)
1917          break;
1918 
1919       /* skip signaled */
1920       if (sync->value < value) {
1921          wait->syncs[wait->count] = vtest_ref_sync(sync);
1922          wait->values[wait->count] = value;
1923          wait->count++;
1924       }
1925    }
1926 
1927    if (i < sync_count) {
1928       vtest_free_sync_wait(wait);
1929       return -EEXIST;
1930    }
1931 
1932    return 0;
1933 }
1934 
vtest_sync_wait(uint32_t length_dw)1935 int vtest_sync_wait(uint32_t length_dw)
1936 {
1937    struct vtest_context *ctx = vtest_get_current_context();
1938    uint32_t resp_buf[VTEST_HDR_SIZE];
1939    uint32_t sync_count;
1940    uint32_t *sync_wait_buf;
1941    uint32_t flags;
1942    uint32_t timeout;
1943    struct vtest_sync_wait *wait;
1944    bool is_ready;
1945    int ret;
1946 
1947    if (length_dw > renderer.max_length / 4)
1948       return -EINVAL;
1949 
1950    if ((length_dw - 2) % 3)
1951       return -EINVAL;
1952    sync_count = (length_dw - 2) / 3;
1953 
1954    sync_wait_buf = malloc(length_dw * 4);
1955    if (!sync_wait_buf)
1956       return -ENOMEM;
1957 
1958    ret = ctx->input->read(ctx->input, sync_wait_buf, length_dw * 4);
1959    if (ret != (int)length_dw * 4) {
1960       free(sync_wait_buf);
1961       return -1;
1962    }
1963 
1964    flags = sync_wait_buf[VCMD_SYNC_WAIT_FLAGS];
1965    timeout = sync_wait_buf[VCMD_SYNC_WAIT_TIMEOUT];
1966 
1967    wait = malloc(sizeof(*wait) +
1968                  sizeof(*wait->syncs) * sync_count +
1969                  sizeof(*wait->values) * sync_count);
1970    if (!wait) {
1971       free(sync_wait_buf);
1972       return -ENOMEM;
1973    }
1974    wait->syncs = (void *)&wait[1];
1975    wait->values = (void *)&wait->syncs[sync_count];
1976 
1977    ret = vtest_sync_wait_init(wait, ctx, flags, timeout,
1978          sync_wait_buf + 2, sync_count);
1979    free(sync_wait_buf);
1980 
1981    if (ret)
1982       return ret;
1983 
1984    is_ready = !wait->count;
1985    if ((wait->flags & VCMD_SYNC_WAIT_FLAG_ANY) && wait->count < sync_count)
1986       is_ready = true;
1987 
1988    if (is_ready) {
1989       write_ready(wait->fd);
1990    }
1991 
1992    resp_buf[VTEST_CMD_LEN] = 0;
1993    resp_buf[VTEST_CMD_ID] = VCMD_SYNC_WAIT;
1994    ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
1995    if (ret >= 0)
1996       ret = vtest_send_fd(ctx->out_fd, wait->fd);
1997 
1998    if (ret || is_ready || !timeout)
1999       vtest_free_sync_wait(wait);
2000    else
2001       list_addtail(&wait->head, &ctx->sync_waits);
2002 
2003    return ret;
2004 }
2005 
vtest_submit_cmd2_batch(struct vtest_context * ctx,const struct vcmd_submit_cmd2_batch * batch,const uint32_t * cmds,const uint32_t * syncs)2006 static int vtest_submit_cmd2_batch(struct vtest_context *ctx,
2007                                    const struct vcmd_submit_cmd2_batch *batch,
2008                                    const uint32_t *cmds,
2009                                    const uint32_t *syncs)
2010 {
2011    struct vtest_timeline_submit *submit = NULL;
2012    uint32_t i;
2013    int ret;
2014 
2015    ret = virgl_renderer_submit_cmd((void *)cmds, ctx->ctx_id, batch->cmd_size);
2016    if (ret)
2017       return -EINVAL;
2018 
2019    if (!batch->sync_count)
2020       return 0;
2021 
2022    if (batch->flags & VCMD_SUBMIT_CMD2_FLAG_RING_IDX) {
2023       submit = malloc(sizeof(*submit) +
2024                       sizeof(*submit->syncs) * batch->sync_count +
2025                       sizeof(*submit->values) * batch->sync_count);
2026       if (!submit)
2027          return -ENOMEM;
2028 
2029       submit->count = batch->sync_count;
2030       submit->syncs = (void *)&submit[1];
2031       submit->values = (void *)&submit->syncs[batch->sync_count];
2032    }
2033 
2034    for (i = 0; i < batch->sync_count; i++) {
2035       struct vtest_sync *sync;
2036       uint32_t sync_id;
2037       uint64_t value;
2038 
2039       sync_id = vtest_sync_decode_id_and_value(syncs, i, &value);
2040 
2041       sync = util_hash_table_get(ctx->sync_table, intptr_to_pointer(sync_id));
2042       if (!sync)
2043          break;
2044 
2045       if (submit) {
2046          submit->syncs[i] = vtest_ref_sync(sync);
2047          submit->values[i] = value;
2048       } else {
2049          vtest_signal_sync(sync, value);
2050       }
2051    }
2052 
2053    if (i < batch->sync_count) {
2054       if (submit) {
2055          submit->count = i;
2056          vtest_free_timeline_submit(submit);
2057       }
2058       return -EEXIST;
2059    }
2060 
2061    if (submit) {
2062       struct vtest_timeline *timeline = &ctx->timelines[batch->ring_idx];
2063 
2064       submit->timeline = timeline;
2065       ret = virgl_renderer_context_create_fence(ctx->ctx_id,
2066                                                 VIRGL_RENDERER_FENCE_FLAG_MERGEABLE,
2067                                                 batch->ring_idx,
2068                                                 (uintptr_t)submit);
2069       if (ret) {
2070          vtest_free_timeline_submit(submit);
2071          return ret;
2072       }
2073 
2074       list_addtail(&submit->head, &timeline->submits);
2075    }
2076 
2077    return 0;
2078 }
2079 
vtest_submit_cmd2(uint32_t length_dw)2080 int vtest_submit_cmd2(uint32_t length_dw)
2081 {
2082    struct vtest_context *ctx = vtest_get_current_context();
2083    uint32_t *submit_cmd2_buf;
2084    uint32_t batch_count;
2085    uint32_t i;
2086    int ret;
2087 
2088    if (length_dw > renderer.max_length / 4)
2089       return -EINVAL;
2090 
2091    submit_cmd2_buf = malloc(length_dw * 4);
2092    if (!submit_cmd2_buf)
2093       return -ENOMEM;
2094 
2095    ret = ctx->input->read(ctx->input, submit_cmd2_buf, length_dw * 4);
2096    if (ret != (int)length_dw * 4) {
2097       free(submit_cmd2_buf);
2098       return -1;
2099    }
2100 
2101    batch_count = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_COUNT];
2102    if (VCMD_SUBMIT_CMD2_BATCH_COUNT + 6 * batch_count > length_dw) {
2103       free(submit_cmd2_buf);
2104       return -EINVAL;
2105    }
2106 
2107    for (i = 0; i < batch_count; i++) {
2108       const struct vcmd_submit_cmd2_batch batch = {
2109          .flags = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_FLAGS(i)],
2110          .cmd_offset = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_CMD_OFFSET(i)],
2111          .cmd_size = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_CMD_SIZE(i)],
2112          .sync_offset = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_OFFSET(i)],
2113          .sync_count = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_COUNT(i)],
2114          .ring_idx = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_RING_IDX(i)],
2115       };
2116       const uint32_t *cmds = &submit_cmd2_buf[batch.cmd_offset];
2117       const uint32_t *syncs = &submit_cmd2_buf[batch.sync_offset];
2118 
2119       if (batch.cmd_offset + batch.cmd_size > length_dw ||
2120           batch.sync_offset + batch.sync_count * 3 > length_dw ||
2121           batch.ring_idx >= VTEST_MAX_TIMELINE_COUNT) {
2122          free(submit_cmd2_buf);
2123          return -EINVAL;
2124       }
2125 
2126       ret = vtest_submit_cmd2_batch(ctx, &batch, cmds, syncs);
2127       if (ret) {
2128          free(submit_cmd2_buf);
2129          return ret;
2130       }
2131    }
2132 
2133    free(submit_cmd2_buf);
2134 
2135    return 0;
2136 }
2137 
/* Set the global cap on vtest command payload size.  Command handlers
 * compare incoming dword counts against renderer.max_length / 4, so
 * `length` is presumably a byte count — TODO confirm against callers. */
void vtest_set_max_length(uint32_t length)
{
   renderer.max_length = length;
}
2142