/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "util/os_mman.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/simple_mtx.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "util/u_pointer.h"
#include "frontend/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"
#include "virtio-gpu/virgl_protocol.h"

#include <xf86drm.h>
#include <libsync.h>
#include "drm-uapi/virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"

// Delete local definitions when virglrenderer_hw.h becomes public
#define VIRGL_DRM_CAPSET_VIRGL  1
#define VIRGL_DRM_CAPSET_VIRGL2 2

#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
#define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(0, 1)
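
/* Added commentary: the DRM version is packed with the major in the high 16
 * bits and the minor in the low 16 bits, so VIRGL_DRM_VERSION(0, 1) packs to
 * 0x00000001 and comparing packed values orders versions correctly, e.g.
 * drm_version >= VIRGL_DRM_VERSION_FENCE_FD tests for at least version 0.1.
 */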

/* Gets a pointer to the virgl_hw_res containing the pointed to cache entry. */
#define cache_entry_container_res(ptr) \
    (struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))
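
/* Usage sketch (added commentary, not from the original source): this is the
 * usual container_of pattern, valid only for cache entries embedded in a
 * struct virgl_hw_res:
 *
 *    struct virgl_resource_cache_entry *entry = ...;
 *    struct virgl_hw_res *res = cache_entry_container_res(entry);
 */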

static inline bool can_cache_resource(uint32_t bind)
{
   return bind == VIRGL_BIND_CONSTANT_BUFFER ||
          bind == VIRGL_BIND_INDEX_BUFFER ||
          bind == VIRGL_BIND_VERTEX_BUFFER ||
          bind == VIRGL_BIND_CUSTOM ||
          bind == VIRGL_BIND_STAGING ||
          bind == VIRGL_BIND_DEPTH_STENCIL ||
          bind == VIRGL_BIND_RENDER_TARGET ||
          bind == 0;
}

static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   mtx_lock(&qdws->bo_handles_mutex);

   /* We intentionally avoid taking the lock in
    * virgl_drm_resource_reference. Now that the
    * lock is taken, we need to check the refcount
    * again. */
   if (pipe_is_referenced(&res->reference)) {
      mtx_unlock(&qdws->bo_handles_mutex);
      return;
   }

   _mesa_hash_table_remove_key(qdws->bo_handles,
                               (void *)(uintptr_t)res->bo_handle);
   if (res->flink_name)
      _mesa_hash_table_remove_key(qdws->bo_names,
                                  (void *)(uintptr_t)res->flink_name);
   mtx_unlock(&qdws->bo_handles_mutex);
   if (res->ptr)
      os_munmap(res->ptr, res->size);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}
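
/* Added commentary: the busy check below is a non-blocking probe.
 * DRM_IOCTL_VIRTGPU_WAIT with VIRTGPU_WAIT_NOWAIT fails with EBUSY while the
 * host still holds the object busy; on any other outcome the maybe_busy hint
 * is cleared so later checks can skip the ioctl entirely.
 */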
static bool virgl_drm_resource_is_busy(struct virgl_winsys *vws,
                                       struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return false;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return true;

   p_atomic_set(&res->maybe_busy, false);

   return false;
}

static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_resource_cache_flush(&qdws->cache);

   _mesa_hash_table_destroy(qdws->bo_handles, NULL);
   _mesa_hash_table_destroy(qdws->bo_names, NULL);
   mtx_destroy(&qdws->bo_handles_mutex);
   mtx_destroy(&qdws->mutex);

   FREE(qdws);
}

static void virgl_drm_resource_reference(struct virgl_winsys *qws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *old = *dres;

   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old->bind) ||
          p_atomic_read(&old->external)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         virgl_resource_cache_add(&qdws->cache, &old->cache_entry);
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}
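
/* Added commentary (a sketch of the flow as visible in this file): blob
 * resources are created in two linked steps. The pipe-level template is
 * encoded as a VIRGL_CCMD_PIPE_RESOURCE_CREATE command and handed to the
 * kernel together with the RESOURCE_CREATE_BLOB ioctl; the guest-chosen
 * blob_id appears in both so virglrenderer can match the host pipe resource
 * to the blob memory it is asked to create.
 */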
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_blob(struct virgl_winsys *qws,
                                      enum pipe_texture_target target,
                                      uint32_t format,
                                      uint32_t bind,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t depth,
                                      uint32_t array_size,
                                      uint32_t last_level,
                                      uint32_t nr_samples,
                                      uint32_t flags,
                                      uint32_t size)
{
   int ret;
   int32_t blob_id;
   uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
   struct virgl_hw_res *res;
   struct virgl_resource_params params = { .size = size,
                                           .bind = bind,
                                           .format = format,
                                           .flags = flags,
                                           .nr_samples = nr_samples,
                                           .width = width,
                                           .height = height,
                                           .depth = depth,
                                           .array_size = array_size,
                                           .last_level = last_level,
                                           .target = target };

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   /* Make sure the blob is page aligned. */
   if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
                VIRGL_RESOURCE_FLAG_MAP_COHERENT)) {
      width = ALIGN(width, getpagesize());
      size = ALIGN(size, getpagesize());
   }

   blob_id = p_atomic_inc_return(&qdws->blob_id);
   cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
   cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = format;
   cmd[VIRGL_PIPE_RES_CREATE_BIND] = bind;
   cmd[VIRGL_PIPE_RES_CREATE_TARGET] = target;
   cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = width;
   cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = height;
   cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = depth;
   cmd[VIRGL_PIPE_RES_CREATE_ARRAY_SIZE] = array_size;
   cmd[VIRGL_PIPE_RES_CREATE_LAST_LEVEL] = last_level;
   cmd[VIRGL_PIPE_RES_CREATE_NR_SAMPLES] = nr_samples;
   cmd[VIRGL_PIPE_RES_CREATE_FLAGS] = flags;
   cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = blob_id;

   drm_rc_blob.cmd = (unsigned long)(void *)&cmd;
   drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
   drm_rc_blob.size = size;
   drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
   drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
   drm_rc_blob.blob_id = (uint64_t) blob_id;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;
   res->res_handle = drm_rc_blob.res_handle;
   res->bo_handle = drm_rc_blob.bo_handle;
   res->size = size;
   res->flags = flags;
   res->maybe_untyped = false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);
   virgl_resource_cache_entry_init(&res->cache_entry, params);
   return res;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size,
                                 bool for_fencing)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);
   struct virgl_resource_params params = { .size = size,
                                           .bind = bind,
                                           .format = format,
                                           .flags = 0,
                                           .nr_samples = nr_samples,
                                           .width = width,
                                           .height = height,
                                           .depth = depth,
                                           .array_size = array_size,
                                           .last_level = last_level,
                                           .target = target };

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = pipe_to_virgl_format(format);
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   res->target = target;
   res->maybe_untyped = false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);

   /* A newly created resource is considered busy by the kernel until the
    * command is retired. But for our purposes, we can consider it idle
    * unless it is used for fencing.
    */
   p_atomic_set(&res->maybe_busy, for_fencing);

   virgl_resource_cache_entry_init(&res->cache_entry, params);

   return res;
}

/*
 * Previously, with DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, all host resources had
 * a guest memory shadow resource with size = stride * bpp. Virglrenderer
 * would guess the stride implicitly when performing transfer operations, if
 * the stride wasn't specified. Interestingly, vtest would specify the stride.
 *
 * Guessing the stride breaks down with YUV images, which may be imported into
 * Mesa as three R8 images. It also doesn't work if an external allocator
 * (i.e., minigbm) decides to use a stride not equal to width * bpp. With blob
 * resources, the size = stride * bpp restriction no longer holds, so use the
 * explicit strides passed into Mesa.
 */
static inline bool use_explicit_stride(struct virgl_hw_res *res, uint32_t level,
                                       uint32_t depth)
{
   return (params[param_resource_blob].value &&
           res->blob_mem == VIRTGPU_BLOB_MEM_HOST3D_GUEST &&
           res->target == PIPE_TEXTURE_2D &&
           level == 0 && depth == 1);
}
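
/* Added note: params[] referenced above is assumed to be the module-level
 * table of virtgpu getparam results declared alongside this winsys; it is
 * populated once in virgl_drm_winsys_create() below, so by the time
 * use_explicit_stride() runs, the cached param_resource_blob value is valid.
 */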

static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box.x = box->x;
   tohostcmd.box.y = box->y;
   tohostcmd.box.z = box->z;
   tohostcmd.box.w = box->width;
   tohostcmd.box.h = box->height;
   tohostcmd.box.d = box->depth;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;

   if (use_explicit_stride(res, level, box->depth))
      tohostcmd.stride = stride;

   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}

static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   fromhostcmd.box.x = box->x;
   fromhostcmd.box.y = box->y;
   fromhostcmd.box.z = box->z;
   fromhostcmd.box.w = box->width;
   fromhostcmd.box.h = box->height;
   fromhostcmd.box.d = box->depth;

   if (use_explicit_stride(res, level, box->depth))
      fromhostcmd.stride = stride;

   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
                                       enum pipe_texture_target target,
                                       const void *map_front_private,
                                       uint32_t format,
                                       uint32_t bind,
                                       uint32_t width,
                                       uint32_t height,
                                       uint32_t depth,
                                       uint32_t array_size,
                                       uint32_t last_level,
                                       uint32_t nr_samples,
                                       uint32_t flags,
                                       uint32_t size)
{
   bool need_sync = false;
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *res;
   struct virgl_resource_cache_entry *entry;
   struct virgl_resource_params params = { .size = size,
                                           .bind = bind,
                                           .format = format,
                                           .flags = flags,
                                           .nr_samples = nr_samples,
                                           .width = width,
                                           .height = height,
                                           .depth = depth,
                                           .array_size = array_size,
                                           .last_level = last_level,
                                           .target = target };

   if (!can_cache_resource(bind))
      goto alloc;

   mtx_lock(&qdws->mutex);

   entry = virgl_resource_cache_remove_compatible(&qdws->cache, params);
   if (entry) {
      res = cache_entry_container_res(entry);
      mtx_unlock(&qdws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   mtx_unlock(&qdws->mutex);

alloc:
   /* A PIPE_BUFFER with the VIRGL_BIND_CUSTOM flag has its data accessed at
    * attach time; to avoid race conditions we need to treat it as busy during
    * creation.
    */
   if (target == PIPE_BUFFER && (bind & VIRGL_BIND_CUSTOM))
      need_sync = true;

   if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
                VIRGL_RESOURCE_FLAG_MAP_COHERENT))
      res = virgl_drm_winsys_resource_create_blob(qws, target, format, bind,
                                                  width, height, depth,
                                                  array_size, last_level,
                                                  nr_samples, flags, size);
   else
      res = virgl_drm_winsys_resource_create(qws, target, format, bind, width,
                                             height, depth, array_size,
                                             last_level, nr_samples, size,
                                             need_sync);
   return res;
}

static uint32_t
virgl_drm_winsys_resource_get_storage_size(struct virgl_winsys *qws,
                                           struct virgl_hw_res *res)
{
   return res->size;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle,
                                        UNUSED struct pipe_resource *templ,
                                        uint32_t *plane,
                                        uint32_t *stride,
                                        uint32_t *plane_offset,
                                        uint64_t *modifier,
                                        uint32_t *blob_mem)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res = NULL;
   uint32_t handle = whandle->handle;

   if (whandle->plane >= VIRGL_MAX_PLANE_COUNT) {
      return NULL;
   }

   if (whandle->offset != 0 && whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      _debug_printf("attempt to import unsupported winsys offset %u\n",
                    whandle->offset);
      return NULL;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      *plane = whandle->plane;
      *stride = whandle->stride;
      *plane_offset = whandle->offset;
      *modifier = whandle->modifier;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   /* We must maintain a list of pairs <handle, bo>, so that we always return
    * the same BO for one particular handle. If we didn't do that and created
    * more than one BO for the same handle and then relocated them in a CS,
    * we would hit a deadlock in the kernel.
    *
    * The list of pairs is guarded by a mutex, of course. */
   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r)
         goto done;
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   } else {
      /* Unknown handle type */
      goto done;
   }

   if (res) {
      /* qdws->bo_{names,handles} hold weak pointers to virgl_hw_res. Because
       * virgl_drm_resource_reference does not take qdws->bo_handles_mutex
       * until it enters virgl_hw_res_destroy, there is a small window where
       * the refcount can drop to zero. Call p_atomic_inc directly instead of
       * virgl_drm_resource_reference to avoid hitting assert failures.
       */
      p_atomic_inc(&res->reference.count);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
      res->flink_name = whandle->handle;
   }

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;
   res->blob_mem = info_arg.blob_mem;
   *blob_mem = info_arg.blob_mem;

   res->size = info_arg.size;
   res->maybe_untyped = info_arg.blob_mem ? true : false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, true);
   res->num_cs_references = 0;

   if (res->flink_name)
      _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
   _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}

static void
virgl_drm_winsys_resource_set_type(struct virgl_winsys *qws,
                                   struct virgl_hw_res *res,
                                   uint32_t format, uint32_t bind,
                                   uint32_t width, uint32_t height,
                                   uint32_t usage, uint64_t modifier,
                                   uint32_t plane_count,
                                   const uint32_t *plane_strides,
                                   const uint32_t *plane_offsets)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   uint32_t cmd[VIRGL_PIPE_RES_SET_TYPE_SIZE(VIRGL_MAX_PLANE_COUNT) + 1];
   struct drm_virtgpu_execbuffer eb;
   int ret;

   mtx_lock(&qdws->bo_handles_mutex);

   if (!res->maybe_untyped) {
      mtx_unlock(&qdws->bo_handles_mutex);
      return;
   }
   res->maybe_untyped = false;

   assert(plane_count && plane_count <= VIRGL_MAX_PLANE_COUNT);

   cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE, 0, VIRGL_PIPE_RES_SET_TYPE_SIZE(plane_count));
   cmd[VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE] = res->res_handle;
   cmd[VIRGL_PIPE_RES_SET_TYPE_FORMAT] = format;
   cmd[VIRGL_PIPE_RES_SET_TYPE_BIND] = bind;
   cmd[VIRGL_PIPE_RES_SET_TYPE_WIDTH] = width;
   cmd[VIRGL_PIPE_RES_SET_TYPE_HEIGHT] = height;
   cmd[VIRGL_PIPE_RES_SET_TYPE_USAGE] = usage;
   cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO] = (uint32_t)modifier;
   cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI] = (uint32_t)(modifier >> 32);
   for (uint32_t i = 0; i < plane_count; i++) {
      cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(i)] = plane_strides[i];
      cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(i)] = plane_offsets[i];
   }

   memset(&eb, 0, sizeof(eb));
   eb.command = (uintptr_t)cmd;
   eb.size = (1 + VIRGL_PIPE_RES_SET_TYPE_SIZE(plane_count)) * 4;
   eb.num_bo_handles = 1;
   eb.bo_handles = (uintptr_t)&res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("failed to set resource type: %s", strerror(errno));

   mtx_unlock(&qdws->bo_handles_mutex);
}

static bool virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                 struct virgl_hw_res *res,
                                                 uint32_t stride,
                                                 struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return false;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      if (!res->flink_name) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return false;
         }
         res->flink_name = flink.name;

         mtx_lock(&qdws->bo_handles_mutex);
         _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink_name;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return false;
      mtx_lock(&qdws->bo_handles_mutex);
      _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   p_atomic_set(&res->external, true);

   whandle->stride = stride;
   return true;
}

static void *virgl_drm_resource_map(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   if (res->ptr)
      return res->ptr;

   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;
}

static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret)
      _debug_printf("waiting got error - %d, slow gpu or hang?\n", errno);

   p_atomic_set(&res->maybe_busy, false);
}

static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf *cbuf,
                                     int initial_size)
{
   cbuf->nres = initial_size;
   cbuf->cres = 0;

   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo)
      return false;

   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      return false;
   }

   return true;
}

static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }
   FREE(cbuf->res_hlist);
   FREE(cbuf->res_bo);
}
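
/* Added commentary: is_handle_added plus reloc_indices_hashlist act as a
 * small direct-mapped cache over the cbuf's resource list. The resource
 * handle is masked with sizeof(is_handle_added) - 1, which assumes that
 * array size is a power of two; on a mismatch within the bucket, the code
 * falls back to a linear scan and refreshes the cached index.
 */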
static bool virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                 struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}

static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);

   if (cbuf->cres >= cbuf->nres) {
      unsigned new_nres = cbuf->nres + 256;
      void *new_ptr = REALLOC(cbuf->res_bo,
                              cbuf->nres * sizeof(struct virgl_hw_buf*),
                              new_nres * sizeof(struct virgl_hw_buf*));
      if (!new_ptr) {
         _debug_printf("failure to add relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_bo = new_ptr;

      new_ptr = REALLOC(cbuf->res_hlist,
                        cbuf->nres * sizeof(uint32_t),
                        new_nres * sizeof(uint32_t));
      if (!new_ptr) {
         _debug_printf("failure to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
         return;
      }
      cbuf->res_hlist = new_ptr;
      cbuf->nres = new_nres;
   }

   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(&qdws->base, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = true;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}

/* This is called after the cbuf is submitted. */
static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      /* mark all BOs busy after submission */
      p_atomic_set(&cbuf->res_bo[i]->maybe_busy, true);

      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }

   cbuf->cres = 0;

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
}

static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf,
                               struct virgl_hw_res *res, bool write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   bool already_in_list = virgl_drm_lookup_res(cbuf, res);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   if (!already_in_list)
      virgl_drm_add_res(qdws, cbuf, res);
}

static bool virgl_drm_res_is_ref(struct virgl_winsys *qws,
                                 struct virgl_cmd_buf *_cbuf,
                                 struct virgl_hw_res *res)
{
   if (!p_atomic_read(&res->num_cs_references))
      return false;

   return true;
}

static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
                                                      uint32_t size)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   if (!virgl_drm_alloc_res_list(cbuf, 512)) {
      FREE(cbuf);
      return NULL;
   }

   cbuf->buf = CALLOC(size, sizeof(uint32_t));
   if (!cbuf->buf) {
      FREE(cbuf->res_hlist);
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->in_fence_fd = -1;
   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}

static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);

   virgl_drm_free_res_list(cbuf);

   FREE(cbuf->buf);
   FREE(cbuf);
}

static struct pipe_fence_handle *
virgl_drm_fence_create(struct virgl_winsys *vws, int fd, bool external)
{
   struct virgl_drm_fence *fence;

   assert(vws->supports_fences);

   if (external) {
      fd = os_dupfd_cloexec(fd);
      if (fd < 0)
         return NULL;
   }

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence) {
      close(fd);
      return NULL;
   }

   fence->fd = fd;
   fence->external = external;

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
virgl_drm_fence_create_legacy(struct virgl_winsys *vws)
{
   struct virgl_drm_fence *fence;

   assert(!vws->supports_fences);

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence)
      return NULL;
   fence->fd = -1;

   /* Resources for fences should not be from the cache, since we are basing
    * the fence status on the resource creation busy status.
    */
   fence->hw_res = virgl_drm_winsys_resource_create(vws, PIPE_BUFFER,
         PIPE_FORMAT_R8_UNORM, VIRGL_BIND_CUSTOM, 8, 1, 1, 0, 0, 0, 8, true);
   if (!fence->hw_res) {
      FREE(fence);
      return NULL;
   }

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf,
                                       struct pipe_fence_handle **fence)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   eb.fence_fd = -1;
   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
         eb.fence_fd = cbuf->in_fence_fd;
      }

      if (fence != NULL)
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
   } else {
      assert(cbuf->in_fence_fd < 0);
   }

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         close(cbuf->in_fence_fd);
         cbuf->in_fence_fd = -1;
      }

      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create(qws, eb.fence_fd, false);
   } else {
      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create_legacy(qws);
   }

   virgl_drm_clear_res_list(cbuf);

   return ret;
}

static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   virgl_ws_fill_new_caps_defaults(caps);

   memset(&args, 0, sizeof(args));
   if (params[param_capset_fix].value) {
      /* if we have the query fix - try and get cap set id 2 first */
      args.cap_set_id = 2;
      args.size = sizeof(union virgl_caps);
   } else {
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
   }
   args.addr = (unsigned long)&caps->caps;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   if (ret == -1 && errno == EINVAL) {
      /* Fallback to v1 */
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
      ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
      if (ret == -1)
         return ret;
   }
   return ret;
}

static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
   if (!vws->supports_fences)
      return NULL;

   return virgl_drm_fence_create(vws, fd, true);
}
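
/* Added commentary: two fence flavors coexist below. When the kernel
 * supports fence fds, a fence wraps a sync_file and waits go through
 * sync_wait(); otherwise a fence wraps a small dummy resource created at
 * submit time, and "signaled" is approximated by polling whether that
 * resource is still busy on the host.
 */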
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *_fence,
                             uint64_t timeout)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (vws->supports_fences) {
      uint64_t timeout_ms;
      int timeout_poll;

      if (timeout == 0)
         return sync_wait(fence->fd, 0) == 0;

      timeout_ms = timeout / 1000000;
      /* round up */
      if (timeout_ms * 1000000 < timeout)
         timeout_ms++;

      timeout_poll = timeout_ms <= INT_MAX ? (int) timeout_ms : -1;

      return sync_wait(fence->fd, timeout_poll) == 0;
   }

   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vws, fence->hw_res);

   if (timeout != OS_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
         if (os_time_get() - start_time >= timeout)
            return false;
         os_time_sleep(10);
      }
      return true;
   }
   virgl_drm_resource_wait(vws, fence->hw_res);

   return true;
}

static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_fence *dfence = virgl_drm_fence(*dst);
   struct virgl_drm_fence *sfence = virgl_drm_fence(src);

   if (pipe_reference(&dfence->reference, &sfence->reference)) {
      if (vws->supports_fences) {
         close(dfence->fd);
      } else {
         virgl_drm_resource_reference(vws, &dfence->hw_res, NULL);
      }
      FREE(dfence);
   }

   *dst = src;
}

static void virgl_fence_server_sync(struct virgl_winsys *vws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct pipe_fence_handle *_fence)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return;

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->external)
      return;

   sync_accumulate("virgl", &cbuf->in_fence_fd, fence->fd);
}

static int virgl_fence_get_fd(struct virgl_winsys *vws,
                              struct pipe_fence_handle *_fence)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return -1;

   return os_dupfd_cloexec(fence->fd);
}

static int virgl_drm_get_version(int fd)
{
   int ret;
   drmVersionPtr version;

   version = drmGetVersion(fd);

   if (!version)
      ret = -EFAULT;
   else if (version->version_major != 0)
      ret = -EINVAL;
   else
      ret = VIRGL_DRM_VERSION(0, version->version_minor);

   drmFreeVersion(version);

   return ret;
}

static bool
virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   return virgl_drm_resource_is_busy(&qdws->base, res);
}

static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}

static int virgl_init_context(int drmFD)
{
   int ret;
   struct drm_virtgpu_context_init init = { 0 };
   struct drm_virtgpu_context_set_param ctx_set_param = { 0 };
   uint64_t supports_capset_virgl, supports_capset_virgl2;
   supports_capset_virgl = supports_capset_virgl2 = 0;

   supports_capset_virgl = ((1 << VIRGL_DRM_CAPSET_VIRGL) &
                            params[param_supported_capset_ids].value);

   supports_capset_virgl2 = ((1 << VIRGL_DRM_CAPSET_VIRGL2) &
                             params[param_supported_capset_ids].value);

   if (!supports_capset_virgl && !supports_capset_virgl2) {
      _debug_printf("No virgl contexts available on host");
      return -EINVAL;
   }

   ctx_set_param.param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
   ctx_set_param.value = (supports_capset_virgl2) ?
                         VIRGL_DRM_CAPSET_VIRGL2 :
                         VIRGL_DRM_CAPSET_VIRGL;

   init.ctx_set_params = (unsigned long)(void *)&ctx_set_param;
   init.num_params = 1;

   ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
   /*
    * EEXIST happens when a compositor does DUMB_CREATE before initializing
    * virgl.
    */
   if (ret && errno != EEXIST) {
      _debug_printf("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s\n",
                    strerror(errno));
      return -1;
   }

   return 0;
}

static int
virgl_drm_winsys_get_fd(struct virgl_winsys *vws)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);

   return vdws->fd;
}
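
/* Added commentary: winsys creation below probes every known virtgpu param
 * up front (a param the kernel does not recognize simply reads back as 0),
 * requires 3D support, and, when the kernel supports context init, binds a
 * per-fd context to an explicit capset via virgl_init_context().
 */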
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   static const unsigned CACHE_TIMEOUT_USEC = 1000000;
   struct virgl_drm_winsys *qdws;
   int drm_version;
   int ret;

   for (uint32_t i = 0; i < ARRAY_SIZE(params); i++) {
      struct drm_virtgpu_getparam getparam = { 0 };
      uint64_t value = 0;
      getparam.param = params[i].param;
      getparam.value = (uint64_t)(uintptr_t)&value;
      ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
      params[i].value = (ret == 0) ? value : 0;
   }

   if (!params[param_3d_features].value)
      return NULL;

   drm_version = virgl_drm_get_version(drmFD);
   if (drm_version < 0)
      return NULL;

   if (params[param_context_init].value) {
      ret = virgl_init_context(drmFD);
      if (ret)
         return NULL;
   }

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   virgl_resource_cache_init(&qdws->cache, CACHE_TIMEOUT_USEC,
                             virgl_drm_resource_cache_entry_is_busy,
                             virgl_drm_resource_cache_entry_release,
                             qdws);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   p_atomic_set(&qdws->blob_id, 0);

   qdws->bo_handles = util_hash_table_create_ptr_keys();
   qdws->bo_names = util_hash_table_create_ptr_keys();
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_reference = virgl_drm_resource_reference;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_set_type = virgl_drm_winsys_resource_set_type;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_get_storage_size = virgl_drm_winsys_resource_get_storage_size;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.resource_is_busy = virgl_drm_resource_is_busy;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;
   qdws->base.fence_server_sync = virgl_fence_server_sync;
   qdws->base.fence_get_fd = virgl_fence_get_fd;
   qdws->base.get_caps = virgl_drm_get_caps;
   qdws->base.get_fd = virgl_drm_winsys_get_fd;
   qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
   qdws->base.supports_encoded_transfers = 1;

   qdws->base.supports_coherent = params[param_resource_blob].value &&
                                  params[param_host_visible].value;
   return &qdws->base;
}

static struct hash_table *fd_tab = NULL;
static simple_mtx_t virgl_screen_mutex = SIMPLE_MTX_INITIALIZER;

static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   bool destroy;

   simple_mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      _mesa_hash_table_remove_key(fd_tab, intptr_to_pointer(fd));
      close(fd);
   }
   simple_mtx_unlock(&virgl_screen_mutex);

   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}

static uint32_t
hash_fd(const void *key)
{
   int fd = pointer_to_intptr(key);

   return _mesa_hash_int(&fd);
}

static bool
equal_fd(const void *key1, const void *key2)
{
   int ret;
   int fd1 = pointer_to_intptr(key1);
   int fd2 = pointer_to_intptr(key2);

   /* Since the scope of a prime handle is limited to the drm_file,
    * virgl_screen is only shared at the drm_file level,
    * not at the device (/dev/dri/cardX) level.
    */
   ret = os_same_file_description(fd1, fd2);
   if (ret == 0) {
      return true;
   } else if (ret < 0) {
      static bool logged;

      if (!logged) {
         _debug_printf("virgl: os_same_file_description couldn't "
                       "determine if two DRM fds reference the same "
                       "file description.\n"
                       "If they do, bad things may happen!\n");
         logged = true;
      }
   }

   return false;
}

struct pipe_screen *
virgl_drm_screen_create(int fd, const struct pipe_screen_config *config)
{
   struct pipe_screen *pscreen = NULL;

   simple_mtx_lock(&virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = _mesa_hash_table_create(NULL, hash_fd, equal_fd);
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = os_dupfd_cloexec(fd);

      vws = virgl_drm_winsys_create(dup_fd);
      if (!vws) {
         close(dup_fd);
         goto unlock;
      }

      pscreen = virgl_create_screen(vws, config);
      if (pscreen) {
         _mesa_hash_table_insert(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack, to avoid circular linkage dependency,
          * ie. pipe driver having to call in to winsys, we
          * override the pipe driver's screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   simple_mtx_unlock(&virgl_screen_mutex);
   return pscreen;
}