/*
 * Copyright © 2023 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "vdrm.h"

#include "drm-uapi/virtgpu_drm.h"
#include "util/libsync.h"
#include "util/log.h"
#include "util/perf/cpu_trace.h"

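/* Size of the shared memory blob allocated in init_shmem(); the tail of the
 * buffer (starting at shmem->rsp_mem_offset) holds response memory for host
 * replies.
 */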
#define SHMEM_SZ 0x4000

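/* Thin wrapper around drmIoctl() which expands DRM_IOCTL_##name from the
 * short ioctl name and emits a trace scope named after it.
 */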
#define virtgpu_ioctl(fd, name, args...) ({                          \
      MESA_TRACE_SCOPE(#name);                                       \
      int ret = drmIoctl((fd), DRM_IOCTL_ ## name, (args));          \
      ret;                                                           \
   })

struct virtgpu_device {
   struct vdrm_device base;
   uint32_t shmem_handle;
   int fd;
};
DEFINE_CAST(vdrm_device, virtgpu_device)

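/**
 * Submit a command buffer to the host via DRM_IOCTL_VIRTGPU_EXECBUFFER,
 * optionally importing an in-fence fd and/or returning an out-fence fd
 * (written back into p->fence_fd).  Caller must hold eb_lock.
 */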
static int
virtgpu_execbuf_locked(struct vdrm_device *vdev, struct vdrm_execbuf_params *p,
                       void *command, unsigned size)
{
   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);

   simple_mtx_assert_locked(&vdev->eb_lock);

   assert(size);

#define COND(bool, val) ((bool) ? (val) : 0)
   struct drm_virtgpu_execbuffer eb = {
         .flags = COND(p->needs_out_fence_fd, VIRTGPU_EXECBUF_FENCE_FD_OUT) |
                  COND(p->has_in_fence_fd, VIRTGPU_EXECBUF_FENCE_FD_IN) |
                  VIRTGPU_EXECBUF_RING_IDX,
         .size  = size,
         .command = (uintptr_t)command,
         .bo_handles = (uintptr_t)p->handles,
         .num_bo_handles = p->num_handles,
         .fence_fd = p->fence_fd,
         .ring_idx = p->ring_idx,
         .syncobj_stride = sizeof(struct drm_virtgpu_execbuffer_syncobj),
         .num_in_syncobjs = p->num_in_syncobjs,
         .num_out_syncobjs = p->num_out_syncobjs,
         .in_syncobjs = (uintptr_t)p->in_syncobjs,
         .out_syncobjs = (uintptr_t)p->out_syncobjs,
   };

   int ret = virtgpu_ioctl(vgdev->fd, VIRTGPU_EXECBUFFER, &eb);
   if (ret) {
      mesa_loge("EXECBUFFER failed: %s", strerror(errno));
      return ret;
   }

   if (p->needs_out_fence_fd)
      p->fence_fd = eb.fence_fd;

   return 0;
}

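/**
 * Flush the buffered ccmds (vdev->reqbuf) to the host in a single
 * execbuffer.  If 'fencep' is non-NULL, an out-fence fd for the submit
 * is returned through it.  Caller must hold eb_lock.
 */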
static int
virtgpu_flush_locked(struct vdrm_device *vdev, uintptr_t *fencep)
{
   int ret;

   simple_mtx_assert_locked(&vdev->eb_lock);

   if (!vdev->reqbuf_len)
      return 0;

   struct vdrm_execbuf_params p = {
      .needs_out_fence_fd = !!fencep,
   };
   ret = virtgpu_execbuf_locked(vdev, &p, vdev->reqbuf, vdev->reqbuf_len);
   if (ret)
      return ret;

   vdev->reqbuf_len = 0;
   vdev->reqbuf_cnt = 0;

   if (fencep)
      *fencep = p.fence_fd;

   return 0;
}

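/**
 * On virtgpu the opaque vdrm fence is a sync-file fd; wait for it to
 * signal and then close it.
 */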
static void
virtgpu_wait_fence(struct vdrm_device *vdev, uintptr_t fence)
{
   int fence_fd = fence;

   sync_wait(fence_fd, -1);
   close(fence_fd);
}

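/* Drop a GEM handle via DRM_IOCTL_GEM_CLOSE. */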
static void
gem_close(struct virtgpu_device *vgdev, uint32_t handle)
{
   virtgpu_ioctl(vgdev->fd, GEM_CLOSE, &(struct drm_gem_close){
      .handle = handle,
   });
}

/**
 * Note, does _not_ de-duplicate handles
 */
static uint32_t
virtgpu_dmabuf_to_handle(struct vdrm_device *vdev, int fd)
{
   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
   uint32_t handle;
   int ret;

   ret = drmPrimeFDToHandle(vgdev->fd, fd, &handle);
   if (ret) {
      mesa_loge("dmabuf import failed: %s", strerror(errno));
      return 0;
   }

   return handle;
}

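/**
 * Look up the host resource id for a GEM handle via
 * DRM_IOCTL_VIRTGPU_RESOURCE_INFO.  Returns 0 on failure.
 */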
static uint32_t
virtgpu_handle_to_res_id(struct vdrm_device *vdev, uint32_t handle)
{
   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
   struct drm_virtgpu_resource_info args = {
         .bo_handle = handle,
   };
   int ret;

   ret = virtgpu_ioctl(vgdev->fd, VIRTGPU_RESOURCE_INFO, &args);
   if (ret) {
      mesa_loge("failed to get resource info: %s", strerror(errno));
      return 0;
   }

   return args.res_handle;
}

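/**
 * Allocate a host-backed (HOST3D) blob, passing the ccmd 'req' to the host
 * as the creation command.  Returns the new GEM handle, or 0 on failure.
 * Caller must hold eb_lock.
 */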
static uint32_t
virtgpu_bo_create(struct vdrm_device *vdev, size_t size, uint32_t blob_flags,
                  uint64_t blob_id, struct vdrm_ccmd_req *req)
{
   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
   struct drm_virtgpu_resource_create_blob args = {
         .blob_mem   = VIRTGPU_BLOB_MEM_HOST3D,
         .blob_flags = blob_flags,
         .size       = size,
         .cmd_size   = req->len,
         .cmd        = (uintptr_t)req,
         .blob_id    = blob_id,
   };
   int ret;

   simple_mtx_assert_locked(&vdev->eb_lock);

   ret = virtgpu_ioctl(vgdev->fd, VIRTGPU_RESOURCE_CREATE_BLOB, &args);
   if (ret) {
      mesa_loge("buffer allocation failed: %s", strerror(errno));
      return 0;
   }

   return args.bo_handle;
}

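/**
 * mmap a virtgpu bo: DRM_IOCTL_VIRTGPU_MAP provides the mmap offset, and a
 * non-NULL 'placed_addr' requests a MAP_FIXED mapping at that address.
 */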
static int
map_handle(int fd, uint32_t handle, size_t size, void **map, void *placed_addr)
{
   struct drm_virtgpu_map req = {
      .handle = handle,
   };
   int ret;

   ret = virtgpu_ioctl(fd, VIRTGPU_MAP, &req);
   if (ret) {
      mesa_loge("VIRTGPU_MAP failed: %s", strerror(errno));
      return ret;
   }

   *map = mmap(placed_addr, size, PROT_READ | PROT_WRITE,
               MAP_SHARED | (placed_addr != NULL ? MAP_FIXED : 0),
               fd, req.offset);
   if (*map == MAP_FAILED) {
      mesa_loge("failed to map handle: %s", strerror(errno));
      return -1;
   }

   return 0;
}

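/**
 * Wait for the host to be done with a bo.  Returns -EBUSY if the bo is
 * still busy; other ioctl errors are ignored.
 */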
static int
virtgpu_bo_wait(struct vdrm_device *vdev, uint32_t handle)
{
   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
   struct drm_virtgpu_3d_wait args = {
         .handle = handle,
   };
   int ret;

   /* Side note, this ioctl is defined as IO_WR but should be IO_W: */
   ret = virtgpu_ioctl(vgdev->fd, VIRTGPU_WAIT, &args);
   if (ret && errno == EBUSY)
      return -EBUSY;

   return 0;
}

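/* Map a bo into the guest address space (see map_handle()). */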
static void *
virtgpu_bo_map(struct vdrm_device *vdev, uint32_t handle, size_t size, void *placed_addr)
{
   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
   void *map;
   int ret;

   ret = map_handle(vgdev->fd, handle, size, &map, placed_addr);
   if (ret)
      return NULL;

   return map;
}

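/**
 * Export a GEM handle as a dma-buf.  Returns the new fd, or the negative
 * error from drmPrimeHandleToFD() on failure.
 */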
static int
virtgpu_bo_export_dmabuf(struct vdrm_device *vdev, uint32_t handle)
{
   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);
   int ret, fd;

   ret = drmPrimeHandleToFD(vgdev->fd, handle, DRM_CLOEXEC | DRM_RDWR, &fd);
   if (ret) {
      mesa_loge("dmabuf export failed: %s", strerror(errno));
      return ret;
   }

   return fd;
}

static void
virtgpu_bo_close(struct vdrm_device *vdev, uint32_t handle)
{
   /* Flush any buffered commands first, so the detach_resource doesn't
    * overtake any buffered ccmd which references the resource:
    */
   if (vdev->reqbuf_len) {
      simple_mtx_lock(&vdev->eb_lock);
      virtgpu_flush_locked(vdev, NULL);
      simple_mtx_unlock(&vdev->eb_lock);
   }

   gem_close(to_virtgpu_device(vdev), handle);
}

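/* Tear down the shmem mapping and its GEM handle. */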
static void
virtgpu_close(struct vdrm_device *vdev)
{
   struct virtgpu_device *vgdev = to_virtgpu_device(vdev);

   munmap(vdev->shmem, SHMEM_SZ);
   gem_close(vgdev, vgdev->shmem_handle);
}

static const struct vdrm_device_funcs funcs = {
   .flush_locked = virtgpu_flush_locked,
   .wait_fence = virtgpu_wait_fence,
   .execbuf_locked = virtgpu_execbuf_locked,
   .dmabuf_to_handle = virtgpu_dmabuf_to_handle,
   .handle_to_res_id = virtgpu_handle_to_res_id,
   .bo_create = virtgpu_bo_create,
   .bo_wait = virtgpu_bo_wait,
   .bo_map = virtgpu_bo_map,
   .bo_export_dmabuf = virtgpu_bo_export_dmabuf,
   .bo_close = virtgpu_bo_close,
   .close = virtgpu_close,
};

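/**
 * Query the VIRGL_RENDERER_CAPSET_DRM capset from the host via
 * DRM_IOCTL_VIRTGPU_GET_CAPS.
 */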
static int
get_capset(int fd, struct virgl_renderer_capset_drm *caps)
{
   struct drm_virtgpu_get_caps args = {
         .cap_set_id = VIRGL_RENDERER_CAPSET_DRM,
         .cap_set_ver = 0,
         .addr = (uintptr_t)caps,
         .size = sizeof(*caps),
   };

   memset(caps, 0, sizeof(*caps));

   return virtgpu_ioctl(fd, VIRTGPU_GET_CAPS, &args);
}

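/**
 * Bind this fd to a DRM-capset context with 64 rings via
 * DRM_IOCTL_VIRTGPU_CONTEXT_INIT.
 */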
static int
set_context(int fd)
{
   struct drm_virtgpu_context_set_param params[] = {
         { VIRTGPU_CONTEXT_PARAM_CAPSET_ID, VIRGL_RENDERER_CAPSET_DRM },
         { VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 64 },
   };
   struct drm_virtgpu_context_init args = {
      .num_params = ARRAY_SIZE(params),
      .ctx_set_params = (uintptr_t)params,
   };

   return virtgpu_ioctl(fd, VIRTGPU_CONTEXT_INIT, &args);
}

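/**
 * Allocate and map the shared memory used for guest<->host communication:
 * a mappable HOST3D blob with blob_id 0.  The tail of the buffer, from
 * shmem->rsp_mem_offset onward, is used as response memory.
 */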
static int
init_shmem(struct virtgpu_device *vgdev)
{
   struct vdrm_device *vdev = &vgdev->base;
   struct drm_virtgpu_resource_create_blob args = {
         .blob_mem   = VIRTGPU_BLOB_MEM_HOST3D,
         .blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
         .size       = SHMEM_SZ,
         .blob_id    = 0,
   };
   int ret;

   ret = virtgpu_ioctl(vgdev->fd, VIRTGPU_RESOURCE_CREATE_BLOB, &args);
   if (ret) {
      mesa_logi("failed to allocate shmem buffer: %s", strerror(errno));
      return ret;
   }

   vgdev->shmem_handle = args.bo_handle;

   ret = map_handle(vgdev->fd, vgdev->shmem_handle, args.size, (void **)&vdev->shmem, NULL);
   if (ret) {
      gem_close(vgdev, vgdev->shmem_handle);
      vgdev->shmem_handle = 0;
      return ret;
   }

   uint32_t offset = vdev->shmem->rsp_mem_offset;
   vdev->rsp_mem_len = args.size - offset;
   vdev->rsp_mem = &((uint8_t *)vdev->shmem)[offset];

   return 0;
}

struct vdrm_device * vdrm_virtgpu_connect(int fd, uint32_t context_type);

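/**
 * Establish a vdrm connection on a virtgpu fd: verify that the host's DRM
 * capset matches the requested 'context_type', initialize the context, and
 * set up the shmem buffer.  Returns NULL on any failure.
 */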
struct vdrm_device *
vdrm_virtgpu_connect(int fd, uint32_t context_type)
{
   struct virgl_renderer_capset_drm caps;
   struct virtgpu_device *vgdev;
   struct vdrm_device *vdev;
   int ret;

   ret = get_capset(fd, &caps);
   if (ret) {
      mesa_logi("could not get caps: %s", strerror(errno));
      return NULL;
   }

   if (caps.context_type != context_type) {
      mesa_logi("wrong context_type: %u", caps.context_type);
      return NULL;
   }

   ret = set_context(fd);
   if (ret) {
      mesa_logi("Could not set context type: %s", strerror(errno));
      return NULL;
   }

   vgdev = calloc(1, sizeof(*vgdev));
   if (!vgdev)
      return NULL;

   vgdev->fd = fd;

   ret = init_shmem(vgdev);
   if (ret) {
      free(vgdev);
      return NULL;
   }

   vdev = &vgdev->base;
   vdev->caps = caps;
   vdev->funcs = &funcs;

   return vdev;
}