/*
 * Copyright © 2023 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

#include "util/u_math.h"
#include "util/perf/cpu_trace.h"

#include "vdrm.h"

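/* Backend constructor; currently virtgpu (implemented in vdrm_virtgpu.c)
 * is the only backend, hence the TODO below about vtest.
 */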
struct vdrm_device *vdrm_virtgpu_connect(int fd, uint32_t context_type);

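/**
 * Connect to the host via the given virtgpu device fd, creating a
 * context of the given type.  Returns NULL on failure.
 */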
struct vdrm_device *
vdrm_device_connect(int fd, uint32_t context_type)
{
   struct vdrm_device *vdev;

   // TODO vtest vs virtio..
   vdev = vdrm_virtgpu_connect(fd, context_type);
   if (!vdev)
      return NULL;

   simple_mtx_init(&vdev->rsp_lock, mtx_plain);
   simple_mtx_init(&vdev->eb_lock, mtx_plain);

   return vdev;
}

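/**
 * Tear down the backend connection and free the device.
 */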
void
vdrm_device_close(struct vdrm_device *vdev)
{
   vdev->funcs->close(vdev);
   free(vdev);
}

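/**
 * Create a blob buffer from the given creation request, returning its
 * handle.  The request is submitted on the bo-create path (rather than
 * buffered) so that creation is not reordered against prior buffered
 * cmds.
 */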
uint32_t
vdrm_bo_create(struct vdrm_device *vdev, size_t size, uint32_t blob_flags,
               uint64_t blob_id, struct vdrm_ccmd_req *req)
{
   uint32_t handle;

   simple_mtx_lock(&vdev->eb_lock);

   /* flush any buffered cmds so they are seen by the host *prior* to
    * the cmds associated with bo creation.
    */
   vdev->funcs->flush_locked(vdev, NULL);

   req->seqno = ++vdev->next_seqno;

   handle = vdev->funcs->bo_create(vdev, size, blob_flags, blob_id, req);

   simple_mtx_unlock(&vdev->eb_lock);

   return handle;
}

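/**
 * Allocate space in the shared response memory for the host's reply to
 * the given request.  Allocation simply wraps back to the start when
 * the end of the buffer is reached; it is assumed (not enforced here)
 * that rsp_mem is large enough that a slot is not recycled before its
 * previous response has been consumed.
 */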
void *
vdrm_alloc_rsp(struct vdrm_device *vdev, struct vdrm_ccmd_req *req, uint32_t sz)
{
   unsigned off;

   simple_mtx_lock(&vdev->rsp_lock);

   sz = align(sz, 8);

   if ((vdev->next_rsp_off + sz) >= vdev->rsp_mem_len)
      vdev->next_rsp_off = 0;

   off = vdev->next_rsp_off;
   vdev->next_rsp_off += sz;

   simple_mtx_unlock(&vdev->rsp_lock);

   req->rsp_off = off;

   struct vdrm_ccmd_rsp *rsp = (void *)&vdev->rsp_mem[off];
   rsp->len = sz;

   return rsp;
}

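/* Append a request to the buffered cmdstream, assigning its seqno and
 * flushing first if the buffer would otherwise overflow.  Caller must
 * hold eb_lock.
 */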
static int
enqueue_req(struct vdrm_device *vdev, struct vdrm_ccmd_req *req)
{
   simple_mtx_assert_locked(&vdev->eb_lock);

   req->seqno = ++vdev->next_seqno;

   if ((vdev->reqbuf_len + req->len) > sizeof(vdev->reqbuf)) {
      int ret = vdev->funcs->flush_locked(vdev, NULL);
      if (ret)
         return ret;
   }

   memcpy(&vdev->reqbuf[vdev->reqbuf_len], req, req->len);
   vdev->reqbuf_len += req->len;
   vdev->reqbuf_cnt++;

   return 0;
}

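/**
 * Submit a request as its own execbuf, with the submit params in *p,
 * flushing any previously buffered cmds first so the host sees them in
 * order.
 */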
int
vdrm_execbuf(struct vdrm_device *vdev, struct vdrm_execbuf_params *p)
{
   int ret = 0;

   MESA_TRACE_FUNC();

   simple_mtx_lock(&vdev->eb_lock);

   p->req->seqno = ++vdev->next_seqno;

   ret = vdev->funcs->flush_locked(vdev, NULL);
   if (ret)
      goto out_unlock;

   ret = vdev->funcs->execbuf_locked(vdev, p, p->req, p->req->len);

out_unlock:
   simple_mtx_unlock(&vdev->eb_lock);

   return ret;
}

/**
 * Buffer a request cmd to be sent to the host.  If sync is set, the
 * buffered cmds are flushed and we block until the host has processed
 * this request.
 */
int
vdrm_send_req(struct vdrm_device *vdev, struct vdrm_ccmd_req *req, bool sync)
{
   MESA_TRACE_FUNC();

   uintptr_t fence = 0;
   int ret = 0;

   simple_mtx_lock(&vdev->eb_lock);
   ret = enqueue_req(vdev, req);

   if (ret || !sync)
      goto out_unlock;

   ret = vdev->funcs->flush_locked(vdev, &fence);

out_unlock:
   simple_mtx_unlock(&vdev->eb_lock);

   if (ret)
      return ret;

   if (sync) {
      MESA_TRACE_SCOPE("vdrm_send_req sync");
      vdev->funcs->wait_fence(vdev, fence);
      vdrm_host_sync(vdev, req);
   }

   return 0;
}
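
/* Illustrative usage sketch (not part of this file): a synchronous
 * request/response round-trip.  "foo_req"/"foo_rsp" stand in for a
 * protocol-specific ccmd whose first member is its struct vdrm_ccmd_req
 * hdr:
 *
 *    struct foo_req req = {
 *       .hdr = { .cmd = FOO_CMD, .len = sizeof(req) },  // hypothetical ccmd
 *    };
 *    struct foo_rsp *rsp = vdrm_alloc_rsp(vdev, &req.hdr, sizeof(*rsp));
 *    if (vdrm_send_req(vdev, &req.hdr, true) == 0) {
 *       // sync=true: host has processed the request, *rsp is now valid
 *    }
 */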
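/**
 * Flush any buffered cmds to the host.
 */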
int
vdrm_flush(struct vdrm_device *vdev)
{
   int ret = 0;

   MESA_TRACE_FUNC();

   simple_mtx_lock(&vdev->eb_lock);
   ret = vdev->funcs->flush_locked(vdev, NULL);
   simple_mtx_unlock(&vdev->eb_lock);

   return ret;
}

/**
 * Helper for fence/seqno comparisons which deals properly with rollover.
 * Returns true if fence 'a' is before fence 'b'.
 */
static bool
fence_before(uint32_t a, uint32_t b)
{
   return (int32_t)(a - b) < 0;
}
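
/* Worked example: across 32-bit rollover, fence_before(0xfffffffeu, 2)
 * is true, since (int32_t)(0xfffffffeu - 2) == (int32_t)0xfffffffcu,
 * which is negative: seqno 0xfffffffe was issued before seqno 2.
 */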

/**
 * Wait until the host has processed the specified request.
 */
void
vdrm_host_sync(struct vdrm_device *vdev, const struct vdrm_ccmd_req *req)
{
   while (fence_before(vdev->shmem->seqno, req->seqno))
      sched_yield();
}
193