xref: /aosp_15_r20/external/mesa3d/src/freedreno/common/msm_proto.h (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright 2022 Google LLC
3  * SPDX-License-Identifier: MIT
4  */
5 
6 #ifndef MSM_PROTO_H_
7 #define MSM_PROTO_H_
8 
9 /**
10  * General protocol notes:
11  * 1) Request (req) messages are generally sent over DRM_VIRTGPU_EXECBUFFER
12  *    but can also be sent via DRM_VIRTGPU_RESOURCE_CREATE_BLOB (in which
13  *    case they are processed by the host before ctx->get_blob())
14  * 2) Response (rsp) messages are returned via shmem->rsp_mem, at an offset
15  *    specified by the guest in the req message.  Not all req messages have
16  *    a rsp.
17  * 3) Host and guest could have different pointer sizes, ie. 32b guest and
18  *    64b host, or visa versa, so similar to kernel uabi, req and rsp msgs
19  *    should be explicitly padded to avoid 32b vs 64b struct padding issues
20  */
21 
22 /**
23  * Defines the layout of shmem buffer used for host->guest communication.
24  */
struct msm_shmem {
   struct vdrm_shmem base;

   /**
    * Counter that is incremented on asynchronous errors, like SUBMIT
    * or GEM_NEW failures.  The guest should treat errors as context-
    * lost.
    *
    * NOTE(review): written by the host, observed by the guest via the
    * shared memory region (see file-level comment) — presumably polled;
    * confirm against the guest-side users.
    */
   uint32_t async_error;

   /**
    * Counter that is incremented on global fault (see MSM_PARAM_FAULTS)
    */
   uint32_t global_faults;
};
DEFINE_CAST(vdrm_shmem, msm_shmem)
41 
42 /*
43  * Possible cmd types for "command stream", ie. payload of EXECBUF ioctl:
44  */
enum msm_ccmd {
   MSM_CCMD_NOP = 1,         /* No payload, can be used to sync with host */
   MSM_CCMD_IOCTL_SIMPLE,    /* Forward a simple/flat ioctl to the host */
   MSM_CCMD_GEM_NEW,         /* Allocate a GEM BO and assign its iova */
   MSM_CCMD_GEM_SET_IOVA,    /* Set/release iova of an (imported) BO */
   MSM_CCMD_GEM_CPU_PREP,    /* Non-blocking CPU-access fence check */
   MSM_CCMD_GEM_SET_NAME,    /* Set BO debug name */
   MSM_CCMD_GEM_SUBMIT,      /* Cmdstream submission */
   MSM_CCMD_GEM_UPLOAD,      /* Upload data into a BO */
   MSM_CCMD_SUBMITQUEUE_QUERY,
   MSM_CCMD_WAIT_FENCE,      /* Non-blocking fence wait */
   MSM_CCMD_SET_DEBUGINFO,   /* Pass guest process comm/cmdline for debug */
   MSM_CCMD_LAST,
   /* NOTE(review): these values are part of the guest<->host wire
    * protocol — presumably new commands must only be appended before
    * MSM_CCMD_LAST, never inserted or reordered; confirm against the
    * host-side dispatch table.
    */
};
59 
/* Convenience initializer for the common request header, filling in the
 * command id and total request length.  C++ does not support C99 compound
 * literals, so the C++ variant expands to a plain braced init-list (usable
 * only as an initializer), while the C variant is a full compound literal.
 */
#ifdef __cplusplus
#define MSM_CCMD(_cmd, _len) {                      \
       .cmd = MSM_CCMD_##_cmd,                      \
       .len = (_len),                               \
   }
#else
#define MSM_CCMD(_cmd, _len) (struct vdrm_ccmd_req){ \
       .cmd = MSM_CCMD_##_cmd,                      \
       .len = (_len),                               \
   }
#endif
71 
72 /*
73  * MSM_CCMD_NOP
74  */
struct msm_ccmd_nop_req {
   /* No payload beyond the common request header. */
   struct vdrm_ccmd_req hdr;
};
78 
79 /*
80  * MSM_CCMD_IOCTL_SIMPLE
81  *
82  * Forward simple/flat IOC_RW or IOC_W ioctls.  Limited ioctls are supported.
83  */
struct msm_ccmd_ioctl_simple_req {
   struct vdrm_ccmd_req hdr;

   /* The ioctl request number to forward to the host. */
   uint32_t cmd;
   /* The flat ioctl argument payload (size implied by the ioctl cmd). */
   uint8_t payload[];
};
DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_ioctl_simple_req)
91 
struct msm_ccmd_ioctl_simple_rsp {
   struct vdrm_ccmd_rsp hdr;

   /* ioctl return value, interrupted syscalls are handled on the host without
    * returning to the guest.
    */
   int32_t ret;

   /* The output payload for IOC_RW ioctls, the payload is the same size as
    * msm_ccmd_ioctl_simple_req.
    *
    * For IOC_W ioctls (userspace writes, kernel reads) this is zero length.
    */
   uint8_t payload[];
};
107 
108 /*
109  * MSM_CCMD_GEM_NEW
110  *
111  * GEM buffer allocation, maps to DRM_MSM_GEM_NEW plus DRM_MSM_GEM_INFO to
112  * set the BO's iova (to avoid extra guest -> host trip)
113  *
114  * No response.
115  */
struct msm_ccmd_gem_new_req {
   struct vdrm_ccmd_req hdr;

   /* GPU address to assign to the new BO (guest-allocated, applied on the
    * host via DRM_MSM_GEM_INFO, per the comment above).
    */
   uint64_t iova;
   /* Requested allocation size in bytes. */
   uint64_t size;
   /* Allocation flags forwarded to DRM_MSM_GEM_NEW — presumably MSM_BO_*
    * values; confirm against the host-side handler.
    */
   uint32_t flags;
   /* Guest-chosen id to later associate this allocation with a
    * RESOURCE_CREATE_BLOB — TODO confirm against guest/host blob handling.
    */
   uint32_t blob_id;
};
DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_new_req)
125 
126 /*
127  * MSM_CCMD_GEM_SET_IOVA
128  *
129  * Set the buffer iova (for imported BOs).  Also used to release the iova
130  * (by setting it to zero) when a BO is freed.
131  */
struct msm_ccmd_gem_set_iova_req {
   struct vdrm_ccmd_req hdr;

   /* GPU address for the BO, or zero to release the iova on free. */
   uint64_t iova;
   /* Guest resource id identifying the BO. */
   uint32_t res_id;
   /* NOTE(review): contrary to the explicit-padding rule in the file
    * header, this struct ends on a lone uint32 after a uint64 member and
    * relies on implicit tail padding, whose size can differ between 32b
    * and 64b ABIs.  Layout is wire-protocol so it cannot be changed now,
    * but confirm both sides agree on sizeof().
    */
};
DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_set_iova_req)
139 
140 /*
141  * MSM_CCMD_GEM_CPU_PREP
142  *
143  * Maps to DRM_MSM_GEM_CPU_PREP
144  *
145  * Note: Since we don't want to block the single threaded host, this returns
146  * immediately with -EBUSY if the fence is not yet signaled.  The guest
147  * should poll if needed.
148  */
struct msm_ccmd_gem_cpu_prep_req {
   struct vdrm_ccmd_req hdr;

   /* Guest resource id identifying the BO. */
   uint32_t res_id;
   /* CPU access operation forwarded to DRM_MSM_GEM_CPU_PREP — presumably
    * MSM_PREP_* flags; confirm against msm_drm.h.
    */
   uint32_t op;
};
DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_cpu_prep_req)
156 
struct msm_ccmd_gem_cpu_prep_rsp {
   struct vdrm_ccmd_rsp hdr;

   /* Result of the host-side CPU_PREP; -EBUSY if the fence is not yet
    * signaled (see comment above the req struct).
    */
   int32_t ret;
};
162 
163 /*
164  * MSM_CCMD_GEM_SET_NAME
165  *
166  * Maps to DRM_MSM_GEM_INFO:MSM_INFO_SET_NAME
167  *
168  * No response.
169  */
struct msm_ccmd_gem_set_name_req {
   struct vdrm_ccmd_req hdr;

   /* Guest resource id identifying the BO. */
   uint32_t res_id;
   /* Note: packet size aligned to 4 bytes, so the string name may
    * be shorter than the packet header indicates.
    */
   uint32_t len;
   /* The debug name string itself ('len' bytes). */
   uint8_t  payload[];
};
DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_set_name_req)
181 
182 /*
183  * MSM_CCMD_GEM_SUBMIT
184  *
185  * Maps to DRM_MSM_GEM_SUBMIT
186  *
187  * The actual for-reals cmdstream submission.  Note this intentionally
188  * does not support relocs, since we already require a non-ancient
189  * kernel.
190  *
191  * Note, no in/out fence-fd, that synchronization is handled on guest
192  * kernel side (ugg).. need to come up with a better story for fencing.
193  * We probably need to sort something out for that to handle syncobjs.
194  *
195  * No response.
196  */
struct msm_ccmd_gem_submit_req {
   struct vdrm_ccmd_req hdr;

   /* Submit flags forwarded to DRM_MSM_GEM_SUBMIT — presumably
    * MSM_SUBMIT_* values; confirm against msm_drm.h.
    */
   uint32_t flags;
   /* Target submitqueue id. */
   uint32_t queue_id;
   /* Number of drm_msm_gem_submit_bo entries in payload. */
   uint32_t nr_bos;
   /* Number of drm_msm_gem_submit_cmd entries in payload. */
   uint32_t nr_cmds;

   /**
    * The fence "seqno" assigned by the guest userspace.  The host SUBMIT
    * ioctl uses the MSM_SUBMIT_FENCE_SN_IN flag to let the guest assign
    * the sequence #, to avoid the guest needing to wait for a response
    * from the host.
    */
   uint32_t fence;

   /**
    * Payload is first an array of 'struct drm_msm_gem_submit_bo' of
    * length determined by nr_bos (note that handles are guest resource
    * ids which are translated to host GEM handles by the host VMM),
    * followed by an array of 'struct drm_msm_gem_submit_cmd' of length
    * determined by nr_cmds
    */
   int8_t   payload[];
};
DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_submit_req)
223 
224 /*
225  * MSM_CCMD_GEM_UPLOAD
226  *
227  * Upload data to a GEM buffer
228  *
229  * No response.
230  */
struct msm_ccmd_gem_upload_req {
   struct vdrm_ccmd_req hdr;

   /* Guest resource id identifying the destination BO. */
   uint32_t res_id;
   /* Explicit padding (see 32b-vs-64b padding note in file header). */
   uint32_t pad;
   /* Byte offset into the BO at which to write the payload. */
   uint32_t off;

   /* Note: packet size aligned to 4 bytes, so the payload may
    * be shorter than the packet header indicates.
    */
   uint32_t len;
   /* The data to upload ('len' bytes). */
   uint8_t  payload[];
};
DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_upload_req)
245 
246 /*
247  * MSM_CCMD_SUBMITQUEUE_QUERY
248  *
249  * Maps to DRM_MSM_SUBMITQUEUE_QUERY
250  */
struct msm_ccmd_submitqueue_query_req {
   struct vdrm_ccmd_req hdr;

   /* Submitqueue id to query. */
   uint32_t queue_id;
   /* Query parameter forwarded to DRM_MSM_SUBMITQUEUE_QUERY. */
   uint32_t param;
   uint32_t len;   /* size of payload in rsp */
};
DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_submitqueue_query_req)
259 
struct msm_ccmd_submitqueue_query_rsp {
   struct vdrm_ccmd_rsp hdr;

   /* Return value of the host-side query ioctl. */
   int32_t  ret;
   /* Actual number of payload bytes written (may be less than the 'len'
    * requested in the req).
    */
   uint32_t out_len;
   /* Query result data, 'out_len' valid bytes. */
   uint8_t  payload[];
};
267 
268 /*
269  * MSM_CCMD_WAIT_FENCE
270  *
271  * Maps to DRM_MSM_WAIT_FENCE
272  *
273  * Note: Since we don't want to block the single threaded host, this returns
274  * immediately with -ETIMEDOUT if the fence is not yet signaled.  The guest
275  * should poll if needed.
276  */
struct msm_ccmd_wait_fence_req {
   struct vdrm_ccmd_req hdr;

   /* Submitqueue id the fence belongs to. */
   uint32_t queue_id;
   /* Fence seqno to check (guest-assigned, see msm_ccmd_gem_submit_req). */
   uint32_t fence;
};
DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_wait_fence_req)
284 
struct msm_ccmd_wait_fence_rsp {
   struct vdrm_ccmd_rsp hdr;

   /* Result of the host-side wait; -ETIMEDOUT if the fence is not yet
    * signaled (see comment above the req struct).
    */
   int32_t ret;
};
290 
291 /*
292  * MSM_CCMD_SET_DEBUGINFO
293  *
294  * Set per-guest-process debug info (comm and cmdline).  For GPU faults/
295  * crashes, it isn't too useful to see the crosvm (for ex.) comm/cmdline,
296  * since the host process is only a proxy.  This allows the guest to
297  * pass through the guest process comm and commandline for debugging
298  * purposes.
299  *
300  * No response.
301  */
struct msm_ccmd_set_debuginfo_req {
   struct vdrm_ccmd_req hdr;

   /* Length in bytes of the comm string at the start of payload. */
   uint32_t comm_len;
   /* Length in bytes of the cmdline string following the comm string. */
   uint32_t cmdline_len;

   /**
    * Payload is first the comm string followed by cmdline string, padded
    * out to a multiple of 4.
    */
   int8_t   payload[];
};
DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_set_debuginfo_req)
315 
316 #endif /* MSM_PROTO_H_ */
317