1 /*
2 * Copyright © 2022 Google, Inc.
3 * SPDX-License-Identifier: MIT
4 */
5
6 #include "util/libsync.h"
7 #include "util/slab.h"
8
9 #include "freedreno_ringbuffer_sp.h"
10 #include "virtio_priv.h"
11
12 static int
query_param(struct fd_pipe * pipe,uint32_t param,uint64_t * value)13 query_param(struct fd_pipe *pipe, uint32_t param, uint64_t *value)
14 {
15 struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);
16 struct drm_msm_param req = {
17 .pipe = virtio_pipe->pipe,
18 .param = param,
19 };
20 int ret;
21
22 ret = virtio_simple_ioctl(pipe->dev, DRM_IOCTL_MSM_GET_PARAM, &req);
23 if (ret)
24 return ret;
25
26 *value = req.value;
27
28 return 0;
29 }
30
31 static int
query_faults(struct fd_pipe * pipe,uint64_t * value)32 query_faults(struct fd_pipe *pipe, uint64_t *value)
33 {
34 struct virtio_device *virtio_dev = to_virtio_device(pipe->dev);
35 uint32_t async_error = 0;
36 uint64_t global_faults;
37
38 if (vdrm_shmem_has_field(virtio_dev->shmem, async_error))
39 async_error = virtio_dev->shmem->async_error;
40
41 if (vdrm_shmem_has_field(virtio_dev->shmem, global_faults)) {
42 global_faults = virtio_dev->shmem->global_faults;
43 } else {
44 int ret = query_param(pipe, MSM_PARAM_FAULTS, &global_faults);
45 if (ret)
46 return ret;
47 }
48
49 *value = global_faults + async_error;
50
51 return 0;
52 }
53
54 static int
virtio_pipe_get_param(struct fd_pipe * pipe,enum fd_param_id param,uint64_t * value)55 virtio_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
56 uint64_t *value)
57 {
58 struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);
59 struct virtio_device *virtio_dev = to_virtio_device(pipe->dev);
60
61 switch (param) {
62 case FD_DEVICE_ID: // XXX probably get rid of this..
63 case FD_GPU_ID:
64 *value = virtio_pipe->gpu_id;
65 return 0;
66 case FD_GMEM_SIZE:
67 *value = virtio_pipe->gmem;
68 return 0;
69 case FD_GMEM_BASE:
70 *value = virtio_pipe->gmem_base;
71 return 0;
72 case FD_CHIP_ID:
73 *value = virtio_pipe->chip_id;
74 return 0;
75 case FD_MAX_FREQ:
76 *value = virtio_dev->vdrm->caps.u.msm.max_freq;
77 return 0;
78 case FD_TIMESTAMP:
79 return query_param(pipe, MSM_PARAM_TIMESTAMP, value);
80 case FD_NR_PRIORITIES:
81 *value = virtio_dev->vdrm->caps.u.msm.priorities;
82 return 0;
83 case FD_CTX_FAULTS:
84 case FD_GLOBAL_FAULTS:
85 return query_faults(pipe, value);
86 case FD_SUSPEND_COUNT:
87 return query_param(pipe, MSM_PARAM_SUSPENDS, value);
88 case FD_VA_SIZE:
89 *value = virtio_dev->vdrm->caps.u.msm.va_size;
90 return 0;
91 default:
92 ERROR_MSG("invalid param id: %d", param);
93 return -1;
94 }
95 }
96
97 static void
virtio_pipe_finish(struct fd_pipe * pipe)98 virtio_pipe_finish(struct fd_pipe *pipe)
99 {
100 struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);
101 if (util_queue_is_initialized(&virtio_pipe->retire_queue))
102 util_queue_finish(&virtio_pipe->retire_queue);
103 }
104
105 static int
virtio_pipe_wait(struct fd_pipe * pipe,const struct fd_fence * fence,uint64_t timeout)106 virtio_pipe_wait(struct fd_pipe *pipe, const struct fd_fence *fence, uint64_t timeout)
107 {
108 MESA_TRACE_FUNC();
109 struct vdrm_device *vdrm = to_virtio_device(pipe->dev)->vdrm;
110 struct msm_ccmd_wait_fence_req req = {
111 .hdr = MSM_CCMD(WAIT_FENCE, sizeof(req)),
112 .queue_id = to_virtio_pipe(pipe)->queue_id,
113 .fence = fence->kfence,
114 };
115 struct msm_ccmd_submitqueue_query_rsp *rsp;
116 int64_t end_time = os_time_get_nano() + timeout;
117 int ret;
118
119 /* Do a non-blocking wait to trigger host-side wait-boost,
120 * if the host kernel is new enough
121 */
122 rsp = vdrm_alloc_rsp(vdrm, &req.hdr, sizeof(*rsp));
123 ret = vdrm_send_req(vdrm, &req.hdr, false);
124 if (ret)
125 goto out;
126
127 vdrm_flush(vdrm);
128
129 if (fence->use_fence_fd)
130 return sync_wait(fence->fence_fd, timeout / 1000000);
131
132 do {
133 rsp = vdrm_alloc_rsp(vdrm, &req.hdr, sizeof(*rsp));
134
135 ret = vdrm_send_req(vdrm, &req.hdr, true);
136 if (ret)
137 goto out;
138
139 if ((timeout != OS_TIMEOUT_INFINITE) &&
140 (os_time_get_nano() >= end_time))
141 break;
142
143 ret = rsp->ret;
144 } while (ret == -ETIMEDOUT);
145
146 out:
147 return ret;
148 }
149
150 static int
open_submitqueue(struct fd_pipe * pipe,uint32_t prio)151 open_submitqueue(struct fd_pipe *pipe, uint32_t prio)
152 {
153 struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);
154
155 struct drm_msm_submitqueue req = {
156 .flags = 0,
157 .prio = prio,
158 };
159 uint64_t nr_prio = 1;
160 int ret;
161
162 virtio_pipe_get_param(pipe, FD_NR_PRIORITIES, &nr_prio);
163
164 req.prio = MIN2(req.prio, MAX2(nr_prio, 1) - 1);
165
166 ret = virtio_simple_ioctl(pipe->dev, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req);
167 if (ret) {
168 ERROR_MSG("could not create submitqueue! %d (%s)", ret, strerror(errno));
169 return ret;
170 }
171
172 virtio_pipe->queue_id = req.id;
173 virtio_pipe->ring_idx = req.prio + 1;
174
175 return 0;
176 }
177
178 static void
close_submitqueue(struct fd_pipe * pipe,uint32_t queue_id)179 close_submitqueue(struct fd_pipe *pipe, uint32_t queue_id)
180 {
181 virtio_simple_ioctl(pipe->dev, DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE, &queue_id);
182 }
183
184 static void
virtio_pipe_destroy(struct fd_pipe * pipe)185 virtio_pipe_destroy(struct fd_pipe *pipe)
186 {
187 struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);
188
189 if (util_queue_is_initialized(&virtio_pipe->retire_queue))
190 util_queue_destroy(&virtio_pipe->retire_queue);
191
192 close_submitqueue(pipe, virtio_pipe->queue_id);
193 fd_pipe_sp_ringpool_fini(pipe);
194 free(virtio_pipe);
195 }
196
/* Virtio backend vtable for fd_pipe; ringbuffer/flush entrypoints are
 * shared with the other "softpin" (sp) backends.
 */
static const struct fd_pipe_funcs funcs = {
   .ringbuffer_new_object = fd_ringbuffer_sp_new_object,
   .submit_new = virtio_submit_new,
   .flush = fd_pipe_sp_flush,
   .finish = virtio_pipe_finish,
   .get_param = virtio_pipe_get_param,
   .wait = virtio_pipe_wait,
   .destroy = virtio_pipe_destroy,
};
206
207 struct fd_pipe *
virtio_pipe_new(struct fd_device * dev,enum fd_pipe_id id,uint32_t prio)208 virtio_pipe_new(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
209 {
210 static const uint32_t pipe_id[] = {
211 [FD_PIPE_3D] = MSM_PIPE_3D0,
212 [FD_PIPE_2D] = MSM_PIPE_2D0,
213 };
214 struct virtio_device *virtio_dev = to_virtio_device(dev);
215 struct vdrm_device *vdrm = virtio_dev->vdrm;
216 struct virtio_pipe *virtio_pipe = NULL;
217 struct fd_pipe *pipe = NULL;
218
219 virtio_pipe = calloc(1, sizeof(*virtio_pipe));
220 if (!virtio_pipe) {
221 ERROR_MSG("allocation failed");
222 goto fail;
223 }
224
225 pipe = &virtio_pipe->base;
226
227 pipe->funcs = &funcs;
228
229 /* initialize before get_param(): */
230 pipe->dev = dev;
231 virtio_pipe->pipe = pipe_id[id];
232
233 virtio_pipe->gpu_id = vdrm->caps.u.msm.gpu_id;
234 virtio_pipe->gmem = vdrm->caps.u.msm.gmem_size;
235 virtio_pipe->gmem_base = vdrm->caps.u.msm.gmem_base;
236 virtio_pipe->chip_id = vdrm->caps.u.msm.chip_id;
237
238
239 if (!(virtio_pipe->gpu_id || virtio_pipe->chip_id))
240 goto fail;
241
242 util_queue_init(&virtio_pipe->retire_queue, "rq", 8, 1,
243 UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL);
244
245 INFO_MSG("Pipe Info:");
246 INFO_MSG(" GPU-id: %d", virtio_pipe->gpu_id);
247 INFO_MSG(" Chip-id: 0x%016"PRIx64, virtio_pipe->chip_id);
248 INFO_MSG(" GMEM size: 0x%08x", virtio_pipe->gmem);
249
250 if (open_submitqueue(pipe, prio))
251 goto fail;
252
253 fd_pipe_sp_ringpool_init(pipe);
254
255 return pipe;
256 fail:
257 if (pipe)
258 fd_pipe_del(pipe);
259 return NULL;
260 }
261