// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0+

/*
 * Virtio-media driver.
 *
 * Copyright (c) 2023-2024 Google LLC.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <media/frame_vector.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include <media/videobuf2-memops.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>

#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>

#include "protocol.h"
#include "session.h"
#include "virtio_media.h"

#define VIRTIO_MEDIA_NUM_EVENT_BUFS 16

#ifndef VIRTIO_ID_MEDIA
#define VIRTIO_ID_MEDIA 49
#endif

/* ID of the SHM region into which MMAP buffers are mapped. */
#define VIRTIO_MEDIA_SHM_MMAP 0

/*
 * Name of the driver to expose to user-space.
 *
 * This is configurable because v4l2-compliance has workarounds specific to
 * some drivers. When a host driver is proxied directly, this lets those
 * workarounds be applied as needed.
 */
char *driver_name = NULL;
module_param(driver_name, charp, 0660);
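/*
 * Describe the parameter for modinfo. When left unset, the driver presumably
 * falls back to VIRTIO_MEDIA_DEFAULT_DRIVER_NAME.
 */
MODULE_PARM_DESC(driver_name,
		 "Name of the driver to report to user-space instead of the default one");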

/**
 * Allocate a new session, add it to the device's list of sessions, and
 * register its V4L2 file handle.
 */
static struct virtio_media_session *
virtio_media_session_alloc(struct virtio_media *vv, u32 id,
			   bool nonblocking_dequeue)
{
	struct virtio_media_session *session;
	int i;
	int ret;

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		goto err_session;

	session->shadow_buf = kzalloc(VIRTIO_SHADOW_BUF_SIZE, GFP_KERNEL);
	if (!session->shadow_buf)
		goto err_shadow_buf;

	ret = sg_alloc_table(&session->command_sgs, DESC_CHAIN_MAX_LEN,
			     GFP_KERNEL);
	if (ret) {
		goto err_payload_sgs;
	}

	session->id = id;
	session->nonblocking_dequeue = nonblocking_dequeue;

	INIT_LIST_HEAD(&session->list);
	v4l2_fh_init(&session->fh, &vv->video_dev);
	v4l2_fh_add(&session->fh);

	for (i = 0; i <= VIRTIO_MEDIA_LAST_QUEUE; i++)
		INIT_LIST_HEAD(&session->queues[i].pending_dqbufs);
	mutex_init(&session->dqbufs_lock);

	init_waitqueue_head(&session->dqbufs_wait);

	mutex_lock(&vv->sessions_lock);
	list_add_tail(&session->list, &vv->sessions);
	mutex_unlock(&vv->sessions_lock);

	return session;

err_payload_sgs:
	kfree(session->shadow_buf);
err_shadow_buf:
	kfree(session);
err_session:
	return ERR_PTR(-ENOMEM);
}

/**
 * Close and destroy `session`.
 */
static void virtio_media_session_close(struct virtio_media *vv,
				       struct virtio_media_session *session)
{
	int i;

	mutex_lock(&vv->sessions_lock);
	list_del(&session->list);
	mutex_unlock(&vv->sessions_lock);

	v4l2_fh_del(&session->fh);
	v4l2_fh_exit(&session->fh);

	sg_free_table(&session->command_sgs);

	for (i = 0; i <= VIRTIO_MEDIA_LAST_QUEUE; i++)
		if (session->queues[i].buffers)
			vfree(session->queues[i].buffers);

	kfree(session->shadow_buf);
	kfree(session);
}

/**
 * Lookup the session with `id`.
 */
static struct virtio_media_session *
virtio_media_find_session(struct virtio_media *vv, u32 id)
{
	struct list_head *p;
	struct virtio_media_session *session = NULL;

	mutex_lock(&vv->sessions_lock);
	list_for_each(p, &vv->sessions) {
		struct virtio_media_session *s =
			list_entry(p, struct virtio_media_session, list);
		if (s->id == id) {
			session = s;
			break;
		}
	}
	mutex_unlock(&vv->sessions_lock);

	return session;
}

/**
 * Callback parameters for commands sent on the command virtqueue.
 */
struct virtio_media_cmd_callback_param {
	struct virtio_media *vv;
	/* Flag set to true once the command has completed. */
	bool done_flag;
	/* Size of the received response. */
	size_t resp_len;
};

/**
 * Callback for the command queue. This just wakes up the thread that was
 * waiting on the command to complete.
 */
static void commandq_callback(struct virtqueue *queue)
{
	unsigned int len;
	struct virtio_media_cmd_callback_param *param;

	while ((param = virtqueue_get_buf(queue, &len))) {
		param->done_flag = true;
		param->resp_len = len;
		wake_up(&param->vv->wq);
	}

	virtqueue_enable_cb(queue);
}

/**
 * Returns 0 in case of success, or a negative error code.
 */
static int virtio_media_kick_command(struct virtio_media *vv,
				     struct scatterlist **sgs,
				     const size_t out_sgs, const size_t in_sgs,
				     size_t *resp_len)
{
	struct virtio_media_cmd_callback_param cb_param = {
		.vv = vv,
		.done_flag = false,
		.resp_len = 0,
	};
	struct virtio_media_resp_header *resp_header;
	int ret;

	ret = virtqueue_add_sgs(vv->commandq, sgs, out_sgs, in_sgs, &cb_param,
				GFP_ATOMIC);
	if (ret) {
		v4l2_err(&vv->v4l2_dev,
			 "failed to add sgs to command virtqueue\n");
		return ret;
	}

	if (!virtqueue_kick(vv->commandq)) {
		v4l2_err(&vv->v4l2_dev, "failed to kick command virtqueue\n");
		return -EINVAL;
	}

	/* Wait for the response. */
	ret = wait_event_timeout(vv->wq, cb_param.done_flag, 5 * HZ);
	if (ret == 0) {
		v4l2_err(&vv->v4l2_dev,
			 "timed out waiting for response to command\n");
		return -ETIMEDOUT;
	}

	if (resp_len)
		*resp_len = cb_param.resp_len;

	if (in_sgs > 0) {
		/*
		 * If we expect a response, make sure we have at least a
		 * response header - anything shorter is invalid.
		 */
		if (cb_param.resp_len < sizeof(*resp_header)) {
			v4l2_err(&vv->v4l2_dev,
				 "received response header is too short\n");
			return -EINVAL;
		}

		resp_header = sg_virt(sgs[out_sgs]);
		if (resp_header->status)
			/* Host returns a positive error code. */
			return -resp_header->status;
	}

	return 0;
}

/**
 * Send a command to the host and wait for its response.
 * @vv: the virtio_media device to communicate with.
 * @sgs: descriptor chain to submit, command descriptors first.
 * @out_sgs: number of device-readable (command) descriptors in @sgs.
 * @in_sgs: number of device-writable (response) descriptors in @sgs.
 * @minimum_resp_len: the minimum length of the response expected by the caller
 * in case the command succeeded. Anything shorter than that will result in an
 * error.
 * @resp_len: if non-NULL, updated with the length of the received response.
 *
 * Returns 0 in case of success or an error code. If an error is returned,
 * resp_len might not have been updated.
 */
int virtio_media_send_command(struct virtio_media *vv, struct scatterlist **sgs,
			      const size_t out_sgs, const size_t in_sgs,
			      size_t minimum_resp_len, size_t *resp_len)
{
	size_t local_resp_len = resp_len ? *resp_len : 0;
	int ret = virtio_media_kick_command(vv, sgs, out_sgs, in_sgs,
					    &local_resp_len);
	if (resp_len)
		*resp_len = local_resp_len;

	/*
	 * If the host could not process the command, there is no valid
	 * response to check.
	 */
	if (ret < 0)
		return ret;

	/* Make sure the host wrote a complete reply. */
	if (local_resp_len < minimum_resp_len) {
		v4l2_err(
			&vv->v4l2_dev,
			"received response is too short: received %zu, expected at least %zu\n",
			local_resp_len, minimum_resp_len);
		return -EINVAL;
	}

	return 0;
}

/**
 * Send the event buffer to the host so it can return it back to us filled with
 * the next event that occurred.
 */
static int virtio_media_send_event_buffer(struct virtio_media *vv,
					  void *event_buffer)
{
	struct scatterlist *sgs[1], vresp;
	int ret;

	sg_init_one(&vresp, event_buffer, VIRTIO_MEDIA_EVENT_MAX_SIZE);
	sgs[0] = &vresp;

	ret = virtqueue_add_sgs(vv->eventq, sgs, 0, 1, event_buffer,
				GFP_ATOMIC);
	if (ret) {
		v4l2_err(&vv->v4l2_dev,
			 "failed to add sgs to event virtqueue\n");
		return ret;
	}

	if (!virtqueue_kick(vv->eventq)) {
		v4l2_err(&vv->v4l2_dev, "failed to kick event virtqueue\n");
		return -EINVAL;
	}

	return 0;
}

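/*
 * Callback for the event queue. Event processing takes mutexes, so it is
 * deferred to a work item instead of being done in the callback itself.
 */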
static void eventq_callback(struct virtqueue *queue)
{
	struct virtio_media *vv = queue->vdev->priv;

	schedule_work(&vv->eventq_work);
}

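/**
 * Process a DQBUF event sent by the host: copy the received buffer state into
 * the corresponding driver buffer and add it to its queue's list of buffers
 * pending dequeue.
 */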
static void
virtio_media_process_dqbuf_event(struct virtio_media *vv,
				 struct virtio_media_session *session,
				 struct virtio_media_event_dqbuf *dqbuf_evt)
{
	struct virtio_media_buffer *dqbuf;
	const enum v4l2_buf_type queue_type = dqbuf_evt->buffer.type;
	struct virtio_media_queue_state *queue;
	typeof(dqbuf->buffer.m) buffer_m;
	typeof(dqbuf->buffer.m.planes[0].m) plane_m;
	int i;

	if (queue_type >= ARRAY_SIZE(session->queues)) {
		v4l2_err(&vv->v4l2_dev,
			 "unmanaged queue %d passed to dqbuf event\n",
			 dqbuf_evt->buffer.type);
		return;
	}
	queue = &session->queues[queue_type];

	if (dqbuf_evt->buffer.index >= queue->allocated_bufs) {
		v4l2_err(&vv->v4l2_dev,
			 "invalid buffer ID %d for queue %d in dqbuf event\n",
			 dqbuf_evt->buffer.index, dqbuf_evt->buffer.type);
		return;
	}

	dqbuf = &queue->buffers[dqbuf_evt->buffer.index];

	/*
	 * Preserve the 'm' union that was passed to us during QBUF so userspace
	 * gets back the information it submitted.
	 */
	buffer_m = dqbuf->buffer.m;
	memcpy(&dqbuf->buffer, &dqbuf_evt->buffer, sizeof(dqbuf->buffer));
	dqbuf->buffer.m = buffer_m;
	if (V4L2_TYPE_IS_MULTIPLANAR(dqbuf->buffer.type)) {
		if (dqbuf->buffer.length > VIDEO_MAX_PLANES) {
			v4l2_err(&vv->v4l2_dev,
				 "invalid number of planes received from host for a multiplanar buffer\n");
			return;
		}
		for (i = 0; i < dqbuf->buffer.length; i++) {
			plane_m = dqbuf->planes[i].m;
			memcpy(&dqbuf->planes[i], &dqbuf_evt->planes[i],
			       sizeof(struct v4l2_plane));
			dqbuf->planes[i].m = plane_m;
		}
	}

	/* Set the DONE flag as the buffer is now waiting to be dequeued. */
	dqbuf->buffer.flags |= V4L2_BUF_FLAG_DONE;

	mutex_lock(&session->dqbufs_lock);
	list_add_tail(&dqbuf->list, &queue->pending_dqbufs);
	mutex_unlock(&session->dqbufs_lock);
	queue->queued_bufs -= 1;
	wake_up(&session->dqbufs_wait);
}

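/**
 * Process all the event buffers returned by the host on the event queue, and
 * send each buffer back to the host once its event has been handled.
 */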
void virtio_media_process_events(struct virtio_media *vv)
{
	struct virtio_media_event_error *error_evt;
	struct virtio_media_event_dqbuf *dqbuf_evt;
	struct virtio_media_event_event *event_evt;
	struct virtio_media_session *session;
	struct virtio_media_event_header *evt;
	unsigned int len;

	mutex_lock(&vv->events_process_lock);

	while ((evt = virtqueue_get_buf(vv->eventq, &len))) {
		/* Make sure we received enough data. */
		if (len < sizeof(*evt)) {
			v4l2_err(
				&vv->v4l2_dev,
				"event is too short: got %u, expected at least %zu\n",
				len, sizeof(*evt));
			goto end_of_event;
		}

		session = virtio_media_find_session(vv, evt->session_id);
		if (session == NULL) {
			v4l2_err(&vv->v4l2_dev, "cannot find session %d\n",
				 evt->session_id);
			goto end_of_event;
		}

		switch (evt->event) {
		case VIRTIO_MEDIA_EVT_ERROR:
			if (len < sizeof(*error_evt)) {
				v4l2_err(
					&vv->v4l2_dev,
					"error event is too short: got %u, expected %zu\n",
					len, sizeof(*error_evt));
				break;
			}
			error_evt = (struct virtio_media_event_error *)evt;
			v4l2_err(&vv->v4l2_dev,
				 "received error %d for session %d\n",
				 error_evt->errno, error_evt->hdr.session_id);
			/* TODO close session! */
			break;

		/*
		 * Dequeued buffer: put it into the right queue so user-space
		 * can dequeue it.
		 */
		case VIRTIO_MEDIA_EVT_DQBUF:
			if (len < sizeof(*dqbuf_evt)) {
				v4l2_err(
					&vv->v4l2_dev,
					"dqbuf event is too short: got %u, expected %zu\n",
					len, sizeof(*dqbuf_evt));
				break;
			}
			dqbuf_evt = (struct virtio_media_event_dqbuf *)evt;
			virtio_media_process_dqbuf_event(vv, session,
							 dqbuf_evt);
			break;

		case VIRTIO_MEDIA_EVT_EVENT:
			if (len < sizeof(*event_evt)) {
				v4l2_err(
					&vv->v4l2_dev,
					"session event is too short: got %u, expected %zu\n",
					len, sizeof(*event_evt));
				break;
			}

			event_evt = (struct virtio_media_event_event *)evt;
			v4l2_event_queue_fh(&session->fh, &event_evt->event);
			break;

		default:
			v4l2_err(&vv->v4l2_dev, "unknown event type %d\n",
				 evt->event);
			break;
		}

end_of_event:
		virtio_media_send_event_buffer(vv, evt);
	}

	virtqueue_enable_cb(vv->eventq);

	mutex_unlock(&vv->events_process_lock);
}

/**
 * Event work handler. This processes the returned event buffers and
 * immediately sends them back to the host so it never runs out of buffers to
 * report new events with.
 */
static void virtio_media_event_work(struct work_struct *work)
{
	struct virtio_media *vv =
		container_of(work, struct virtio_media, eventq_work);

	virtio_media_process_events(vv);
}

/**
 * Open the device and create a new session.
 */
static int virtio_media_device_open(struct file *file)
{
	struct video_device *video_dev = video_devdata(file);
	struct virtio_media *vv = to_virtio_media(video_dev);
	struct virtio_media_cmd_open *cmd_open = &vv->cmd.open;
	struct virtio_media_resp_open *resp_open = &vv->resp.open;
	struct scatterlist cmd_sg = {}, resp_sg = {};
	struct scatterlist *sgs[2] = { &cmd_sg, &resp_sg };
	struct virtio_media_session *session;
	u32 session_id;
	int ret;

	mutex_lock(&vv->vlock);

	sg_set_buf(&cmd_sg, cmd_open, sizeof(*cmd_open));
	sg_mark_end(&cmd_sg);

	sg_set_buf(&resp_sg, resp_open, sizeof(*resp_open));
	sg_mark_end(&resp_sg);

	mutex_lock(&vv->bufs_lock);
	cmd_open->hdr.cmd = VIRTIO_MEDIA_CMD_OPEN;
	ret = virtio_media_send_command(vv, sgs, 1, 1, sizeof(*resp_open),
					NULL);
	session_id = resp_open->session_id;
	mutex_unlock(&vv->bufs_lock);
	mutex_unlock(&vv->vlock);
	if (ret < 0)
		return ret;

	session = virtio_media_session_alloc(vv, session_id,
					     (file->f_flags & O_NONBLOCK));
	if (IS_ERR(session))
		return PTR_ERR(session);

	file->private_data = &session->fh;

	return 0;
}

/**
 * Close a previously opened session.
 */
static int virtio_media_device_close(struct file *file)
{
	struct video_device *video_dev = video_devdata(file);
	struct virtio_media *vv = to_virtio_media(video_dev);
	struct virtio_media_session *session =
		fh_to_session(file->private_data);
	struct virtio_media_cmd_close *cmd_close = &session->cmd.close;
	struct scatterlist cmd_sg = {};
	struct scatterlist *sgs[1] = { &cmd_sg };
	int ret;

	mutex_lock(&vv->vlock);

	cmd_close->hdr.cmd = VIRTIO_MEDIA_CMD_CLOSE;
	cmd_close->session_id = session->id;

	sg_set_buf(&cmd_sg, cmd_close, sizeof(*cmd_close));
	sg_mark_end(&cmd_sg);

	ret = virtio_media_send_command(vv, sgs, 1, 0, 0, NULL);
	mutex_unlock(&vv->vlock);
	if (ret < 0)
		return ret;

	virtio_media_session_close(vv, session);

	return 0;
}

/**
 * Implements poll logic for a virtio-media device.
 */
static __poll_t virtio_media_device_poll(struct file *file, poll_table *wait)
{
	struct virtio_media_session *session =
		fh_to_session(file->private_data);
	enum v4l2_buf_type capture_type =
		session->uses_mplane ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
				       V4L2_BUF_TYPE_VIDEO_CAPTURE;
	enum v4l2_buf_type output_type =
		session->uses_mplane ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
				       V4L2_BUF_TYPE_VIDEO_OUTPUT;
	struct virtio_media_queue_state *capture_queue =
		&session->queues[capture_type];
	struct virtio_media_queue_state *output_queue =
		&session->queues[output_type];
	__poll_t req_events = poll_requested_events(wait);
	__poll_t rc = 0;

	poll_wait(file, &session->dqbufs_wait, wait);
	poll_wait(file, &session->fh.wait, wait);

	mutex_lock(&session->dqbufs_lock);
	if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		if (!capture_queue->streaming ||
		    (capture_queue->queued_bufs == 0 &&
		     list_empty(&capture_queue->pending_dqbufs)))
			rc |= EPOLLERR;
		else if (!list_empty(&capture_queue->pending_dqbufs))
			rc |= EPOLLIN | EPOLLRDNORM;
	}
	if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
		if (!output_queue->streaming)
			rc |= EPOLLERR;
		else if (output_queue->queued_bufs <
			 output_queue->allocated_bufs)
			rc |= EPOLLOUT | EPOLLWRNORM;
	}
	mutex_unlock(&session->dqbufs_lock);

	if (v4l2_event_pending(&session->fh))
		rc |= EPOLLPRI;

	return rc;
}

/**
 * Inform the host that a previously created MMAP mapping is no longer needed
 * and can be removed.
 */
static void virtio_media_vma_close_locked(struct vm_area_struct *vma)
{
	struct virtio_media *vv = vma->vm_private_data;
	struct virtio_media_cmd_munmap *cmd_munmap = &vv->cmd.munmap;
	struct virtio_media_resp_munmap *resp_munmap = &vv->resp.munmap;
	struct scatterlist cmd_sg = {}, resp_sg = {};
	struct scatterlist *sgs[2] = { &cmd_sg, &resp_sg };
	int ret;

	sg_set_buf(&cmd_sg, cmd_munmap, sizeof(*cmd_munmap));
	sg_mark_end(&cmd_sg);

	sg_set_buf(&resp_sg, resp_munmap, sizeof(*resp_munmap));
	sg_mark_end(&resp_sg);

	mutex_lock(&vv->bufs_lock);
	cmd_munmap->hdr.cmd = VIRTIO_MEDIA_CMD_MUNMAP;
	cmd_munmap->guest_addr =
		(vma->vm_pgoff << PAGE_SHIFT) - vv->mmap_region.addr;
	ret = virtio_media_send_command(vv, sgs, 1, 1, sizeof(*resp_munmap),
					NULL);
	mutex_unlock(&vv->bufs_lock);
	if (ret < 0) {
		v4l2_err(&vv->v4l2_dev, "host failed to unmap buffer: %d\n",
			 ret);
	}
}

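/*
 * VMA close handler: take the device lock and ask the host to drop the
 * mapping backing this VMA.
 */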
static void virtio_media_vma_close(struct vm_area_struct *vma)
{
	struct virtio_media *vv = vma->vm_private_data;

	mutex_lock(&vv->vlock);
	virtio_media_vma_close_locked(vma);
	mutex_unlock(&vv->vlock);
}

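/*
 * Only a close handler is needed: the whole buffer is mapped into the VMA at
 * mmap time, so there should never be a page fault to service.
 */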
static struct vm_operations_struct virtio_media_vm_ops = {
	.close = virtio_media_vma_close,
};

/**
 * Perform a mmap request from the guest.
 *
 * This requests the host to map a MMAP buffer for us, so we can then make
 * that mapping visible in the user-space address space.
 */
static int virtio_media_device_mmap(struct file *file,
				    struct vm_area_struct *vma)
{
	struct video_device *video_dev = video_devdata(file);
	struct virtio_media *vv = to_virtio_media(video_dev);
	struct virtio_media_session *session =
		fh_to_session(file->private_data);
	struct virtio_media_cmd_mmap *cmd_mmap = &session->cmd.mmap;
	struct virtio_media_resp_mmap *resp_mmap = &session->resp.mmap;
	struct scatterlist cmd_sg = {}, resp_sg = {};
	struct scatterlist *sgs[2] = { &cmd_sg, &resp_sg };
	int ret;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
		return -EINVAL;

	mutex_lock(&vv->vlock);

	cmd_mmap->hdr.cmd = VIRTIO_MEDIA_CMD_MMAP;
	cmd_mmap->session_id = session->id;
	cmd_mmap->flags =
		(vma->vm_flags & VM_WRITE) ? VIRTIO_MEDIA_MMAP_FLAG_RW : 0;
	cmd_mmap->offset = vma->vm_pgoff << PAGE_SHIFT;

	sg_set_buf(&cmd_sg, cmd_mmap, sizeof(*cmd_mmap));
	sg_mark_end(&cmd_sg);

	sg_set_buf(&resp_sg, resp_mmap, sizeof(*resp_mmap));
	sg_mark_end(&resp_sg);

	/*
	 * The host performs reference counting and is smart enough to return
	 * the same guest physical address if this is called several times on
	 * the same buffer.
	 */
	ret = virtio_media_send_command(vv, sgs, 1, 1, sizeof(*resp_mmap),
					NULL);
	if (ret < 0)
		goto end;

	vma->vm_private_data = vv;
	/*
	 * Keep the guest address at which the buffer is mapped since we will
	 * use that to unmap.
	 */
	vma->vm_pgoff = (resp_mmap->guest_addr + vv->mmap_region.addr) >>
			PAGE_SHIFT;

	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(resp_mmap->len)) {
		virtio_media_vma_close_locked(vma);
		ret = -EINVAL;
		goto end;
	}

	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
	if (ret)
		goto end;

	vma->vm_ops = &virtio_media_vm_ops;

end:
	mutex_unlock(&vv->vlock);
	return ret;
}

static const struct v4l2_file_operations virtio_media_fops = {
	.owner = THIS_MODULE,
	.open = virtio_media_device_open,
	.release = virtio_media_device_close,
	.poll = virtio_media_device_poll,
	.unlocked_ioctl = virtio_media_device_ioctl,
	.mmap = virtio_media_device_mmap,
};

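/**
 * Probe the device: set up the command and event virtqueues, register a video
 * device according to the capabilities advertised in the virtio config space,
 * and prime the event queue with event buffers.
 */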
static int virtio_media_probe(struct virtio_device *virtio_dev)
{
	struct device *dev = &virtio_dev->dev;
	struct virtqueue *vqs[2];
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 11, 0)
	static struct virtqueue_info vq_info[2] = {
		{
			.name = "command",
			.callback = commandq_callback,
		},
		{
			.name = "event",
			.callback = eventq_callback,
		},
	};
#else
	static vq_callback_t *vq_callbacks[] = {
		commandq_callback,
		eventq_callback,
	};
	static const char *const vq_names[] = { "command", "event" };
#endif
	struct virtio_media *vv;
	struct video_device *vd;
	int i;
	int ret;

	vv = devm_kzalloc(dev, sizeof(*vv), GFP_KERNEL);
	if (!vv)
		return -ENOMEM;

	vv->event_buffer = devm_kzalloc(
		dev, VIRTIO_MEDIA_EVENT_MAX_SIZE * VIRTIO_MEDIA_NUM_EVENT_BUFS,
		GFP_KERNEL);
	if (!vv->event_buffer) {
		return -ENOMEM;
	}

	mutex_init(&vv->bufs_lock);

	INIT_LIST_HEAD(&vv->sessions);
	mutex_init(&vv->sessions_lock);
	mutex_init(&vv->events_process_lock);
	mutex_init(&vv->vlock);

	vv->virtio_dev = virtio_dev;
	virtio_dev->priv = vv;

	init_waitqueue_head(&vv->wq);

	ret = v4l2_device_register(dev, &vv->v4l2_dev);
	if (ret)
		return ret;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 11, 0)
	ret = virtio_find_vqs(virtio_dev, 2, vqs, vq_info, NULL);
#else
	ret = virtio_find_vqs(virtio_dev, 2, vqs, vq_callbacks, vq_names, NULL);
#endif
	if (ret)
		goto err_find_vqs;

	vv->commandq = vqs[0];
	vv->eventq = vqs[1];
	INIT_WORK(&vv->eventq_work, virtio_media_event_work);

	/* Get MMAP buffer mapping SHM region */
	virtio_get_shm_region(virtio_dev, &vv->mmap_region,
			      VIRTIO_MEDIA_SHM_MMAP);

	virtio_device_ready(virtio_dev);

	vd = &vv->video_dev;

	vd->v4l2_dev = &vv->v4l2_dev;
	vd->vfl_type = VFL_TYPE_VIDEO;
	vd->ioctl_ops = &virtio_media_ioctl_ops;
	vd->fops = &virtio_media_fops;
	vd->device_caps = virtio_cread32(virtio_dev, 0);
	if (vd->device_caps & (V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE))
		vd->vfl_dir = VFL_DIR_M2M;
	else if (vd->device_caps &
		 (V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE))
		vd->vfl_dir = VFL_DIR_TX;
	else
		vd->vfl_dir = VFL_DIR_RX;
	vd->release = video_device_release_empty;
	strscpy(vd->name, "virtio-media", sizeof(vd->name));

	video_set_drvdata(vd, vv);

	/* TODO find out when we should enable this ioctl? */
	v4l2_disable_ioctl(vd, VIDIOC_S_HW_FREQ_SEEK);

	ret = video_register_device(vd, virtio_cread32(virtio_dev, 4), 0);
	if (ret)
		goto err_register_device;

	for (i = 0; i < VIRTIO_MEDIA_NUM_EVENT_BUFS; i++) {
		ret = virtio_media_send_event_buffer(
			vv, vv->event_buffer + VIRTIO_MEDIA_EVENT_MAX_SIZE * i);
		if (ret)
			goto err_send_event_buffer;
	}

	return 0;

err_send_event_buffer:
	video_unregister_device(&vv->video_dev);
err_register_device:
	virtio_dev->config->del_vqs(virtio_dev);
err_find_vqs:
	v4l2_device_unregister(&vv->v4l2_dev);

	return ret;
}

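/**
 * Remove the device: reset it, unregister the V4L2 and video devices, and
 * close any session that is still open.
 */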
static void virtio_media_remove(struct virtio_device *virtio_dev)
{
	struct virtio_media *vv = virtio_dev->priv;
	struct list_head *p, *n;

	virtio_reset_device(virtio_dev);

	v4l2_device_unregister(&vv->v4l2_dev);
	virtio_dev->config->del_vqs(virtio_dev);
	video_unregister_device(&vv->video_dev);

	list_for_each_safe(p, n, &vv->sessions) {
		struct virtio_media_session *s =
			list_entry(p, struct virtio_media_session, list);

		virtio_media_session_close(vv, s);
	}
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_MEDIA, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {};

static struct virtio_driver virtio_media_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = VIRTIO_MEDIA_DEFAULT_DRIVER_NAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_media_probe,
	.remove = virtio_media_remove,
};

module_virtio_driver(virtio_media_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio media driver");
MODULE_AUTHOR("Alexandre Courbot <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");