1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * inode.c -- user mode filesystem api for usb gadget controllers
4 *
5 * Copyright (C) 2003-2004 David Brownell
6 * Copyright (C) 2003 Agilent Technologies
7 */
8
9
10 /* #define VERBOSE_DEBUG */
11
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/fs_context.h>
16 #include <linux/pagemap.h>
17 #include <linux/uts.h>
18 #include <linux/wait.h>
19 #include <linux/compiler.h>
20 #include <linux/uaccess.h>
21 #include <linux/sched.h>
22 #include <linux/slab.h>
23 #include <linux/string_choices.h>
24 #include <linux/poll.h>
25 #include <linux/kthread.h>
26 #include <linux/aio.h>
27 #include <linux/uio.h>
28 #include <linux/refcount.h>
29 #include <linux/delay.h>
30 #include <linux/device.h>
31 #include <linux/moduleparam.h>
32
33 #include <linux/usb/gadgetfs.h>
34 #include <linux/usb/gadget.h>
35 #include <linux/usb/composite.h> /* for USB_GADGET_DELAYED_STATUS */
36
37 /* Undef helpers from linux/usb/composite.h as gadgetfs redefines them */
38 #undef DBG
39 #undef ERROR
40 #undef INFO
41
42
43 /*
44 * The gadgetfs API maps each endpoint to a file descriptor so that you
45 * can use standard synchronous read/write calls for I/O. There's some
46 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support. Example usermode
47 * drivers show how this works in practice. You can also use AIO to
48 * eliminate I/O gaps between requests, to help when streaming data.
49 *
50 * Key parts that must be USB-specific are protocols defining how the
51 * read/write operations relate to the hardware state machines. There
52 * are two types of files. One type is for the device, implementing ep0.
53 * The other type is for each IN or OUT endpoint. In both cases, the
54 * user mode driver must configure the hardware before using it.
55 *
56 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
57 * (by writing configuration and device descriptors). Afterwards it
58 * may serve as a source of device events, used to handle all control
59 * requests other than basic enumeration.
60 *
61 * - Then, after a SET_CONFIGURATION control request, ep_config() is
62 * called when each /dev/gadget/ep* file is configured (by writing
63 * endpoint descriptors). Afterwards these files are used to write()
64 * IN data or to read() OUT data. To halt the endpoint, a "wrong
65 * direction" request is issued (like reading an IN endpoint).
66 *
67 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
68 * not possible on all hardware. For example, precise fault handling with
69 * respect to data left in endpoint fifos after aborted operations; or
70 * selective clearing of endpoint halts, to implement SET_INTERFACE.
71 */
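
/* A minimal usermode sketch of that flow, added here as a hedged example:
 * the device path, descriptor contents and error handling below are
 * illustrative assumptions, not something this driver defines.
 *
 *	int fd = open("/dev/gadget/$CHIP", O_RDWR);	// ep0 + whole device
 *	write(fd, descriptors, sizeof descriptors);	// tag 0, configs, device
 *	for (;;) {
 *		struct usb_gadgetfs_event ev;
 *		read(fd, &ev, sizeof ev);		// blocks for events
 *		// GADGETFS_SETUP: read()/write() fd for the data stage
 *		// CONNECT/DISCONNECT/SUSPEND: adjust driver state
 *	}
 *
 * Endpoint files are opened and configured separately, typically from
 * worker threads, once a SET_CONFIGURATION event arrives.
 */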
72
73 #define DRIVER_DESC "USB Gadget filesystem"
74 #define DRIVER_VERSION "24 Aug 2004"
75
76 static const char driver_desc [] = DRIVER_DESC;
77 static const char shortname [] = "gadgetfs";
78
79 MODULE_DESCRIPTION (DRIVER_DESC);
80 MODULE_AUTHOR ("David Brownell");
81 MODULE_LICENSE ("GPL");
82
83 static int ep_open(struct inode *, struct file *);
84
85
86 /*----------------------------------------------------------------------*/
87
88 #define GADGETFS_MAGIC 0xaee71ee7
89
90 /* /dev/gadget/$CHIP represents ep0 and the whole device */
91 enum ep0_state {
92 /* DISABLED is the initial state. */
93 STATE_DEV_DISABLED = 0,
94
95 /* Only one open() of /dev/gadget/$CHIP; only one file tracks
96 * ep0/device i/o modes and binding to the controller. Driver
97 * must always write descriptors to initialize the device, then
98 * the device becomes UNCONNECTED until enumeration.
99 */
100 STATE_DEV_OPENED,
101
102 /* From then on, ep0 fd is in either of two basic modes:
103 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
104 * - SETUP: read/write will transfer control data and succeed;
105 * or if "wrong direction", performs protocol stall
106 */
107 STATE_DEV_UNCONNECTED,
108 STATE_DEV_CONNECTED,
109 STATE_DEV_SETUP,
110
111 /* UNBOUND means the driver closed ep0, so the device won't be
112 * accessible again (DEV_DISABLED) until all fds are closed.
113 */
114 STATE_DEV_UNBOUND,
115 };
116
117 /* enough for the whole queue: most events invalidate others */
118 #define N_EVENT 5
119
120 #define RBUF_SIZE 256
121
122 struct dev_data {
123 spinlock_t lock;
124 refcount_t count;
125 int udc_usage;
126 enum ep0_state state; /* P: lock */
127 struct usb_gadgetfs_event event [N_EVENT];
128 unsigned ev_next;
129 struct fasync_struct *fasync;
130 u8 current_config;
131
132 /* drivers reading ep0 MUST handle control requests (SETUP)
133 * reported that way; else the host will time out.
134 */
135 unsigned usermode_setup : 1,
136 setup_in : 1,
137 setup_can_stall : 1,
138 setup_out_ready : 1,
139 setup_out_error : 1,
140 setup_abort : 1,
141 gadget_registered : 1;
142 unsigned setup_wLength;
143
144 /* the rest is basically write-once */
145 struct usb_config_descriptor *config, *hs_config;
146 struct usb_device_descriptor *dev;
147 struct usb_request *req;
148 struct usb_gadget *gadget;
149 struct list_head epfiles;
150 void *buf;
151 wait_queue_head_t wait;
152 struct super_block *sb;
153 struct dentry *dentry;
154
155 /* except this scratch i/o buffer for ep0 */
156 u8 rbuf[RBUF_SIZE];
157 };
158
159 static inline void get_dev (struct dev_data *data)
160 {
161 refcount_inc (&data->count);
162 }
163
164 static void put_dev (struct dev_data *data)
165 {
166 if (likely (!refcount_dec_and_test (&data->count)))
167 return;
168 /* needs no more cleanup */
169 BUG_ON (waitqueue_active (&data->wait));
170 kfree (data);
171 }
172
173 static struct dev_data *dev_new (void)
174 {
175 struct dev_data *dev;
176
177 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
178 if (!dev)
179 return NULL;
180 dev->state = STATE_DEV_DISABLED;
181 refcount_set (&dev->count, 1);
182 spin_lock_init (&dev->lock);
183 INIT_LIST_HEAD (&dev->epfiles);
184 init_waitqueue_head (&dev->wait);
185 return dev;
186 }
187
188 /*----------------------------------------------------------------------*/
189
190 /* other /dev/gadget/$ENDPOINT files represent endpoints */
191 enum ep_state {
192 STATE_EP_DISABLED = 0,
193 STATE_EP_READY,
194 STATE_EP_ENABLED,
195 STATE_EP_UNBOUND,
196 };
197
198 struct ep_data {
199 struct mutex lock;
200 enum ep_state state;
201 refcount_t count;
202 struct dev_data *dev;
203 /* must hold dev->lock before accessing ep or req */
204 struct usb_ep *ep;
205 struct usb_request *req;
206 ssize_t status;
207 char name [16];
208 struct usb_endpoint_descriptor desc, hs_desc;
209 struct list_head epfiles;
210 wait_queue_head_t wait;
211 struct dentry *dentry;
212 };
213
214 static inline void get_ep (struct ep_data *data)
215 {
216 refcount_inc (&data->count);
217 }
218
219 static void put_ep (struct ep_data *data)
220 {
221 if (likely (!refcount_dec_and_test (&data->count)))
222 return;
223 put_dev (data->dev);
224 /* needs no more cleanup */
225 BUG_ON (!list_empty (&data->epfiles));
226 BUG_ON (waitqueue_active (&data->wait));
227 kfree (data);
228 }
229
230 /*----------------------------------------------------------------------*/
231
232 /* most "how to use the hardware" policy choices are in userspace:
233 * mapping endpoint roles (which the driver needs) to the capabilities
234 * which the usb controller has. most of those capabilities are exposed
235 * implicitly, starting with the driver name and then endpoint names.
236 */
237
238 static const char *CHIP;
239 static DEFINE_MUTEX(sb_mutex); /* Serialize superblock operations */
240
241 /*----------------------------------------------------------------------*/
242
243 /* NOTE: don't use dev_printk calls before binding to the gadget
244 * at the end of ep0 configuration, or after unbind.
245 */
246
247 /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
248 #define xprintk(d,level,fmt,args...) \
249 printk(level "%s: " fmt , shortname , ## args)
250
251 #ifdef DEBUG
252 #define DBG(dev,fmt,args...) \
253 xprintk(dev , KERN_DEBUG , fmt , ## args)
254 #else
255 #define DBG(dev,fmt,args...) \
256 do { } while (0)
257 #endif /* DEBUG */
258
259 #ifdef VERBOSE_DEBUG
260 #define VDEBUG DBG
261 #else
262 #define VDEBUG(dev,fmt,args...) \
263 do { } while (0)
264 #endif /* VERBOSE_DEBUG */
265
266 #define ERROR(dev,fmt,args...) \
267 xprintk(dev , KERN_ERR , fmt , ## args)
268 #define INFO(dev,fmt,args...) \
269 xprintk(dev , KERN_INFO , fmt , ## args)
270
271
272 /*----------------------------------------------------------------------*/
273
274 /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
275 *
276 * After opening, configure non-control endpoints. Then use normal
277 * stream read() and write() requests; and maybe ioctl() to get more
278 * precise FIFO status when recovering from cancellation.
279 */
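
/* Hedged usermode sketch of that synchronous model; the endpoint file
 * name and buffer sizes are illustrative assumptions (actual names come
 * from the UDC driver):
 *
 *	int in = open("/dev/gadget/ep1in", O_RDWR);
 *	write(in, ep_descriptors, sizeof ep_descriptors); // tag 1 + descriptor(s)
 *	write(in, data, sizeof data);                      // queues one IN transfer
 *	int fifo = ioctl(in, GADGETFS_FIFO_STATUS);        // bytes left in the FIFO
 *
 * OUT endpoints are symmetrical: read() collects host data; a "wrong
 * direction" call (such as read() on an IN endpoint) halts the endpoint.
 */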
280
281 static void epio_complete (struct usb_ep *ep, struct usb_request *req)
282 {
283 struct ep_data *epdata = ep->driver_data;
284
285 if (!req->context)
286 return;
287 if (req->status)
288 epdata->status = req->status;
289 else
290 epdata->status = req->actual;
291 complete ((struct completion *)req->context);
292 }
293
294 /* tasklock endpoint, returning when it's connected.
295 * still need dev->lock to use epdata->ep.
296 */
297 static int
298 get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
299 {
300 int val;
301
302 if (f_flags & O_NONBLOCK) {
303 if (!mutex_trylock(&epdata->lock))
304 goto nonblock;
305 if (epdata->state != STATE_EP_ENABLED &&
306 (!is_write || epdata->state != STATE_EP_READY)) {
307 mutex_unlock(&epdata->lock);
308 nonblock:
309 val = -EAGAIN;
310 } else
311 val = 0;
312 return val;
313 }
314
315 val = mutex_lock_interruptible(&epdata->lock);
316 if (val < 0)
317 return val;
318
319 switch (epdata->state) {
320 case STATE_EP_ENABLED:
321 return 0;
322 case STATE_EP_READY: /* not configured yet */
323 if (is_write)
324 return 0;
325 fallthrough;
326 case STATE_EP_UNBOUND: /* clean disconnect */
327 break;
328 // case STATE_EP_DISABLED: /* "can't happen" */
329 default: /* error! */
330 pr_debug ("%s: ep %p not available, state %d\n",
331 shortname, epdata, epdata->state);
332 }
333 mutex_unlock(&epdata->lock);
334 return -ENODEV;
335 }
336
337 static ssize_t
338 ep_io (struct ep_data *epdata, void *buf, unsigned len)
339 {
340 DECLARE_COMPLETION_ONSTACK (done);
341 int value;
342
343 spin_lock_irq (&epdata->dev->lock);
344 if (likely (epdata->ep != NULL)) {
345 struct usb_request *req = epdata->req;
346
347 req->context = &done;
348 req->complete = epio_complete;
349 req->buf = buf;
350 req->length = len;
351 value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
352 } else
353 value = -ENODEV;
354 spin_unlock_irq (&epdata->dev->lock);
355
356 if (likely (value == 0)) {
357 value = wait_for_completion_interruptible(&done);
358 if (value != 0) {
359 spin_lock_irq (&epdata->dev->lock);
360 if (likely (epdata->ep != NULL)) {
361 DBG (epdata->dev, "%s i/o interrupted\n",
362 epdata->name);
363 usb_ep_dequeue (epdata->ep, epdata->req);
364 spin_unlock_irq (&epdata->dev->lock);
365
366 wait_for_completion(&done);
367 if (epdata->status == -ECONNRESET)
368 epdata->status = -EINTR;
369 } else {
370 spin_unlock_irq (&epdata->dev->lock);
371
372 DBG (epdata->dev, "endpoint gone\n");
373 wait_for_completion(&done);
374 epdata->status = -ENODEV;
375 }
376 }
377 return epdata->status;
378 }
379 return value;
380 }
381
382 static int
383 ep_release (struct inode *inode, struct file *fd)
384 {
385 struct ep_data *data = fd->private_data;
386 int value;
387
388 value = mutex_lock_interruptible(&data->lock);
389 if (value < 0)
390 return value;
391
392 /* clean up if this can be reopened */
393 if (data->state != STATE_EP_UNBOUND) {
394 data->state = STATE_EP_DISABLED;
395 data->desc.bDescriptorType = 0;
396 data->hs_desc.bDescriptorType = 0;
397 usb_ep_disable(data->ep);
398 }
399 mutex_unlock(&data->lock);
400 put_ep (data);
401 return 0;
402 }
403
404 static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
405 {
406 struct ep_data *data = fd->private_data;
407 int status;
408
409 if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
410 return status;
411
412 spin_lock_irq (&data->dev->lock);
413 if (likely (data->ep != NULL)) {
414 switch (code) {
415 case GADGETFS_FIFO_STATUS:
416 status = usb_ep_fifo_status (data->ep);
417 break;
418 case GADGETFS_FIFO_FLUSH:
419 usb_ep_fifo_flush (data->ep);
420 break;
421 case GADGETFS_CLEAR_HALT:
422 status = usb_ep_clear_halt (data->ep);
423 break;
424 default:
425 status = -ENOTTY;
426 }
427 } else
428 status = -ENODEV;
429 spin_unlock_irq (&data->dev->lock);
430 mutex_unlock(&data->lock);
431 return status;
432 }
433
434 /*----------------------------------------------------------------------*/
435
436 /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
437
438 struct kiocb_priv {
439 struct usb_request *req;
440 struct ep_data *epdata;
441 struct kiocb *iocb;
442 struct mm_struct *mm;
443 struct work_struct work;
444 void *buf;
445 struct iov_iter to;
446 const void *to_free;
447 unsigned actual;
448 };
449
450 static int ep_aio_cancel(struct kiocb *iocb)
451 {
452 struct kiocb_priv *priv = iocb->private;
453 struct ep_data *epdata;
454 int value;
455
456 local_irq_disable();
457 epdata = priv->epdata;
458 // spin_lock(&epdata->dev->lock);
459 if (likely(epdata && epdata->ep && priv->req))
460 value = usb_ep_dequeue (epdata->ep, priv->req);
461 else
462 value = -EINVAL;
463 // spin_unlock(&epdata->dev->lock);
464 local_irq_enable();
465
466 return value;
467 }
468
469 static void ep_user_copy_worker(struct work_struct *work)
470 {
471 struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
472 struct mm_struct *mm = priv->mm;
473 struct kiocb *iocb = priv->iocb;
474 size_t ret;
475
476 kthread_use_mm(mm);
477 ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
478 kthread_unuse_mm(mm);
479 if (!ret)
480 ret = -EFAULT;
481
482 /* completing the iocb can drop the ctx and mm, don't touch mm after */
483 iocb->ki_complete(iocb, ret);
484
485 kfree(priv->buf);
486 kfree(priv->to_free);
487 kfree(priv);
488 }
489
490 static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
491 {
492 struct kiocb *iocb = req->context;
493 struct kiocb_priv *priv = iocb->private;
494 struct ep_data *epdata = priv->epdata;
495
496 /* lock against disconnect (and ideally, cancel) */
497 spin_lock(&epdata->dev->lock);
498 priv->req = NULL;
499 priv->epdata = NULL;
500
501 /* if this was a write or a read returning no data then we
502 * don't need to copy anything to userspace, so we can
503 * complete the aio request immediately.
504 */
505 if (priv->to_free == NULL || unlikely(req->actual == 0)) {
506 kfree(req->buf);
507 kfree(priv->to_free);
508 kfree(priv);
509 iocb->private = NULL;
510 iocb->ki_complete(iocb,
511 req->actual ? req->actual : (long)req->status);
512 } else {
513 /* ep_copy_to_user() won't report both; we hide some faults */
514 if (unlikely(0 != req->status))
515 DBG(epdata->dev, "%s fault %d len %d\n",
516 ep->name, req->status, req->actual);
517
518 priv->buf = req->buf;
519 priv->actual = req->actual;
520 INIT_WORK(&priv->work, ep_user_copy_worker);
521 schedule_work(&priv->work);
522 }
523
524 usb_ep_free_request(ep, req);
525 spin_unlock(&epdata->dev->lock);
526 put_ep(epdata);
527 }
528
529 static ssize_t ep_aio(struct kiocb *iocb,
530 struct kiocb_priv *priv,
531 struct ep_data *epdata,
532 char *buf,
533 size_t len)
534 {
535 struct usb_request *req;
536 ssize_t value;
537
538 iocb->private = priv;
539 priv->iocb = iocb;
540
541 kiocb_set_cancel_fn(iocb, ep_aio_cancel);
542 get_ep(epdata);
543 priv->epdata = epdata;
544 priv->actual = 0;
545 priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
546
547 /* each kiocb is coupled to one usb_request, but we can't
548 * allocate or submit those if the host disconnected.
549 */
550 spin_lock_irq(&epdata->dev->lock);
551 value = -ENODEV;
552 if (unlikely(epdata->ep == NULL))
553 goto fail;
554
555 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
556 value = -ENOMEM;
557 if (unlikely(!req))
558 goto fail;
559
560 priv->req = req;
561 req->buf = buf;
562 req->length = len;
563 req->complete = ep_aio_complete;
564 req->context = iocb;
565 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
566 if (unlikely(0 != value)) {
567 usb_ep_free_request(epdata->ep, req);
568 goto fail;
569 }
570 spin_unlock_irq(&epdata->dev->lock);
571 return -EIOCBQUEUED;
572
573 fail:
574 spin_unlock_irq(&epdata->dev->lock);
575 kfree(priv->to_free);
576 kfree(priv);
577 put_ep(epdata);
578 return value;
579 }
580
581 static ssize_t
582 ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
583 {
584 struct file *file = iocb->ki_filp;
585 struct ep_data *epdata = file->private_data;
586 size_t len = iov_iter_count(to);
587 ssize_t value;
588 char *buf;
589
590 if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
591 return value;
592
593 /* halt any endpoint by doing a "wrong direction" i/o call */
594 if (usb_endpoint_dir_in(&epdata->desc)) {
595 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
596 !is_sync_kiocb(iocb)) {
597 mutex_unlock(&epdata->lock);
598 return -EINVAL;
599 }
600 DBG (epdata->dev, "%s halt\n", epdata->name);
601 spin_lock_irq(&epdata->dev->lock);
602 if (likely(epdata->ep != NULL))
603 usb_ep_set_halt(epdata->ep);
604 spin_unlock_irq(&epdata->dev->lock);
605 mutex_unlock(&epdata->lock);
606 return -EBADMSG;
607 }
608
609 buf = kmalloc(len, GFP_KERNEL);
610 if (unlikely(!buf)) {
611 mutex_unlock(&epdata->lock);
612 return -ENOMEM;
613 }
614 if (is_sync_kiocb(iocb)) {
615 value = ep_io(epdata, buf, len);
616 if (value >= 0 && (copy_to_iter(buf, value, to) != value))
617 value = -EFAULT;
618 } else {
619 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
620 value = -ENOMEM;
621 if (!priv)
622 goto fail;
623 priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
624 if (!iter_is_ubuf(&priv->to) && !priv->to_free) {
625 kfree(priv);
626 goto fail;
627 }
628 value = ep_aio(iocb, priv, epdata, buf, len);
629 if (value == -EIOCBQUEUED)
630 buf = NULL;
631 }
632 fail:
633 kfree(buf);
634 mutex_unlock(&epdata->lock);
635 return value;
636 }
637
638 static ssize_t ep_config(struct ep_data *, const char *, size_t);
639
640 static ssize_t
641 ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
642 {
643 struct file *file = iocb->ki_filp;
644 struct ep_data *epdata = file->private_data;
645 size_t len = iov_iter_count(from);
646 bool configured;
647 ssize_t value;
648 char *buf;
649
650 if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
651 return value;
652
653 configured = epdata->state == STATE_EP_ENABLED;
654
655 /* halt any endpoint by doing a "wrong direction" i/o call */
656 if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
657 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
658 !is_sync_kiocb(iocb)) {
659 mutex_unlock(&epdata->lock);
660 return -EINVAL;
661 }
662 DBG (epdata->dev, "%s halt\n", epdata->name);
663 spin_lock_irq(&epdata->dev->lock);
664 if (likely(epdata->ep != NULL))
665 usb_ep_set_halt(epdata->ep);
666 spin_unlock_irq(&epdata->dev->lock);
667 mutex_unlock(&epdata->lock);
668 return -EBADMSG;
669 }
670
671 buf = kmalloc(len, GFP_KERNEL);
672 if (unlikely(!buf)) {
673 mutex_unlock(&epdata->lock);
674 return -ENOMEM;
675 }
676
677 if (unlikely(!copy_from_iter_full(buf, len, from))) {
678 value = -EFAULT;
679 goto out;
680 }
681
682 if (unlikely(!configured)) {
683 value = ep_config(epdata, buf, len);
684 } else if (is_sync_kiocb(iocb)) {
685 value = ep_io(epdata, buf, len);
686 } else {
687 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
688 value = -ENOMEM;
689 if (priv) {
690 value = ep_aio(iocb, priv, epdata, buf, len);
691 if (value == -EIOCBQUEUED)
692 buf = NULL;
693 }
694 }
695 out:
696 kfree(buf);
697 mutex_unlock(&epdata->lock);
698 return value;
699 }
700
701 /*----------------------------------------------------------------------*/
702
703 /* used after endpoint configuration */
704 static const struct file_operations ep_io_operations = {
705 .owner = THIS_MODULE,
706
707 .open = ep_open,
708 .release = ep_release,
709 .unlocked_ioctl = ep_ioctl,
710 .read_iter = ep_read_iter,
711 .write_iter = ep_write_iter,
712 };
713
714 /* ENDPOINT INITIALIZATION
715 *
716 * fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
717 * status = write (fd, descriptors, sizeof descriptors)
718 *
719 * That write establishes the endpoint configuration, configuring
720 * the controller to process bulk, interrupt, or isochronous transfers
721 * at the right maxpacket size, and so on.
722 *
723 * The descriptors are message type 1, identified by a host order u32
724 * at the beginning of what's written. Descriptor order is: full/low
725 * speed descriptor, then optional high speed descriptor.
726 */
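
/* A hedged sketch of building that write (layout only; the descriptor
 * values and the ep_fd name are made-up examples):
 *
 *	u8 buf[4 + 2 * USB_DT_ENDPOINT_SIZE];
 *	u32 tag = 1;
 *	memcpy(buf, &tag, 4);                                 // message type 1
 *	memcpy(buf + 4, &fs_ep_desc, USB_DT_ENDPOINT_SIZE);   // full/low speed
 *	memcpy(buf + 4 + USB_DT_ENDPOINT_SIZE,
 *	       &hs_ep_desc, USB_DT_ENDPOINT_SIZE);            // optional high speed
 *	write(ep_fd, buf, sizeof buf);
 *
 * ep_config() below parses exactly this layout before enabling the endpoint.
 */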
727 static ssize_t
728 ep_config (struct ep_data *data, const char *buf, size_t len)
729 {
730 struct usb_ep *ep;
731 u32 tag;
732 int value, length = len;
733
734 if (data->state != STATE_EP_READY) {
735 value = -EL2HLT;
736 goto fail;
737 }
738
739 value = len;
740 if (len < USB_DT_ENDPOINT_SIZE + 4)
741 goto fail0;
742
743 /* we might need to change message format someday */
744 memcpy(&tag, buf, 4);
745 if (tag != 1) {
746 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
747 goto fail0;
748 }
749 buf += 4;
750 len -= 4;
751
752 /* NOTE: audio endpoint extensions not accepted here;
753 * just don't include the extra bytes.
754 */
755
756 /* full/low speed descriptor, then high speed */
757 memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
758 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
759 || data->desc.bDescriptorType != USB_DT_ENDPOINT)
760 goto fail0;
761 if (len != USB_DT_ENDPOINT_SIZE) {
762 if (len != 2 * USB_DT_ENDPOINT_SIZE)
763 goto fail0;
764 memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
765 USB_DT_ENDPOINT_SIZE);
766 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
767 || data->hs_desc.bDescriptorType
768 != USB_DT_ENDPOINT) {
769 DBG(data->dev, "config %s, bad hs length or type\n",
770 data->name);
771 goto fail0;
772 }
773 }
774
775 spin_lock_irq (&data->dev->lock);
776 if (data->dev->state == STATE_DEV_UNBOUND) {
777 value = -ENOENT;
778 goto gone;
779 } else {
780 ep = data->ep;
781 if (ep == NULL) {
782 value = -ENODEV;
783 goto gone;
784 }
785 }
786 switch (data->dev->gadget->speed) {
787 case USB_SPEED_LOW:
788 case USB_SPEED_FULL:
789 ep->desc = &data->desc;
790 break;
791 case USB_SPEED_HIGH:
792 /* fails if caller didn't provide that descriptor... */
793 ep->desc = &data->hs_desc;
794 break;
795 default:
796 DBG(data->dev, "unconnected, %s init abandoned\n",
797 data->name);
798 value = -EINVAL;
799 goto gone;
800 }
801 value = usb_ep_enable(ep);
802 if (value == 0) {
803 data->state = STATE_EP_ENABLED;
804 value = length;
805 }
806 gone:
807 spin_unlock_irq (&data->dev->lock);
808 if (value < 0) {
809 fail:
810 data->desc.bDescriptorType = 0;
811 data->hs_desc.bDescriptorType = 0;
812 }
813 return value;
814 fail0:
815 value = -EINVAL;
816 goto fail;
817 }
818
819 static int
820 ep_open (struct inode *inode, struct file *fd)
821 {
822 struct ep_data *data = inode->i_private;
823 int value = -EBUSY;
824
825 if (mutex_lock_interruptible(&data->lock) != 0)
826 return -EINTR;
827 spin_lock_irq (&data->dev->lock);
828 if (data->dev->state == STATE_DEV_UNBOUND)
829 value = -ENOENT;
830 else if (data->state == STATE_EP_DISABLED) {
831 value = 0;
832 data->state = STATE_EP_READY;
833 get_ep (data);
834 fd->private_data = data;
835 VDEBUG (data->dev, "%s ready\n", data->name);
836 } else
837 DBG (data->dev, "%s state %d\n",
838 data->name, data->state);
839 spin_unlock_irq (&data->dev->lock);
840 mutex_unlock(&data->lock);
841 return value;
842 }
843
844 /*----------------------------------------------------------------------*/
845
846 /* EP0 IMPLEMENTATION can be partly in userspace.
847 *
848 * Drivers that use this facility receive various events, including
849 * control requests the kernel doesn't handle. Drivers that don't
850 * use this facility may be too simple-minded for real applications.
851 */
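
/* Hedged sketch of what a usermode ep0 reader does with a delegated SETUP
 * event (names and lengths are illustrative):
 *
 *	struct usb_gadgetfs_event ev;
 *	read(ep0_fd, &ev, sizeof ev);
 *	if (ev.type == GADGETFS_SETUP) {
 *		if (ev.u.setup.bRequestType & USB_DIR_IN)
 *			write(ep0_fd, reply, len);	// IN data stage
 *		else
 *			read(ep0_fd, buf, len);		// OUT data stage / status ack
 *	}
 *
 * An ep0 i/o call in the "wrong direction" makes the kernel reply with a
 * protocol stall instead, as noted in the ep0_state comments above.
 */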
852
853 static inline void ep0_readable (struct dev_data *dev)
854 {
855 wake_up (&dev->wait);
856 kill_fasync (&dev->fasync, SIGIO, POLL_IN);
857 }
858
859 static void clean_req (struct usb_ep *ep, struct usb_request *req)
860 {
861 struct dev_data *dev = ep->driver_data;
862
863 if (req->buf != dev->rbuf) {
864 kfree(req->buf);
865 req->buf = dev->rbuf;
866 }
867 req->complete = epio_complete;
868 dev->setup_out_ready = 0;
869 }
870
871 static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
872 {
873 struct dev_data *dev = ep->driver_data;
874 unsigned long flags;
875 int free = 1;
876
877 /* for control OUT, data must still get to userspace */
878 spin_lock_irqsave(&dev->lock, flags);
879 if (!dev->setup_in) {
880 dev->setup_out_error = (req->status != 0);
881 if (!dev->setup_out_error)
882 free = 0;
883 dev->setup_out_ready = 1;
884 ep0_readable (dev);
885 }
886
887 /* clean up as appropriate */
888 if (free && req->buf != &dev->rbuf)
889 clean_req (ep, req);
890 req->complete = epio_complete;
891 spin_unlock_irqrestore(&dev->lock, flags);
892 }
893
894 static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
895 {
896 struct dev_data *dev = ep->driver_data;
897
898 if (dev->setup_out_ready) {
899 DBG (dev, "ep0 request busy!\n");
900 return -EBUSY;
901 }
902 if (len > sizeof (dev->rbuf))
903 req->buf = kmalloc(len, GFP_ATOMIC);
904 if (req->buf == NULL) {
905 req->buf = dev->rbuf;
906 return -ENOMEM;
907 }
908 req->complete = ep0_complete;
909 req->length = len;
910 req->zero = 0;
911 return 0;
912 }
913
914 static ssize_t
915 ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
916 {
917 struct dev_data *dev = fd->private_data;
918 ssize_t retval;
919 enum ep0_state state;
920
921 spin_lock_irq (&dev->lock);
922 if (dev->state <= STATE_DEV_OPENED) {
923 retval = -EINVAL;
924 goto done;
925 }
926
927 /* report fd mode change before acting on it */
928 if (dev->setup_abort) {
929 dev->setup_abort = 0;
930 retval = -EIDRM;
931 goto done;
932 }
933
934 /* control DATA stage */
935 if ((state = dev->state) == STATE_DEV_SETUP) {
936
937 if (dev->setup_in) { /* stall IN */
938 VDEBUG(dev, "ep0in stall\n");
939 (void) usb_ep_set_halt (dev->gadget->ep0);
940 retval = -EL2HLT;
941 dev->state = STATE_DEV_CONNECTED;
942
943 } else if (len == 0) { /* ack SET_CONFIGURATION etc */
944 struct usb_ep *ep = dev->gadget->ep0;
945 struct usb_request *req = dev->req;
946
947 if ((retval = setup_req (ep, req, 0)) == 0) {
948 ++dev->udc_usage;
949 spin_unlock_irq (&dev->lock);
950 retval = usb_ep_queue (ep, req, GFP_KERNEL);
951 spin_lock_irq (&dev->lock);
952 --dev->udc_usage;
953 }
954 dev->state = STATE_DEV_CONNECTED;
955
956 /* assume that was SET_CONFIGURATION */
957 if (dev->current_config) {
958 unsigned power;
959
960 if (gadget_is_dualspeed(dev->gadget)
961 && (dev->gadget->speed
962 == USB_SPEED_HIGH))
963 power = dev->hs_config->bMaxPower;
964 else
965 power = dev->config->bMaxPower;
966 usb_gadget_vbus_draw(dev->gadget, 2 * power);
967 }
968
969 } else { /* collect OUT data */
970 if ((fd->f_flags & O_NONBLOCK) != 0
971 && !dev->setup_out_ready) {
972 retval = -EAGAIN;
973 goto done;
974 }
975 spin_unlock_irq (&dev->lock);
976 retval = wait_event_interruptible (dev->wait,
977 dev->setup_out_ready != 0);
978
979 /* FIXME state could change from under us */
980 spin_lock_irq (&dev->lock);
981 if (retval)
982 goto done;
983
984 if (dev->state != STATE_DEV_SETUP) {
985 retval = -ECANCELED;
986 goto done;
987 }
988 dev->state = STATE_DEV_CONNECTED;
989
990 if (dev->setup_out_error)
991 retval = -EIO;
992 else {
993 len = min (len, (size_t)dev->req->actual);
994 ++dev->udc_usage;
995 spin_unlock_irq(&dev->lock);
996 if (copy_to_user (buf, dev->req->buf, len))
997 retval = -EFAULT;
998 else
999 retval = len;
1000 spin_lock_irq(&dev->lock);
1001 --dev->udc_usage;
1002 clean_req (dev->gadget->ep0, dev->req);
1003 /* NOTE userspace can't yet choose to stall */
1004 }
1005 }
1006 goto done;
1007 }
1008
1009 /* else normal: return event data */
1010 if (len < sizeof dev->event [0]) {
1011 retval = -EINVAL;
1012 goto done;
1013 }
1014 len -= len % sizeof (struct usb_gadgetfs_event);
1015 dev->usermode_setup = 1;
1016
1017 scan:
1018 /* return queued events right away */
1019 if (dev->ev_next != 0) {
1020 unsigned i, n;
1021
1022 n = len / sizeof (struct usb_gadgetfs_event);
1023 if (dev->ev_next < n)
1024 n = dev->ev_next;
1025
1026 /* ep0 i/o has special semantics during STATE_DEV_SETUP */
1027 for (i = 0; i < n; i++) {
1028 if (dev->event [i].type == GADGETFS_SETUP) {
1029 dev->state = STATE_DEV_SETUP;
1030 n = i + 1;
1031 break;
1032 }
1033 }
1034 spin_unlock_irq (&dev->lock);
1035 len = n * sizeof (struct usb_gadgetfs_event);
1036 if (copy_to_user (buf, &dev->event, len))
1037 retval = -EFAULT;
1038 else
1039 retval = len;
1040 if (len > 0) {
1041 /* NOTE this doesn't guard against broken drivers;
1042 * concurrent ep0 readers may lose events.
1043 */
1044 spin_lock_irq (&dev->lock);
1045 if (dev->ev_next > n) {
1046 memmove(&dev->event[0], &dev->event[n],
1047 sizeof (struct usb_gadgetfs_event)
1048 * (dev->ev_next - n));
1049 }
1050 dev->ev_next -= n;
1051 spin_unlock_irq (&dev->lock);
1052 }
1053 return retval;
1054 }
1055 if (fd->f_flags & O_NONBLOCK) {
1056 retval = -EAGAIN;
1057 goto done;
1058 }
1059
1060 switch (state) {
1061 default:
1062 DBG (dev, "fail %s, state %d\n", __func__, state);
1063 retval = -ESRCH;
1064 break;
1065 case STATE_DEV_UNCONNECTED:
1066 case STATE_DEV_CONNECTED:
1067 spin_unlock_irq (&dev->lock);
1068 DBG (dev, "%s wait\n", __func__);
1069
1070 /* wait for events */
1071 retval = wait_event_interruptible (dev->wait,
1072 dev->ev_next != 0);
1073 if (retval < 0)
1074 return retval;
1075 spin_lock_irq (&dev->lock);
1076 goto scan;
1077 }
1078
1079 done:
1080 spin_unlock_irq (&dev->lock);
1081 return retval;
1082 }
1083
1084 static struct usb_gadgetfs_event *
1085 next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1086 {
1087 struct usb_gadgetfs_event *event;
1088 unsigned i;
1089
1090 switch (type) {
1091 /* these events purge the queue */
1092 case GADGETFS_DISCONNECT:
1093 if (dev->state == STATE_DEV_SETUP)
1094 dev->setup_abort = 1;
1095 fallthrough;
1096 case GADGETFS_CONNECT:
1097 dev->ev_next = 0;
1098 break;
1099 case GADGETFS_SETUP: /* previous request timed out */
1100 case GADGETFS_SUSPEND: /* same effect */
1101 /* these events can't be repeated */
1102 for (i = 0; i != dev->ev_next; i++) {
1103 if (dev->event [i].type != type)
1104 continue;
1105 DBG(dev, "discard old event[%d] %d\n", i, type);
1106 dev->ev_next--;
1107 if (i == dev->ev_next)
1108 break;
1109 /* indices start at zero, for simplicity */
1110 memmove (&dev->event [i], &dev->event [i + 1],
1111 sizeof (struct usb_gadgetfs_event)
1112 * (dev->ev_next - i));
1113 }
1114 break;
1115 default:
1116 BUG ();
1117 }
1118 VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1119 event = &dev->event [dev->ev_next++];
1120 BUG_ON (dev->ev_next > N_EVENT);
1121 memset (event, 0, sizeof *event);
1122 event->type = type;
1123 return event;
1124 }
1125
1126 static ssize_t
1127 ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1128 {
1129 struct dev_data *dev = fd->private_data;
1130 ssize_t retval = -ESRCH;
1131
1132 /* report fd mode change before acting on it */
1133 if (dev->setup_abort) {
1134 dev->setup_abort = 0;
1135 retval = -EIDRM;
1136
1137 /* data and/or status stage for control request */
1138 } else if (dev->state == STATE_DEV_SETUP) {
1139
1140 len = min_t(size_t, len, dev->setup_wLength);
1141 if (dev->setup_in) {
1142 retval = setup_req (dev->gadget->ep0, dev->req, len);
1143 if (retval == 0) {
1144 dev->state = STATE_DEV_CONNECTED;
1145 ++dev->udc_usage;
1146 spin_unlock_irq (&dev->lock);
1147 if (copy_from_user (dev->req->buf, buf, len))
1148 retval = -EFAULT;
1149 else {
1150 if (len < dev->setup_wLength)
1151 dev->req->zero = 1;
1152 retval = usb_ep_queue (
1153 dev->gadget->ep0, dev->req,
1154 GFP_KERNEL);
1155 }
1156 spin_lock_irq(&dev->lock);
1157 --dev->udc_usage;
1158 if (retval < 0) {
1159 clean_req (dev->gadget->ep0, dev->req);
1160 } else
1161 retval = len;
1162
1163 return retval;
1164 }
1165
1166 /* can stall some OUT transfers */
1167 } else if (dev->setup_can_stall) {
1168 VDEBUG(dev, "ep0out stall\n");
1169 (void) usb_ep_set_halt (dev->gadget->ep0);
1170 retval = -EL2HLT;
1171 dev->state = STATE_DEV_CONNECTED;
1172 } else {
1173 DBG(dev, "bogus ep0out stall!\n");
1174 }
1175 } else
1176 DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1177
1178 return retval;
1179 }
1180
1181 static int
1182 ep0_fasync (int f, struct file *fd, int on)
1183 {
1184 struct dev_data *dev = fd->private_data;
1185 // caller must F_SETOWN before signal delivery happens
1186 VDEBUG(dev, "%s %s\n", __func__, str_on_off(on));
1187 return fasync_helper (f, fd, on, &dev->fasync);
1188 }
1189
1190 static struct usb_gadget_driver gadgetfs_driver;
1191
1192 static int
1193 dev_release (struct inode *inode, struct file *fd)
1194 {
1195 struct dev_data *dev = fd->private_data;
1196
1197 /* closing ep0 === shutdown all */
1198
1199 if (dev->gadget_registered) {
1200 usb_gadget_unregister_driver (&gadgetfs_driver);
1201 dev->gadget_registered = false;
1202 }
1203
1204 /* at this point "good" hardware has disconnected the
1205 * device from USB; the host won't see it any more.
1206 * alternatively, all host requests will time out.
1207 */
1208
1209 kfree (dev->buf);
1210 dev->buf = NULL;
1211
1212 /* other endpoints were all decoupled from this device */
1213 spin_lock_irq(&dev->lock);
1214 dev->state = STATE_DEV_DISABLED;
1215 spin_unlock_irq(&dev->lock);
1216
1217 put_dev (dev);
1218 return 0;
1219 }
1220
1221 static __poll_t
1222 ep0_poll (struct file *fd, poll_table *wait)
1223 {
1224 struct dev_data *dev = fd->private_data;
1225 __poll_t mask = 0;
1226
1227 if (dev->state <= STATE_DEV_OPENED)
1228 return DEFAULT_POLLMASK;
1229
1230 poll_wait(fd, &dev->wait, wait);
1231
1232 spin_lock_irq(&dev->lock);
1233
1234 /* report fd mode change before acting on it */
1235 if (dev->setup_abort) {
1236 dev->setup_abort = 0;
1237 mask = EPOLLHUP;
1238 goto out;
1239 }
1240
1241 if (dev->state == STATE_DEV_SETUP) {
1242 if (dev->setup_in || dev->setup_can_stall)
1243 mask = EPOLLOUT;
1244 } else {
1245 if (dev->ev_next != 0)
1246 mask = EPOLLIN;
1247 }
1248 out:
1249 spin_unlock_irq(&dev->lock);
1250 return mask;
1251 }
1252
1253 static long gadget_dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1254 {
1255 struct dev_data *dev = fd->private_data;
1256 struct usb_gadget *gadget = dev->gadget;
1257 long ret = -ENOTTY;
1258
1259 spin_lock_irq(&dev->lock);
1260 if (dev->state == STATE_DEV_OPENED ||
1261 dev->state == STATE_DEV_UNBOUND) {
1262 /* Not bound to a UDC */
1263 } else if (gadget->ops->ioctl) {
1264 ++dev->udc_usage;
1265 spin_unlock_irq(&dev->lock);
1266
1267 ret = gadget->ops->ioctl (gadget, code, value);
1268
1269 spin_lock_irq(&dev->lock);
1270 --dev->udc_usage;
1271 }
1272 spin_unlock_irq(&dev->lock);
1273
1274 return ret;
1275 }
1276
1277 /*----------------------------------------------------------------------*/
1278
1279 /* The in-kernel gadget driver handles most ep0 issues, in particular
1280 * enumerating the single configuration (as provided from user space).
1281 *
1282 * Unrecognized ep0 requests may be handled in user space.
1283 */
1284
1285 static void make_qualifier (struct dev_data *dev)
1286 {
1287 struct usb_qualifier_descriptor qual;
1288 struct usb_device_descriptor *desc;
1289
1290 qual.bLength = sizeof qual;
1291 qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1292 qual.bcdUSB = cpu_to_le16 (0x0200);
1293
1294 desc = dev->dev;
1295 qual.bDeviceClass = desc->bDeviceClass;
1296 qual.bDeviceSubClass = desc->bDeviceSubClass;
1297 qual.bDeviceProtocol = desc->bDeviceProtocol;
1298
1299 /* assumes ep0 uses the same value for both speeds ... */
1300 qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1301
1302 qual.bNumConfigurations = 1;
1303 qual.bRESERVED = 0;
1304
1305 memcpy (dev->rbuf, &qual, sizeof qual);
1306 }
1307
1308 static int
1309 config_buf (struct dev_data *dev, u8 type, unsigned index)
1310 {
1311 int len;
1312 int hs = 0;
1313
1314 /* only one configuration */
1315 if (index > 0)
1316 return -EINVAL;
1317
1318 if (gadget_is_dualspeed(dev->gadget)) {
1319 hs = (dev->gadget->speed == USB_SPEED_HIGH);
1320 if (type == USB_DT_OTHER_SPEED_CONFIG)
1321 hs = !hs;
1322 }
1323 if (hs) {
1324 dev->req->buf = dev->hs_config;
1325 len = le16_to_cpu(dev->hs_config->wTotalLength);
1326 } else {
1327 dev->req->buf = dev->config;
1328 len = le16_to_cpu(dev->config->wTotalLength);
1329 }
1330 ((u8 *)dev->req->buf) [1] = type;
1331 return len;
1332 }
1333
1334 static int
1335 gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1336 {
1337 struct dev_data *dev = get_gadget_data (gadget);
1338 struct usb_request *req = dev->req;
1339 int value = -EOPNOTSUPP;
1340 struct usb_gadgetfs_event *event;
1341 u16 w_value = le16_to_cpu(ctrl->wValue);
1342 u16 w_length = le16_to_cpu(ctrl->wLength);
1343
1344 if (w_length > RBUF_SIZE) {
1345 if (ctrl->bRequestType & USB_DIR_IN) {
1346 /* Cast away the const, we are going to overwrite on purpose. */
1347 __le16 *temp = (__le16 *)&ctrl->wLength;
1348
1349 *temp = cpu_to_le16(RBUF_SIZE);
1350 w_length = RBUF_SIZE;
1351 } else {
1352 return value;
1353 }
1354 }
1355
1356 spin_lock (&dev->lock);
1357 dev->setup_abort = 0;
1358 if (dev->state == STATE_DEV_UNCONNECTED) {
1359 if (gadget_is_dualspeed(gadget)
1360 && gadget->speed == USB_SPEED_HIGH
1361 && dev->hs_config == NULL) {
1362 spin_unlock(&dev->lock);
1363 ERROR (dev, "no high speed config??\n");
1364 return -EINVAL;
1365 }
1366
1367 dev->state = STATE_DEV_CONNECTED;
1368
1369 INFO (dev, "connected\n");
1370 event = next_event (dev, GADGETFS_CONNECT);
1371 event->u.speed = gadget->speed;
1372 ep0_readable (dev);
1373
1374 /* host may have given up waiting for response. we can miss control
1375 * requests handled lower down (device/endpoint status and features);
1376 * then ep0_{read,write} will report the wrong status. controller
1377 * driver will have aborted pending i/o.
1378 */
1379 } else if (dev->state == STATE_DEV_SETUP)
1380 dev->setup_abort = 1;
1381
1382 req->buf = dev->rbuf;
1383 req->context = NULL;
1384 switch (ctrl->bRequest) {
1385
1386 case USB_REQ_GET_DESCRIPTOR:
1387 if (ctrl->bRequestType != USB_DIR_IN)
1388 goto unrecognized;
1389 switch (w_value >> 8) {
1390
1391 case USB_DT_DEVICE:
1392 value = min (w_length, (u16) sizeof *dev->dev);
1393 dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1394 req->buf = dev->dev;
1395 break;
1396 case USB_DT_DEVICE_QUALIFIER:
1397 if (!dev->hs_config)
1398 break;
1399 value = min (w_length, (u16)
1400 sizeof (struct usb_qualifier_descriptor));
1401 make_qualifier (dev);
1402 break;
1403 case USB_DT_OTHER_SPEED_CONFIG:
1404 case USB_DT_CONFIG:
1405 value = config_buf (dev,
1406 w_value >> 8,
1407 w_value & 0xff);
1408 if (value >= 0)
1409 value = min (w_length, (u16) value);
1410 break;
1411 case USB_DT_STRING:
1412 goto unrecognized;
1413
1414 default: // all others are errors
1415 break;
1416 }
1417 break;
1418
1419 /* currently one config, two speeds */
1420 case USB_REQ_SET_CONFIGURATION:
1421 if (ctrl->bRequestType != 0)
1422 goto unrecognized;
1423 if (0 == (u8) w_value) {
1424 value = 0;
1425 dev->current_config = 0;
1426 usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1427 // user mode expected to disable endpoints
1428 } else {
1429 u8 config, power;
1430
1431 if (gadget_is_dualspeed(gadget)
1432 && gadget->speed == USB_SPEED_HIGH) {
1433 config = dev->hs_config->bConfigurationValue;
1434 power = dev->hs_config->bMaxPower;
1435 } else {
1436 config = dev->config->bConfigurationValue;
1437 power = dev->config->bMaxPower;
1438 }
1439
1440 if (config == (u8) w_value) {
1441 value = 0;
1442 dev->current_config = config;
1443 usb_gadget_vbus_draw(gadget, 2 * power);
1444 }
1445 }
1446
1447 /* report SET_CONFIGURATION like any other control request,
1448 * except that usermode may not stall this. the next
1449 * request mustn't be allowed to start until this finishes:
1450 * endpoints and threads set up, etc.
1451 *
1452 * NOTE: older PXA hardware (before PXA 255: without UDCCFR)
1453 * has bad/racey automagic that prevents synchronizing here.
1454 * even kernel mode drivers often miss them.
1455 */
1456 if (value == 0) {
1457 INFO (dev, "configuration #%d\n", dev->current_config);
1458 usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1459 if (dev->usermode_setup) {
1460 dev->setup_can_stall = 0;
1461 goto delegate;
1462 }
1463 }
1464 break;
1465
1466 #ifndef CONFIG_USB_PXA25X
1467 /* PXA automagically handles this request too */
1468 case USB_REQ_GET_CONFIGURATION:
1469 if (ctrl->bRequestType != 0x80)
1470 goto unrecognized;
1471 *(u8 *)req->buf = dev->current_config;
1472 value = min (w_length, (u16) 1);
1473 break;
1474 #endif
1475
1476 default:
1477 unrecognized:
1478 VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1479 dev->usermode_setup ? "delegate" : "fail",
1480 ctrl->bRequestType, ctrl->bRequest,
1481 w_value, le16_to_cpu(ctrl->wIndex), w_length);
1482
1483 /* if there's an ep0 reader, don't stall */
1484 if (dev->usermode_setup) {
1485 dev->setup_can_stall = 1;
1486 delegate:
1487 dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1488 ? 1 : 0;
1489 dev->setup_wLength = w_length;
1490 dev->setup_out_ready = 0;
1491 dev->setup_out_error = 0;
1492
1493 /* read DATA stage for OUT right away */
1494 if (unlikely (!dev->setup_in && w_length)) {
1495 value = setup_req (gadget->ep0, dev->req,
1496 w_length);
1497 if (value < 0)
1498 break;
1499
1500 ++dev->udc_usage;
1501 spin_unlock (&dev->lock);
1502 value = usb_ep_queue (gadget->ep0, dev->req,
1503 GFP_KERNEL);
1504 spin_lock (&dev->lock);
1505 --dev->udc_usage;
1506 if (value < 0) {
1507 clean_req (gadget->ep0, dev->req);
1508 break;
1509 }
1510
1511 /* we can't currently stall these */
1512 dev->setup_can_stall = 0;
1513 }
1514
1515 /* state changes when reader collects event */
1516 event = next_event (dev, GADGETFS_SETUP);
1517 event->u.setup = *ctrl;
1518 ep0_readable (dev);
1519 spin_unlock (&dev->lock);
1520 /*
1521 * Return USB_GADGET_DELAYED_STATUS as a workaround to
1522 * stop some UDC drivers (e.g. dwc3) from automatically
1523 * proceeding with the status stage for 0-length
1524 * transfers.
1525 * Should be removed once all UDC drivers are fixed to
1526 * always delay the status stage until a response is
1527 * queued to EP0.
1528 */
1529 return w_length == 0 ? USB_GADGET_DELAYED_STATUS : 0;
1530 }
1531 }
1532
1533 /* proceed with data transfer and status phases? */
1534 if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1535 req->length = value;
1536 req->zero = value < w_length;
1537
1538 ++dev->udc_usage;
1539 spin_unlock (&dev->lock);
1540 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1541 spin_lock(&dev->lock);
1542 --dev->udc_usage;
1543 spin_unlock(&dev->lock);
1544 if (value < 0) {
1545 DBG (dev, "ep_queue --> %d\n", value);
1546 req->status = 0;
1547 }
1548 return value;
1549 }
1550
1551 /* device stalls when value < 0 */
1552 spin_unlock (&dev->lock);
1553 return value;
1554 }
1555
1556 static void destroy_ep_files (struct dev_data *dev)
1557 {
1558 DBG (dev, "%s %d\n", __func__, dev->state);
1559
1560 /* dev->state must prevent interference */
1561 spin_lock_irq (&dev->lock);
1562 while (!list_empty(&dev->epfiles)) {
1563 struct ep_data *ep;
1564 struct inode *parent;
1565 struct dentry *dentry;
1566
1567 /* break link to FS */
1568 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1569 list_del_init (&ep->epfiles);
1570 spin_unlock_irq (&dev->lock);
1571
1572 dentry = ep->dentry;
1573 ep->dentry = NULL;
1574 parent = d_inode(dentry->d_parent);
1575
1576 /* break link to controller */
1577 mutex_lock(&ep->lock);
1578 if (ep->state == STATE_EP_ENABLED)
1579 (void) usb_ep_disable (ep->ep);
1580 ep->state = STATE_EP_UNBOUND;
1581 usb_ep_free_request (ep->ep, ep->req);
1582 ep->ep = NULL;
1583 mutex_unlock(&ep->lock);
1584
1585 wake_up (&ep->wait);
1586 put_ep (ep);
1587
1588 /* break link to dcache */
1589 inode_lock(parent);
1590 d_delete (dentry);
1591 dput (dentry);
1592 inode_unlock(parent);
1593
1594 spin_lock_irq (&dev->lock);
1595 }
1596 spin_unlock_irq (&dev->lock);
1597 }
1598
1599
1600 static struct dentry *
1601 gadgetfs_create_file (struct super_block *sb, char const *name,
1602 void *data, const struct file_operations *fops);
1603
1604 static int activate_ep_files (struct dev_data *dev)
1605 {
1606 struct usb_ep *ep;
1607 struct ep_data *data;
1608
1609 gadget_for_each_ep (ep, dev->gadget) {
1610
1611 data = kzalloc(sizeof(*data), GFP_KERNEL);
1612 if (!data)
1613 goto enomem0;
1614 data->state = STATE_EP_DISABLED;
1615 mutex_init(&data->lock);
1616 init_waitqueue_head (&data->wait);
1617
1618 strncpy (data->name, ep->name, sizeof (data->name) - 1);
1619 refcount_set (&data->count, 1);
1620 data->dev = dev;
1621 get_dev (dev);
1622
1623 data->ep = ep;
1624 ep->driver_data = data;
1625
1626 data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1627 if (!data->req)
1628 goto enomem1;
1629
1630 data->dentry = gadgetfs_create_file (dev->sb, data->name,
1631 data, &ep_io_operations);
1632 if (!data->dentry)
1633 goto enomem2;
1634 list_add_tail (&data->epfiles, &dev->epfiles);
1635 }
1636 return 0;
1637
1638 enomem2:
1639 usb_ep_free_request (ep, data->req);
1640 enomem1:
1641 put_dev (dev);
1642 kfree (data);
1643 enomem0:
1644 DBG (dev, "%s enomem\n", __func__);
1645 destroy_ep_files (dev);
1646 return -ENOMEM;
1647 }
1648
1649 static void
1650 gadgetfs_unbind (struct usb_gadget *gadget)
1651 {
1652 struct dev_data *dev = get_gadget_data (gadget);
1653
1654 DBG (dev, "%s\n", __func__);
1655
1656 spin_lock_irq (&dev->lock);
1657 dev->state = STATE_DEV_UNBOUND;
1658 while (dev->udc_usage > 0) {
1659 spin_unlock_irq(&dev->lock);
1660 usleep_range(1000, 2000);
1661 spin_lock_irq(&dev->lock);
1662 }
1663 spin_unlock_irq (&dev->lock);
1664
1665 destroy_ep_files (dev);
1666 gadget->ep0->driver_data = NULL;
1667 set_gadget_data (gadget, NULL);
1668
1669 /* we've already been disconnected ... no i/o is active */
1670 if (dev->req)
1671 usb_ep_free_request (gadget->ep0, dev->req);
1672 DBG (dev, "%s done\n", __func__);
1673 put_dev (dev);
1674 }
1675
1676 static struct dev_data *the_device;
1677
1678 static int gadgetfs_bind(struct usb_gadget *gadget,
1679 struct usb_gadget_driver *driver)
1680 {
1681 struct dev_data *dev = the_device;
1682
1683 if (!dev)
1684 return -ESRCH;
1685 if (0 != strcmp (CHIP, gadget->name)) {
1686 pr_err("%s expected %s controller not %s\n",
1687 shortname, CHIP, gadget->name);
1688 return -ENODEV;
1689 }
1690
1691 set_gadget_data (gadget, dev);
1692 dev->gadget = gadget;
1693 gadget->ep0->driver_data = dev;
1694
1695 /* preallocate control response and buffer */
1696 dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1697 if (!dev->req)
1698 goto enomem;
1699 dev->req->context = NULL;
1700 dev->req->complete = epio_complete;
1701
1702 if (activate_ep_files (dev) < 0)
1703 goto enomem;
1704
1705 INFO (dev, "bound to %s driver\n", gadget->name);
1706 spin_lock_irq(&dev->lock);
1707 dev->state = STATE_DEV_UNCONNECTED;
1708 spin_unlock_irq(&dev->lock);
1709 get_dev (dev);
1710 return 0;
1711
1712 enomem:
1713 gadgetfs_unbind (gadget);
1714 return -ENOMEM;
1715 }
1716
1717 static void
1718 gadgetfs_disconnect (struct usb_gadget *gadget)
1719 {
1720 struct dev_data *dev = get_gadget_data (gadget);
1721 unsigned long flags;
1722
1723 spin_lock_irqsave (&dev->lock, flags);
1724 if (dev->state == STATE_DEV_UNCONNECTED)
1725 goto exit;
1726 dev->state = STATE_DEV_UNCONNECTED;
1727
1728 INFO (dev, "disconnected\n");
1729 next_event (dev, GADGETFS_DISCONNECT);
1730 ep0_readable (dev);
1731 exit:
1732 spin_unlock_irqrestore (&dev->lock, flags);
1733 }
1734
1735 static void
1736 gadgetfs_suspend (struct usb_gadget *gadget)
1737 {
1738 struct dev_data *dev = get_gadget_data (gadget);
1739 unsigned long flags;
1740
1741 INFO (dev, "suspended from state %d\n", dev->state);
1742 spin_lock_irqsave(&dev->lock, flags);
1743 switch (dev->state) {
1744 case STATE_DEV_SETUP: // VERY odd... host died??
1745 case STATE_DEV_CONNECTED:
1746 case STATE_DEV_UNCONNECTED:
1747 next_event (dev, GADGETFS_SUSPEND);
1748 ep0_readable (dev);
1749 fallthrough;
1750 default:
1751 break;
1752 }
1753 spin_unlock_irqrestore(&dev->lock, flags);
1754 }
1755
1756 static struct usb_gadget_driver gadgetfs_driver = {
1757 .function = (char *) driver_desc,
1758 .bind = gadgetfs_bind,
1759 .unbind = gadgetfs_unbind,
1760 .setup = gadgetfs_setup,
1761 .reset = gadgetfs_disconnect,
1762 .disconnect = gadgetfs_disconnect,
1763 .suspend = gadgetfs_suspend,
1764
1765 .driver = {
1766 .name = shortname,
1767 },
1768 };
1769
1770 /*----------------------------------------------------------------------*/
1771 /* DEVICE INITIALIZATION
1772 *
1773 * fd = open ("/dev/gadget/$CHIP", O_RDWR)
1774 * status = write (fd, descriptors, sizeof descriptors)
1775 *
1776 * That write establishes the device configuration, so the kernel can
1777 * bind to the controller ... guaranteeing it can handle enumeration
1778 * at all necessary speeds. Descriptor order is:
1779 *
1780 * . message tag (u32, host order) ... for now, must be zero; it
1781 * would change to support features like multi-config devices
1782 * . full/low speed config ... all wTotalLength bytes (with interface,
1783 * class, altsetting, endpoint, and other descriptors)
1784 * . high speed config ... all descriptors, for high speed operation;
1785 * this one's optional except for high-speed hardware
1786 * . device descriptor
1787 *
1788 * Endpoints are not yet enabled. Drivers must wait until device
1789 * configuration and interface altsetting changes create
1790 * the need to configure (or unconfigure) them.
1791 *
1792 * After initialization, the device stays active for as long as that
1793 * $CHIP file is open. Events must then be read from that descriptor,
1794 * such as configuration notifications.
1795 */
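
/* A hedged sketch of assembling that initial write (layout only; buffer
 * size, descriptor contents and variable names are placeholders):
 *
 *	u8 buf[4096], *p = buf;
 *	u32 tag = 0;
 *	memcpy(p, &tag, 4);                       p += 4;
 *	memcpy(p, fs_config, fs_total);           p += fs_total;  // wTotalLength bytes
 *	if (hs_config) {                          // optional
 *		memcpy(p, hs_config, hs_total);   p += hs_total;
 *	}
 *	memcpy(p, &device_desc, USB_DT_DEVICE_SIZE);
 *	write(ep0_fd, buf, (p - buf) + USB_DT_DEVICE_SIZE);
 *
 * dev_config() below parses exactly this layout and then registers the
 * gadget driver, which triggers gadgetfs_bind().
 */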
1796
1797 static int is_valid_config(struct usb_config_descriptor *config,
1798 unsigned int total)
1799 {
1800 return config->bDescriptorType == USB_DT_CONFIG
1801 && config->bLength == USB_DT_CONFIG_SIZE
1802 && total >= USB_DT_CONFIG_SIZE
1803 && config->bConfigurationValue != 0
1804 && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1805 && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1806 /* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1807 /* FIXME check lengths: walk to end */
1808 }
1809
1810 static ssize_t
1811 dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1812 {
1813 struct dev_data *dev = fd->private_data;
1814 ssize_t value, length = len;
1815 unsigned total;
1816 u32 tag;
1817 char *kbuf;
1818
1819 spin_lock_irq(&dev->lock);
1820 if (dev->state > STATE_DEV_OPENED) {
1821 value = ep0_write(fd, buf, len, ptr);
1822 spin_unlock_irq(&dev->lock);
1823 return value;
1824 }
1825 spin_unlock_irq(&dev->lock);
1826
1827 if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1828 (len > PAGE_SIZE * 4))
1829 return -EINVAL;
1830
1831 /* we might need to change message format someday */
1832 if (copy_from_user (&tag, buf, 4))
1833 return -EFAULT;
1834 if (tag != 0)
1835 return -EINVAL;
1836 buf += 4;
1837 length -= 4;
1838
1839 kbuf = memdup_user(buf, length);
1840 if (IS_ERR(kbuf))
1841 return PTR_ERR(kbuf);
1842
1843 spin_lock_irq (&dev->lock);
1844 value = -EINVAL;
1845 if (dev->buf) {
1846 spin_unlock_irq(&dev->lock);
1847 kfree(kbuf);
1848 return value;
1849 }
1850 dev->buf = kbuf;
1851
1852 /* full or low speed config */
1853 dev->config = (void *) kbuf;
1854 total = le16_to_cpu(dev->config->wTotalLength);
1855 if (!is_valid_config(dev->config, total) ||
1856 total > length - USB_DT_DEVICE_SIZE)
1857 goto fail;
1858 kbuf += total;
1859 length -= total;
1860
1861 /* optional high speed config */
1862 if (kbuf [1] == USB_DT_CONFIG) {
1863 dev->hs_config = (void *) kbuf;
1864 total = le16_to_cpu(dev->hs_config->wTotalLength);
1865 if (!is_valid_config(dev->hs_config, total) ||
1866 total > length - USB_DT_DEVICE_SIZE)
1867 goto fail;
1868 kbuf += total;
1869 length -= total;
1870 } else {
1871 dev->hs_config = NULL;
1872 }
1873
1874 /* could support multiple configs, using another encoding! */
1875
1876 /* device descriptor (tweaked for paranoia) */
1877 if (length != USB_DT_DEVICE_SIZE)
1878 goto fail;
1879 dev->dev = (void *)kbuf;
1880 if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1881 || dev->dev->bDescriptorType != USB_DT_DEVICE
1882 || dev->dev->bNumConfigurations != 1)
1883 goto fail;
1884 dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1885
1886 /* triggers gadgetfs_bind(); then we can enumerate. */
1887 spin_unlock_irq (&dev->lock);
1888 if (dev->hs_config)
1889 gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1890 else
1891 gadgetfs_driver.max_speed = USB_SPEED_FULL;
1892
1893 value = usb_gadget_register_driver(&gadgetfs_driver);
1894 if (value != 0) {
1895 spin_lock_irq(&dev->lock);
1896 goto fail;
1897 } else {
1898 /* at this point "good" hardware has for the first time
1899 * let the host see us over USB. alternatively, if users
1900 * unplug/replug that will clear all the error state.
1901 *
1902 * note: everything running before here was guaranteed
1903 * to choke driver model style diagnostics. from here
1904 * on, they can work ... except in cleanup paths that
1905 * kick in after the ep0 descriptor is closed.
1906 */
1907 value = len;
1908 dev->gadget_registered = true;
1909 }
1910 return value;
1911
1912 fail:
1913 dev->config = NULL;
1914 dev->hs_config = NULL;
1915 dev->dev = NULL;
1916 spin_unlock_irq (&dev->lock);
1917 pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1918 kfree (dev->buf);
1919 dev->buf = NULL;
1920 return value;
1921 }
1922
1923 static int
1924 gadget_dev_open (struct inode *inode, struct file *fd)
1925 {
1926 struct dev_data *dev = inode->i_private;
1927 int value = -EBUSY;
1928
1929 spin_lock_irq(&dev->lock);
1930 if (dev->state == STATE_DEV_DISABLED) {
1931 dev->ev_next = 0;
1932 dev->state = STATE_DEV_OPENED;
1933 fd->private_data = dev;
1934 get_dev (dev);
1935 value = 0;
1936 }
1937 spin_unlock_irq(&dev->lock);
1938 return value;
1939 }
1940
1941 static const struct file_operations ep0_operations = {
1942
1943 .open = gadget_dev_open,
1944 .read = ep0_read,
1945 .write = dev_config,
1946 .fasync = ep0_fasync,
1947 .poll = ep0_poll,
1948 .unlocked_ioctl = gadget_dev_ioctl,
1949 .release = dev_release,
1950 };
1951
1952 /*----------------------------------------------------------------------*/
1953
1954 /* FILESYSTEM AND SUPERBLOCK OPERATIONS
1955 *
1956 * Mounting the filesystem creates a controller file, used first for
1957 * device configuration then later for event monitoring.
1958 */
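
/* Typical setup, as a hedged example (the mount point is conventional,
 * not required):
 *
 *	mkdir /dev/gadget
 *	mount -t gadgetfs gadgetfs /dev/gadget
 *
 * gadgetfs_fill_super() then creates /dev/gadget/$CHIP, where $CHIP is
 * the UDC name returned by usb_get_gadget_udc_name().
 */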
1959
1960
1961 /* FIXME PAM etc could set this security policy without mount options
1962 * if epfiles inherited ownership and permissions from ep0 ...
1963 */
1964
1965 static unsigned default_uid;
1966 static unsigned default_gid;
1967 static unsigned default_perm = S_IRUSR | S_IWUSR;
1968
1969 module_param (default_uid, uint, 0644);
1970 module_param (default_gid, uint, 0644);
1971 module_param (default_perm, uint, 0644);
1972
1973
1974 static struct inode *
1975 gadgetfs_make_inode (struct super_block *sb,
1976 void *data, const struct file_operations *fops,
1977 int mode)
1978 {
1979 struct inode *inode = new_inode (sb);
1980
1981 if (inode) {
1982 inode->i_ino = get_next_ino();
1983 inode->i_mode = mode;
1984 inode->i_uid = make_kuid(&init_user_ns, default_uid);
1985 inode->i_gid = make_kgid(&init_user_ns, default_gid);
1986 simple_inode_init_ts(inode);
1987 inode->i_private = data;
1988 inode->i_fop = fops;
1989 }
1990 return inode;
1991 }
1992
1993 /* creates in fs root directory, so non-renamable and non-linkable.
1994 * so inode and dentry are paired, until device reconfig.
1995 */
1996 static struct dentry *
1997 gadgetfs_create_file (struct super_block *sb, char const *name,
1998 void *data, const struct file_operations *fops)
1999 {
2000 struct dentry *dentry;
2001 struct inode *inode;
2002
2003 dentry = d_alloc_name(sb->s_root, name);
2004 if (!dentry)
2005 return NULL;
2006
2007 inode = gadgetfs_make_inode (sb, data, fops,
2008 S_IFREG | (default_perm & S_IRWXUGO));
2009 if (!inode) {
2010 dput(dentry);
2011 return NULL;
2012 }
2013 d_add (dentry, inode);
2014 return dentry;
2015 }
2016
2017 static const struct super_operations gadget_fs_operations = {
2018 .statfs = simple_statfs,
2019 .drop_inode = generic_delete_inode,
2020 };
2021
2022 static int
2023 gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
2024 {
2025 struct inode *inode;
2026 struct dev_data *dev;
2027 int rc;
2028
2029 mutex_lock(&sb_mutex);
2030
2031 if (the_device) {
2032 rc = -ESRCH;
2033 goto Done;
2034 }
2035
2036 CHIP = usb_get_gadget_udc_name();
2037 if (!CHIP) {
2038 rc = -ENODEV;
2039 goto Done;
2040 }
2041
2042 /* superblock */
2043 sb->s_blocksize = PAGE_SIZE;
2044 sb->s_blocksize_bits = PAGE_SHIFT;
2045 sb->s_magic = GADGETFS_MAGIC;
2046 sb->s_op = &gadget_fs_operations;
2047 sb->s_time_gran = 1;
2048
2049 /* root inode */
2050 inode = gadgetfs_make_inode (sb,
2051 NULL, &simple_dir_operations,
2052 S_IFDIR | S_IRUGO | S_IXUGO);
2053 if (!inode)
2054 goto Enomem;
2055 inode->i_op = &simple_dir_inode_operations;
2056 if (!(sb->s_root = d_make_root (inode)))
2057 goto Enomem;
2058
2059 /* the ep0 file is named after the controller we expect;
2060 * user mode code can use it for sanity checks, like we do.
2061 */
2062 dev = dev_new ();
2063 if (!dev)
2064 goto Enomem;
2065
2066 dev->sb = sb;
2067 dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2068 if (!dev->dentry) {
2069 put_dev(dev);
2070 goto Enomem;
2071 }
2072
2073 /* other endpoint files are available after hardware setup,
2074 * from binding to a controller.
2075 */
2076 the_device = dev;
2077 rc = 0;
2078 goto Done;
2079
2080 Enomem:
2081 kfree(CHIP);
2082 CHIP = NULL;
2083 rc = -ENOMEM;
2084
2085 Done:
2086 mutex_unlock(&sb_mutex);
2087 return rc;
2088 }
2089
2090 /* "mount -t gadgetfs path /dev/gadget" ends up here */
2091 static int gadgetfs_get_tree(struct fs_context *fc)
2092 {
2093 return get_tree_single(fc, gadgetfs_fill_super);
2094 }
2095
2096 static const struct fs_context_operations gadgetfs_context_ops = {
2097 .get_tree = gadgetfs_get_tree,
2098 };
2099
2100 static int gadgetfs_init_fs_context(struct fs_context *fc)
2101 {
2102 fc->ops = &gadgetfs_context_ops;
2103 return 0;
2104 }
2105
2106 static void
2107 gadgetfs_kill_sb (struct super_block *sb)
2108 {
2109 mutex_lock(&sb_mutex);
2110 kill_litter_super (sb);
2111 if (the_device) {
2112 put_dev (the_device);
2113 the_device = NULL;
2114 }
2115 kfree(CHIP);
2116 CHIP = NULL;
2117 mutex_unlock(&sb_mutex);
2118 }
2119
2120 /*----------------------------------------------------------------------*/
2121
2122 static struct file_system_type gadgetfs_type = {
2123 .owner = THIS_MODULE,
2124 .name = shortname,
2125 .init_fs_context = gadgetfs_init_fs_context,
2126 .kill_sb = gadgetfs_kill_sb,
2127 };
2128 MODULE_ALIAS_FS("gadgetfs");
2129
2130 /*----------------------------------------------------------------------*/
2131
2132 static int __init gadgetfs_init (void)
2133 {
2134 int status;
2135
2136 status = register_filesystem (&gadgetfs_type);
2137 if (status == 0)
2138 pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2139 shortname, driver_desc);
2140 return status;
2141 }
2142 module_init (gadgetfs_init);
2143
2144 static void __exit gadgetfs_cleanup (void)
2145 {
2146 pr_debug ("unregister %s\n", shortname);
2147 unregister_filesystem (&gadgetfs_type);
2148 }
2149 module_exit (gadgetfs_cleanup);
2150
2151