1/*
2 * inode.c -- user mode filesystem api for usb gadget controllers
3 *
4 * Copyright (C) 2003-2004 David Brownell
5 * Copyright (C) 2003 Agilent Technologies
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13
14/* #define VERBOSE_DEBUG */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/fs.h>
19#include <linux/pagemap.h>
20#include <linux/uts.h>
21#include <linux/wait.h>
22#include <linux/compiler.h>
23#include <asm/uaccess.h>
24#include <linux/sched.h>
25#include <linux/slab.h>
26#include <linux/poll.h>
27#include <linux/mmu_context.h>
28#include <linux/aio.h>
29
30#include <linux/device.h>
31#include <linux/moduleparam.h>
32
33#include <linux/usb/gadgetfs.h>
34#include <linux/usb/gadget.h>
35
36
37/*
38 * The gadgetfs API maps each endpoint to a file descriptor so that you
39 * can use standard synchronous read/write calls for I/O.  There's some
40 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
41 * drivers show how this works in practice.  You can also use AIO to
42 * eliminate I/O gaps between requests, to help when streaming data.
43 *
44 * Key parts that must be USB-specific are protocols defining how the
45 * read/write operations relate to the hardware state machines.  There
46 * are two types of files.  One type is for the device, implementing ep0.
47 * The other type is for each IN or OUT endpoint.  In both cases, the
48 * user mode driver must configure the hardware before using it.
49 *
50 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
51 *   (by writing configuration and device descriptors).  Afterwards it
52 *   may serve as a source of device events, used to handle all control
53 *   requests other than basic enumeration.
54 *
55 * - Then, after a SET_CONFIGURATION control request, ep_config() is
56 *   called when each /dev/gadget/ep* file is configured (by writing
57 *   endpoint descriptors).  Afterwards these files are used to write()
58 *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
59 *   direction" request is issued (like reading an IN endpoint).
60 *
61 * Unlike "usbfs", the only ioctl()s are for things that are rare and may
62 * not be possible on all hardware.  For example, precise fault handling with
63 * respect to data left in endpoint fifos after aborted operations; or
64 * selective clearing of endpoint halts, to implement SET_INTERFACE.
65 */
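/* Illustrative userspace sketch (not part of this driver) of the flow
 * described above; paths, helper names, and error handling are assumptions
 * made only for the example.
 *
 *	int ep0 = open("/dev/gadget/$CHIP", O_RDWR);
 *	write(ep0, descriptors, descriptors_len);	// see DEVICE INITIALIZATION
 *
 *	struct usb_gadgetfs_event ev;
 *	while (read(ep0, &ev, sizeof ev) == sizeof ev) {
 *		if (ev.type == GADGETFS_SETUP) {
 *			// once the host configures the device, open and
 *			// configure /dev/gadget/ep* files, then read()/write()
 *			// them; see ENDPOINT INITIALIZATION below
 *			handle_setup(ep0, &ev.u.setup);
 *		}
 *	}
 */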
66
67#define	DRIVER_DESC	"USB Gadget filesystem"
68#define	DRIVER_VERSION	"24 Aug 2004"
69
70static const char driver_desc [] = DRIVER_DESC;
71static const char shortname [] = "gadgetfs";
72
73MODULE_DESCRIPTION (DRIVER_DESC);
74MODULE_AUTHOR ("David Brownell");
75MODULE_LICENSE ("GPL");
76
77
78/*----------------------------------------------------------------------*/
79
80#define GADGETFS_MAGIC		0xaee71ee7
81
82/* /dev/gadget/$CHIP represents ep0 and the whole device */
83enum ep0_state {
84	/* DISABLED is the initial state.
85	 */
86	STATE_DEV_DISABLED = 0,
87
88	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
89	 * ep0/device i/o modes and binding to the controller.  Driver
90	 * must always write descriptors to initialize the device, then
91	 * the device becomes UNCONNECTED until enumeration.
92	 */
93	STATE_DEV_OPENED,
94
95	/* From then on, ep0 fd is in either of two basic modes:
96	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
97	 * - SETUP: read/write will transfer control data and succeed;
98	 *   or if "wrong direction", performs protocol stall
99	 */
100	STATE_DEV_UNCONNECTED,
101	STATE_DEV_CONNECTED,
102	STATE_DEV_SETUP,
103
104	/* UNBOUND means the driver closed ep0, so the device won't be
105	 * accessible again (DEV_DISABLED) until all fds are closed.
106	 */
107	STATE_DEV_UNBOUND,
108};
109
110/* enough for the whole queue: most events invalidate others */
111#define	N_EVENT			5
112
113struct dev_data {
114	spinlock_t			lock;
115	atomic_t			count;
116	enum ep0_state			state;		/* P: lock */
117	struct usb_gadgetfs_event	event [N_EVENT];
118	unsigned			ev_next;
119	struct fasync_struct		*fasync;
120	u8				current_config;
121
122	/* drivers reading ep0 MUST handle control requests (SETUP)
123	 * reported that way; else the host will time out.
124	 */
125	unsigned			usermode_setup : 1,
126					setup_in : 1,
127					setup_can_stall : 1,
128					setup_out_ready : 1,
129					setup_out_error : 1,
130					setup_abort : 1;
131	unsigned			setup_wLength;
132
133	/* the rest is basically write-once */
134	struct usb_config_descriptor	*config, *hs_config;
135	struct usb_device_descriptor	*dev;
136	struct usb_request		*req;
137	struct usb_gadget		*gadget;
138	struct list_head		epfiles;
139	void				*buf;
140	wait_queue_head_t		wait;
141	struct super_block		*sb;
142	struct dentry			*dentry;
143
144	/* except this scratch i/o buffer for ep0 */
145	u8				rbuf [256];
146};
147
148static inline void get_dev (struct dev_data *data)
149{
150	atomic_inc (&data->count);
151}
152
153static void put_dev (struct dev_data *data)
154{
155	if (likely (!atomic_dec_and_test (&data->count)))
156		return;
157	/* needs no more cleanup */
158	BUG_ON (waitqueue_active (&data->wait));
159	kfree (data);
160}
161
162static struct dev_data *dev_new (void)
163{
164	struct dev_data		*dev;
165
166	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
167	if (!dev)
168		return NULL;
169	dev->state = STATE_DEV_DISABLED;
170	atomic_set (&dev->count, 1);
171	spin_lock_init (&dev->lock);
172	INIT_LIST_HEAD (&dev->epfiles);
173	init_waitqueue_head (&dev->wait);
174	return dev;
175}
176
177/*----------------------------------------------------------------------*/
178
179/* other /dev/gadget/$ENDPOINT files represent endpoints */
180enum ep_state {
181	STATE_EP_DISABLED = 0,
182	STATE_EP_READY,
183	STATE_EP_ENABLED,
184	STATE_EP_UNBOUND,
185};
186
187struct ep_data {
188	struct mutex			lock;
189	enum ep_state			state;
190	atomic_t			count;
191	struct dev_data			*dev;
192	/* must hold dev->lock before accessing ep or req */
193	struct usb_ep			*ep;
194	struct usb_request		*req;
195	ssize_t				status;
196	char				name [16];
197	struct usb_endpoint_descriptor	desc, hs_desc;
198	struct list_head		epfiles;
199	wait_queue_head_t		wait;
200	struct dentry			*dentry;
201};
202
203static inline void get_ep (struct ep_data *data)
204{
205	atomic_inc (&data->count);
206}
207
208static void put_ep (struct ep_data *data)
209{
210	if (likely (!atomic_dec_and_test (&data->count)))
211		return;
212	put_dev (data->dev);
213	/* needs no more cleanup */
214	BUG_ON (!list_empty (&data->epfiles));
215	BUG_ON (waitqueue_active (&data->wait));
216	kfree (data);
217}
218
219/*----------------------------------------------------------------------*/
220
221/* most "how to use the hardware" policy choices are in userspace:
222 * mapping endpoint roles (which the driver needs) to the capabilities
223 * which the usb controller has.  most of those capabilities are exposed
224 * implicitly, starting with the driver name and then endpoint names.
225 */
226
227static const char *CHIP;
228
229/*----------------------------------------------------------------------*/
230
231/* NOTE:  don't use dev_printk calls before binding to the gadget
232 * at the end of ep0 configuration, or after unbind.
233 */
234
235/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
236#define xprintk(d,level,fmt,args...) \
237	printk(level "%s: " fmt , shortname , ## args)
238
239#ifdef DEBUG
240#define DBG(dev,fmt,args...) \
241	xprintk(dev , KERN_DEBUG , fmt , ## args)
242#else
243#define DBG(dev,fmt,args...) \
244	do { } while (0)
245#endif /* DEBUG */
246
247#ifdef VERBOSE_DEBUG
248#define VDEBUG	DBG
249#else
250#define VDEBUG(dev,fmt,args...) \
251	do { } while (0)
252#endif /* VERBOSE_DEBUG */
253
254#define ERROR(dev,fmt,args...) \
255	xprintk(dev , KERN_ERR , fmt , ## args)
256#define INFO(dev,fmt,args...) \
257	xprintk(dev , KERN_INFO , fmt , ## args)
258
259
260/*----------------------------------------------------------------------*/
261
262/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
263 *
264 * After opening, configure non-control endpoints.  Then use normal
265 * stream read() and write() requests; and maybe ioctl() to get more
266 * precise FIFO status when recovering from cancellation.
267 */
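/* A minimal userspace sketch of that synchronous model, assuming an already
 * configured bulk IN endpoint file; the path and buffer size are illustrative.
 *
 *	char buf[512];
 *	int fd = open("/dev/gadget/ep1in", O_RDWR);
 *	// (endpoint descriptors written here first; see ep_config() below)
 *	ssize_t n = write(fd, buf, sizeof buf);		// blocks until the host collects it
 *	int queued = ioctl(fd, GADGETFS_FIFO_STATUS);	// bytes still in the fifo, where supported
 */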
268
269static void epio_complete (struct usb_ep *ep, struct usb_request *req)
270{
271	struct ep_data	*epdata = ep->driver_data;
272
273	if (!req->context)
274		return;
275	if (req->status)
276		epdata->status = req->status;
277	else
278		epdata->status = req->actual;
279	complete ((struct completion *)req->context);
280}
281
282/* lock the endpoint's mutex, returning only once the endpoint is enabled.
283 * still need dev->lock to use epdata->ep.
284 */
285static int
286get_ready_ep (unsigned f_flags, struct ep_data *epdata)
287{
288	int	val;
289
290	if (f_flags & O_NONBLOCK) {
291		if (!mutex_trylock(&epdata->lock))
292			goto nonblock;
293		if (epdata->state != STATE_EP_ENABLED) {
294			mutex_unlock(&epdata->lock);
295nonblock:
296			val = -EAGAIN;
297		} else
298			val = 0;
299		return val;
300	}
301
302	val = mutex_lock_interruptible(&epdata->lock);
303	if (val < 0)
304		return val;
305
306	switch (epdata->state) {
307	case STATE_EP_ENABLED:
308		break;
309	// case STATE_EP_DISABLED:		/* "can't happen" */
310	// case STATE_EP_READY:			/* "can't happen" */
311	default:				/* error! */
312		pr_debug ("%s: ep %p not available, state %d\n",
313				shortname, epdata, epdata->state);
314		// FALLTHROUGH
315	case STATE_EP_UNBOUND:			/* clean disconnect */
316		val = -ENODEV;
317		mutex_unlock(&epdata->lock);
318	}
319	return val;
320}
321
322static ssize_t
323ep_io (struct ep_data *epdata, void *buf, unsigned len)
324{
325	DECLARE_COMPLETION_ONSTACK (done);
326	int value;
327
328	spin_lock_irq (&epdata->dev->lock);
329	if (likely (epdata->ep != NULL)) {
330		struct usb_request	*req = epdata->req;
331
332		req->context = &done;
333		req->complete = epio_complete;
334		req->buf = buf;
335		req->length = len;
336		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
337	} else
338		value = -ENODEV;
339	spin_unlock_irq (&epdata->dev->lock);
340
341	if (likely (value == 0)) {
342		value = wait_event_interruptible (done.wait, done.done);
343		if (value != 0) {
344			spin_lock_irq (&epdata->dev->lock);
345			if (likely (epdata->ep != NULL)) {
346				DBG (epdata->dev, "%s i/o interrupted\n",
347						epdata->name);
348				usb_ep_dequeue (epdata->ep, epdata->req);
349				spin_unlock_irq (&epdata->dev->lock);
350
351				wait_event (done.wait, done.done);
352				if (epdata->status == -ECONNRESET)
353					epdata->status = -EINTR;
354			} else {
355				spin_unlock_irq (&epdata->dev->lock);
356
357				DBG (epdata->dev, "endpoint gone\n");
358				epdata->status = -ENODEV;
359			}
360		}
361		return epdata->status;
362	}
363	return value;
364}
365
366
367/* handle a synchronous OUT bulk/intr/iso transfer */
368static ssize_t
369ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
370{
371	struct ep_data		*data = fd->private_data;
372	void			*kbuf;
373	ssize_t			value;
374
375	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
376		return value;
377
378	/* halt any endpoint by doing a "wrong direction" i/o call */
379	if (usb_endpoint_dir_in(&data->desc)) {
380		if (usb_endpoint_xfer_isoc(&data->desc)) {
381			mutex_unlock(&data->lock);
382			return -EINVAL;
383		}
384		DBG (data->dev, "%s halt\n", data->name);
385		spin_lock_irq (&data->dev->lock);
386		if (likely (data->ep != NULL))
387			usb_ep_set_halt (data->ep);
388		spin_unlock_irq (&data->dev->lock);
389		mutex_unlock(&data->lock);
390		return -EBADMSG;
391	}
392
393	/* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
394
395	value = -ENOMEM;
396	kbuf = kmalloc (len, GFP_KERNEL);
397	if (unlikely (!kbuf))
398		goto free1;
399
400	value = ep_io (data, kbuf, len);
401	VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
402		data->name, len, (int) value);
403	if (value >= 0 && copy_to_user (buf, kbuf, value))
404		value = -EFAULT;
405
406free1:
407	mutex_unlock(&data->lock);
408	kfree (kbuf);
409	return value;
410}
411
412/* handle a synchronous IN bulk/intr/iso transfer */
413static ssize_t
414ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
415{
416	struct ep_data		*data = fd->private_data;
417	void			*kbuf;
418	ssize_t			value;
419
420	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
421		return value;
422
423	/* halt any endpoint by doing a "wrong direction" i/o call */
424	if (!usb_endpoint_dir_in(&data->desc)) {
425		if (usb_endpoint_xfer_isoc(&data->desc)) {
426			mutex_unlock(&data->lock);
427			return -EINVAL;
428		}
429		DBG (data->dev, "%s halt\n", data->name);
430		spin_lock_irq (&data->dev->lock);
431		if (likely (data->ep != NULL))
432			usb_ep_set_halt (data->ep);
433		spin_unlock_irq (&data->dev->lock);
434		mutex_unlock(&data->lock);
435		return -EBADMSG;
436	}
437
438	/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
439
440	value = -ENOMEM;
441	kbuf = memdup_user(buf, len);
442	if (IS_ERR(kbuf)) {
443		value = PTR_ERR(kbuf);
444		goto free1;
445	}
446
447	value = ep_io (data, kbuf, len);
448	VDEBUG (data->dev, "%s write %zu IN, status %d\n",
449		data->name, len, (int) value);
450free1:
451	mutex_unlock(&data->lock);
452	return value;
453}
454
455static int
456ep_release (struct inode *inode, struct file *fd)
457{
458	struct ep_data		*data = fd->private_data;
459	int value;
460
461	value = mutex_lock_interruptible(&data->lock);
462	if (value < 0)
463		return value;
464
465	/* clean up if this can be reopened */
466	if (data->state != STATE_EP_UNBOUND) {
467		data->state = STATE_EP_DISABLED;
468		data->desc.bDescriptorType = 0;
469		data->hs_desc.bDescriptorType = 0;
470		usb_ep_disable(data->ep);
471	}
472	mutex_unlock(&data->lock);
473	put_ep (data);
474	return 0;
475}
476
477static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
478{
479	struct ep_data		*data = fd->private_data;
480	int			status;
481
482	if ((status = get_ready_ep (fd->f_flags, data)) < 0)
483		return status;
484
485	spin_lock_irq (&data->dev->lock);
486	if (likely (data->ep != NULL)) {
487		switch (code) {
488		case GADGETFS_FIFO_STATUS:
489			status = usb_ep_fifo_status (data->ep);
490			break;
491		case GADGETFS_FIFO_FLUSH:
492			usb_ep_fifo_flush (data->ep);
493			break;
494		case GADGETFS_CLEAR_HALT:
495			status = usb_ep_clear_halt (data->ep);
496			break;
497		default:
498			status = -ENOTTY;
499		}
500	} else
501		status = -ENODEV;
502	spin_unlock_irq (&data->dev->lock);
503	mutex_unlock(&data->lock);
504	return status;
505}
506
507/*----------------------------------------------------------------------*/
508
509/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
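/* Userspace typically reaches this path through the Linux AIO syscalls, for
 * example via libaio.  A sketch, assuming "out_fd" is an already configured
 * OUT endpoint file; the queue depth and buffer size are arbitrary here.
 *
 *	io_context_t ctx = 0;
 *	struct iocb iocb, *iocbs[] = { &iocb };
 *	struct io_event done;
 *	char buf[512];
 *
 *	io_setup(4, &ctx);
 *	io_prep_pread(&iocb, out_fd, buf, sizeof buf, 0);
 *	io_submit(ctx, 1, iocbs);		// queues a usb_request (ep_aio_rwtail)
 *	io_getevents(ctx, 1, 1, &done, NULL);	// reaped after ep_aio_complete()
 */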
510
511struct kiocb_priv {
512	struct usb_request	*req;
513	struct ep_data		*epdata;
514	struct kiocb		*iocb;
515	struct mm_struct	*mm;
516	struct work_struct	work;
517	void			*buf;
518	const struct iovec	*iv;
519	unsigned long		nr_segs;
520	unsigned		actual;
521};
522
523static int ep_aio_cancel(struct kiocb *iocb)
524{
525	struct kiocb_priv	*priv = iocb->private;
526	struct ep_data		*epdata;
527	int			value;
528
529	local_irq_disable();
530	epdata = priv->epdata;
531	// spin_lock(&epdata->dev->lock);
532	if (likely(epdata && epdata->ep && priv->req))
533		value = usb_ep_dequeue (epdata->ep, priv->req);
534	else
535		value = -EINVAL;
536	// spin_unlock(&epdata->dev->lock);
537	local_irq_enable();
538
539	return value;
540}
541
542static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
543{
544	ssize_t			len, total;
545	void			*to_copy;
546	int			i;
547
548	/* copy stuff into user buffers */
549	total = priv->actual;
550	len = 0;
551	to_copy = priv->buf;
552	for (i=0; i < priv->nr_segs; i++) {
553		ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
554
555		if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
556			if (len == 0)
557				len = -EFAULT;
558			break;
559		}
560
561		total -= this;
562		len += this;
563		to_copy += this;
564		if (total == 0)
565			break;
566	}
567
568	return len;
569}
570
571static void ep_user_copy_worker(struct work_struct *work)
572{
573	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
574	struct mm_struct *mm = priv->mm;
575	struct kiocb *iocb = priv->iocb;
576	size_t ret;
577
578	use_mm(mm);
579	ret = ep_copy_to_user(priv);
580	unuse_mm(mm);
581
582	/* completing the iocb can drop the ctx and mm, don't touch mm after */
583	aio_complete(iocb, ret, ret);
584
585	kfree(priv->buf);
586	kfree(priv);
587}
588
589static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
590{
591	struct kiocb		*iocb = req->context;
592	struct kiocb_priv	*priv = iocb->private;
593	struct ep_data		*epdata = priv->epdata;
594
595	/* lock against disconnect (and ideally, cancel) */
596	spin_lock(&epdata->dev->lock);
597	priv->req = NULL;
598	priv->epdata = NULL;
599
600	/* if this was a write or a read returning no data then we
601	 * don't need to copy anything to userspace, so we can
602	 * complete the aio request immediately.
603	 */
604	if (priv->iv == NULL || unlikely(req->actual == 0)) {
605		kfree(req->buf);
606		kfree(priv);
607		iocb->private = NULL;
608		/* aio_complete() reports bytes-transferred _and_ faults */
609		aio_complete(iocb, req->actual ? req->actual : req->status,
610				req->status);
611	} else {
612		/* ep_copy_to_user() won't report both; we hide some faults */
613		if (unlikely(0 != req->status))
614			DBG(epdata->dev, "%s fault %d len %d\n",
615				ep->name, req->status, req->actual);
616
617		priv->buf = req->buf;
618		priv->actual = req->actual;
619		schedule_work(&priv->work);
620	}
621	spin_unlock(&epdata->dev->lock);
622
623	usb_ep_free_request(ep, req);
624	put_ep(epdata);
625}
626
627static ssize_t
628ep_aio_rwtail(
629	struct kiocb	*iocb,
630	char		*buf,
631	size_t		len,
632	struct ep_data	*epdata,
633	const struct iovec *iv,
634	unsigned long	nr_segs
635)
636{
637	struct kiocb_priv	*priv;
638	struct usb_request	*req;
639	ssize_t			value;
640
641	priv = kmalloc(sizeof *priv, GFP_KERNEL);
642	if (!priv) {
643		value = -ENOMEM;
644fail:
645		kfree(buf);
646		return value;
647	}
648	iocb->private = priv;
649	priv->iocb = iocb;
650	priv->iv = iv;
651	priv->nr_segs = nr_segs;
652	INIT_WORK(&priv->work, ep_user_copy_worker);
653
654	value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
655	if (unlikely(value < 0)) {
656		kfree(priv);
657		goto fail;
658	}
659
660	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
661	get_ep(epdata);
662	priv->epdata = epdata;
663	priv->actual = 0;
664	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
665
666	/* each kiocb is coupled to one usb_request, but we can't
667	 * allocate or submit those if the host disconnected.
668	 */
669	spin_lock_irq(&epdata->dev->lock);
670	if (likely(epdata->ep)) {
671		req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
672		if (likely(req)) {
673			priv->req = req;
674			req->buf = buf;
675			req->length = len;
676			req->complete = ep_aio_complete;
677			req->context = iocb;
678			value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
679			if (unlikely(0 != value))
680				usb_ep_free_request(epdata->ep, req);
681		} else
682			value = -EAGAIN;
683	} else
684		value = -ENODEV;
685	spin_unlock_irq(&epdata->dev->lock);
686
687	mutex_unlock(&epdata->lock);
688
689	if (unlikely(value)) {
690		kfree(priv);
691		put_ep(epdata);
692	} else
693		value = -EIOCBQUEUED;
694	return value;
695}
696
697static ssize_t
698ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
699		unsigned long nr_segs, loff_t o)
700{
701	struct ep_data		*epdata = iocb->ki_filp->private_data;
702	char			*buf;
703
704	if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
705		return -EINVAL;
706
707	buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
708	if (unlikely(!buf))
709		return -ENOMEM;
710
711	return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs);
712}
713
714static ssize_t
715ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
716		unsigned long nr_segs, loff_t o)
717{
718	struct ep_data		*epdata = iocb->ki_filp->private_data;
719	char			*buf;
720	size_t			len = 0;
721	int			i = 0;
722
723	if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
724		return -EINVAL;
725
726	buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
727	if (unlikely(!buf))
728		return -ENOMEM;
729
730	for (i=0; i < nr_segs; i++) {
731		if (unlikely(copy_from_user(&buf[len], iov[i].iov_base,
732				iov[i].iov_len) != 0)) {
733			kfree(buf);
734			return -EFAULT;
735		}
736		len += iov[i].iov_len;
737	}
738	return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0);
739}
740
741/*----------------------------------------------------------------------*/
742
743/* used after endpoint configuration */
744static const struct file_operations ep_io_operations = {
745	.owner =	THIS_MODULE,
746	.llseek =	no_llseek,
747
748	.read =		ep_read,
749	.write =	ep_write,
750	.unlocked_ioctl = ep_ioctl,
751	.release =	ep_release,
752
753	.aio_read =	ep_aio_read,
754	.aio_write =	ep_aio_write,
755};
756
757/* ENDPOINT INITIALIZATION
758 *
759 *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
760 *     status = write (fd, descriptors, sizeof descriptors)
761 *
762 * That write establishes the endpoint configuration, configuring
763 * the controller to process bulk, interrupt, or isochronous transfers
764 * at the right maxpacket size, and so on.
765 *
766 * The descriptors are message type 1, identified by a host order u32
767 * at the beginning of what's written.  Descriptor order is: full/low
768 * speed descriptor, then optional high speed descriptor.
769 */
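/* For example (a sketch only; the endpoint address and packet sizes are
 * placeholders), the configuration message could be assembled like this:
 *
 *	struct usb_endpoint_descriptor fs = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= __cpu_to_le16(64),
 *	};
 *	struct usb_endpoint_descriptor hs = fs;
 *	__u32 tag = 1;
 *	char msg[4 + 2 * USB_DT_ENDPOINT_SIZE];
 *
 *	hs.wMaxPacketSize = __cpu_to_le16(512);
 *	memcpy(msg, &tag, 4);
 *	memcpy(msg + 4, &fs, USB_DT_ENDPOINT_SIZE);
 *	memcpy(msg + 4 + USB_DT_ENDPOINT_SIZE, &hs, USB_DT_ENDPOINT_SIZE);
 *	write(ep_fd, msg, sizeof msg);
 */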
770static ssize_t
771ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
772{
773	struct ep_data		*data = fd->private_data;
774	struct usb_ep		*ep;
775	u32			tag;
776	int			value, length = len;
777
778	value = mutex_lock_interruptible(&data->lock);
779	if (value < 0)
780		return value;
781
782	if (data->state != STATE_EP_READY) {
783		value = -EL2HLT;
784		goto fail;
785	}
786
787	value = len;
788	if (len < USB_DT_ENDPOINT_SIZE + 4)
789		goto fail0;
790
791	/* we might need to change message format someday */
792	if (copy_from_user (&tag, buf, 4)) {
793		goto fail1;
794	}
795	if (tag != 1) {
796		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
797		goto fail0;
798	}
799	buf += 4;
800	len -= 4;
801
802	/* NOTE:  audio endpoint extensions not accepted here;
803	 * just don't include the extra bytes.
804	 */
805
806	/* full/low speed descriptor, then high speed */
807	if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
808		goto fail1;
809	}
810	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
811			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
812		goto fail0;
813	if (len != USB_DT_ENDPOINT_SIZE) {
814		if (len != 2 * USB_DT_ENDPOINT_SIZE)
815			goto fail0;
816		if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
817					USB_DT_ENDPOINT_SIZE)) {
818			goto fail1;
819		}
820		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
821				|| data->hs_desc.bDescriptorType
822					!= USB_DT_ENDPOINT) {
823			DBG(data->dev, "config %s, bad hs length or type\n",
824					data->name);
825			goto fail0;
826		}
827	}
828
829	spin_lock_irq (&data->dev->lock);
830	if (data->dev->state == STATE_DEV_UNBOUND) {
831		value = -ENOENT;
832		goto gone;
833	} else if ((ep = data->ep) == NULL) {
834		value = -ENODEV;
835		goto gone;
836	}
837	switch (data->dev->gadget->speed) {
838	case USB_SPEED_LOW:
839	case USB_SPEED_FULL:
840		ep->desc = &data->desc;
841		value = usb_ep_enable(ep);
842		if (value == 0)
843			data->state = STATE_EP_ENABLED;
844		break;
845	case USB_SPEED_HIGH:
846		/* fails if caller didn't provide that descriptor... */
847		ep->desc = &data->hs_desc;
848		value = usb_ep_enable(ep);
849		if (value == 0)
850			data->state = STATE_EP_ENABLED;
851		break;
852	default:
853		DBG(data->dev, "unconnected, %s init abandoned\n",
854				data->name);
855		value = -EINVAL;
856	}
857	if (value == 0) {
858		fd->f_op = &ep_io_operations;
859		value = length;
860	}
861gone:
862	spin_unlock_irq (&data->dev->lock);
863	if (value < 0) {
864fail:
865		data->desc.bDescriptorType = 0;
866		data->hs_desc.bDescriptorType = 0;
867	}
868	mutex_unlock(&data->lock);
869	return value;
870fail0:
871	value = -EINVAL;
872	goto fail;
873fail1:
874	value = -EFAULT;
875	goto fail;
876}
877
878static int
879ep_open (struct inode *inode, struct file *fd)
880{
881	struct ep_data		*data = inode->i_private;
882	int			value = -EBUSY;
883
884	if (mutex_lock_interruptible(&data->lock) != 0)
885		return -EINTR;
886	spin_lock_irq (&data->dev->lock);
887	if (data->dev->state == STATE_DEV_UNBOUND)
888		value = -ENOENT;
889	else if (data->state == STATE_EP_DISABLED) {
890		value = 0;
891		data->state = STATE_EP_READY;
892		get_ep (data);
893		fd->private_data = data;
894		VDEBUG (data->dev, "%s ready\n", data->name);
895	} else
896		DBG (data->dev, "%s state %d\n",
897			data->name, data->state);
898	spin_unlock_irq (&data->dev->lock);
899	mutex_unlock(&data->lock);
900	return value;
901}
902
903/* used before endpoint configuration */
904static const struct file_operations ep_config_operations = {
905	.llseek =	no_llseek,
906
907	.open =		ep_open,
908	.write =	ep_config,
909	.release =	ep_release,
910};
911
912/*----------------------------------------------------------------------*/
913
914/* EP0 IMPLEMENTATION can be partly in userspace.
915 *
916 * Drivers that use this facility receive various events, including
917 * control requests the kernel doesn't handle.  Drivers that don't
918 * use this facility may be too simple-minded for real applications.
919 */
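/* A sketch of the userspace half (buffer names and sizes are illustrative):
 * read events from ep0, and answer SETUP events with ep0 read()/write().
 *
 *	struct usb_gadgetfs_event ev;
 *
 *	read(ep0, &ev, sizeof ev);
 *	if (ev.type == GADGETFS_SETUP) {
 *		struct usb_ctrlrequest *setup = &ev.u.setup;
 *		__u16 wLength = __le16_to_cpu(setup->wLength);
 *
 *		if (setup->bRequestType & USB_DIR_IN)
 *			write(ep0, reply, reply_len);	// IN data, reply_len <= wLength
 *		else if (wLength)
 *			read(ep0, reply, wLength);	// collect OUT data, then status
 *		else
 *			read(ep0, NULL, 0);		// status-only request: ack it
 *	}
 *
 * Issuing the "wrong direction" call instead makes the request stall.
 */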
920
921static inline void ep0_readable (struct dev_data *dev)
922{
923	wake_up (&dev->wait);
924	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
925}
926
927static void clean_req (struct usb_ep *ep, struct usb_request *req)
928{
929	struct dev_data		*dev = ep->driver_data;
930
931	if (req->buf != dev->rbuf) {
932		kfree(req->buf);
933		req->buf = dev->rbuf;
934	}
935	req->complete = epio_complete;
936	dev->setup_out_ready = 0;
937}
938
939static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
940{
941	struct dev_data		*dev = ep->driver_data;
942	unsigned long		flags;
943	int			free = 1;
944
945	/* for control OUT, data must still get to userspace */
946	spin_lock_irqsave(&dev->lock, flags);
947	if (!dev->setup_in) {
948		dev->setup_out_error = (req->status != 0);
949		if (!dev->setup_out_error)
950			free = 0;
951		dev->setup_out_ready = 1;
952		ep0_readable (dev);
953	}
954
955	/* clean up as appropriate */
956	if (free && req->buf != &dev->rbuf)
957		clean_req (ep, req);
958	req->complete = epio_complete;
959	spin_unlock_irqrestore(&dev->lock, flags);
960}
961
962static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
963{
964	struct dev_data	*dev = ep->driver_data;
965
966	if (dev->setup_out_ready) {
967		DBG (dev, "ep0 request busy!\n");
968		return -EBUSY;
969	}
970	if (len > sizeof (dev->rbuf))
971		req->buf = kmalloc(len, GFP_ATOMIC);
972	if (req->buf == NULL) {
973		req->buf = dev->rbuf;
974		return -ENOMEM;
975	}
976	req->complete = ep0_complete;
977	req->length = len;
978	req->zero = 0;
979	return 0;
980}
981
982static ssize_t
983ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
984{
985	struct dev_data			*dev = fd->private_data;
986	ssize_t				retval;
987	enum ep0_state			state;
988
989	spin_lock_irq (&dev->lock);
990
991	/* report fd mode change before acting on it */
992	if (dev->setup_abort) {
993		dev->setup_abort = 0;
994		retval = -EIDRM;
995		goto done;
996	}
997
998	/* control DATA stage */
999	if ((state = dev->state) == STATE_DEV_SETUP) {
1000
1001		if (dev->setup_in) {		/* stall IN */
1002			VDEBUG(dev, "ep0in stall\n");
1003			(void) usb_ep_set_halt (dev->gadget->ep0);
1004			retval = -EL2HLT;
1005			dev->state = STATE_DEV_CONNECTED;
1006
1007		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
1008			struct usb_ep		*ep = dev->gadget->ep0;
1009			struct usb_request	*req = dev->req;
1010
1011			if ((retval = setup_req (ep, req, 0)) == 0)
1012				retval = usb_ep_queue (ep, req, GFP_ATOMIC);
1013			dev->state = STATE_DEV_CONNECTED;
1014
1015			/* assume that was SET_CONFIGURATION */
1016			if (dev->current_config) {
1017				unsigned power;
1018
1019				if (gadget_is_dualspeed(dev->gadget)
1020						&& (dev->gadget->speed
1021							== USB_SPEED_HIGH))
1022					power = dev->hs_config->bMaxPower;
1023				else
1024					power = dev->config->bMaxPower;
1025				usb_gadget_vbus_draw(dev->gadget, 2 * power);
1026			}
1027
1028		} else {			/* collect OUT data */
1029			if ((fd->f_flags & O_NONBLOCK) != 0
1030					&& !dev->setup_out_ready) {
1031				retval = -EAGAIN;
1032				goto done;
1033			}
1034			spin_unlock_irq (&dev->lock);
1035			retval = wait_event_interruptible (dev->wait,
1036					dev->setup_out_ready != 0);
1037
1038			/* FIXME state could change from under us */
1039			spin_lock_irq (&dev->lock);
1040			if (retval)
1041				goto done;
1042
1043			if (dev->state != STATE_DEV_SETUP) {
1044				retval = -ECANCELED;
1045				goto done;
1046			}
1047			dev->state = STATE_DEV_CONNECTED;
1048
1049			if (dev->setup_out_error)
1050				retval = -EIO;
1051			else {
1052				len = min (len, (size_t)dev->req->actual);
1053// FIXME don't call this with the spinlock held ...
1054				if (copy_to_user (buf, dev->req->buf, len))
1055					retval = -EFAULT;
1056				else
1057					retval = len;
1058				clean_req (dev->gadget->ep0, dev->req);
1059				/* NOTE userspace can't yet choose to stall */
1060			}
1061		}
1062		goto done;
1063	}
1064
1065	/* else normal: return event data */
1066	if (len < sizeof dev->event [0]) {
1067		retval = -EINVAL;
1068		goto done;
1069	}
1070	len -= len % sizeof (struct usb_gadgetfs_event);
1071	dev->usermode_setup = 1;
1072
1073scan:
1074	/* return queued events right away */
1075	if (dev->ev_next != 0) {
1076		unsigned		i, n;
1077
1078		n = len / sizeof (struct usb_gadgetfs_event);
1079		if (dev->ev_next < n)
1080			n = dev->ev_next;
1081
1082		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
1083		for (i = 0; i < n; i++) {
1084			if (dev->event [i].type == GADGETFS_SETUP) {
1085				dev->state = STATE_DEV_SETUP;
1086				n = i + 1;
1087				break;
1088			}
1089		}
1090		spin_unlock_irq (&dev->lock);
1091		len = n * sizeof (struct usb_gadgetfs_event);
1092		if (copy_to_user (buf, &dev->event, len))
1093			retval = -EFAULT;
1094		else
1095			retval = len;
1096		if (len > 0) {
1097			/* NOTE this doesn't guard against broken drivers;
1098			 * concurrent ep0 readers may lose events.
1099			 */
1100			spin_lock_irq (&dev->lock);
1101			if (dev->ev_next > n) {
1102				memmove(&dev->event[0], &dev->event[n],
1103					sizeof (struct usb_gadgetfs_event)
1104						* (dev->ev_next - n));
1105			}
1106			dev->ev_next -= n;
1107			spin_unlock_irq (&dev->lock);
1108		}
1109		return retval;
1110	}
1111	if (fd->f_flags & O_NONBLOCK) {
1112		retval = -EAGAIN;
1113		goto done;
1114	}
1115
1116	switch (state) {
1117	default:
1118		DBG (dev, "fail %s, state %d\n", __func__, state);
1119		retval = -ESRCH;
1120		break;
1121	case STATE_DEV_UNCONNECTED:
1122	case STATE_DEV_CONNECTED:
1123		spin_unlock_irq (&dev->lock);
1124		DBG (dev, "%s wait\n", __func__);
1125
1126		/* wait for events */
1127		retval = wait_event_interruptible (dev->wait,
1128				dev->ev_next != 0);
1129		if (retval < 0)
1130			return retval;
1131		spin_lock_irq (&dev->lock);
1132		goto scan;
1133	}
1134
1135done:
1136	spin_unlock_irq (&dev->lock);
1137	return retval;
1138}
1139
1140static struct usb_gadgetfs_event *
1141next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1142{
1143	struct usb_gadgetfs_event	*event;
1144	unsigned			i;
1145
1146	switch (type) {
1147	/* these events purge the queue */
1148	case GADGETFS_DISCONNECT:
1149		if (dev->state == STATE_DEV_SETUP)
1150			dev->setup_abort = 1;
1151		// FALL THROUGH
1152	case GADGETFS_CONNECT:
1153		dev->ev_next = 0;
1154		break;
1155	case GADGETFS_SETUP:		/* previous request timed out */
1156	case GADGETFS_SUSPEND:		/* same effect */
1157		/* these events can't be repeated */
1158		for (i = 0; i != dev->ev_next; i++) {
1159			if (dev->event [i].type != type)
1160				continue;
1161			DBG(dev, "discard old event[%d] %d\n", i, type);
1162			dev->ev_next--;
1163			if (i == dev->ev_next)
1164				break;
1165			/* indices start at zero, for simplicity */
1166			memmove (&dev->event [i], &dev->event [i + 1],
1167				sizeof (struct usb_gadgetfs_event)
1168					* (dev->ev_next - i));
1169		}
1170		break;
1171	default:
1172		BUG ();
1173	}
1174	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1175	event = &dev->event [dev->ev_next++];
1176	BUG_ON (dev->ev_next > N_EVENT);
1177	memset (event, 0, sizeof *event);
1178	event->type = type;
1179	return event;
1180}
1181
1182static ssize_t
1183ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1184{
1185	struct dev_data		*dev = fd->private_data;
1186	ssize_t			retval = -ESRCH;
1187
1188	spin_lock_irq (&dev->lock);
1189
1190	/* report fd mode change before acting on it */
1191	if (dev->setup_abort) {
1192		dev->setup_abort = 0;
1193		retval = -EIDRM;
1194
1195	/* data and/or status stage for control request */
1196	} else if (dev->state == STATE_DEV_SETUP) {
1197
1198		/* IN DATA+STATUS caller makes len <= wLength */
1199		if (dev->setup_in) {
1200			retval = setup_req (dev->gadget->ep0, dev->req, len);
1201			if (retval == 0) {
1202				dev->state = STATE_DEV_CONNECTED;
1203				spin_unlock_irq (&dev->lock);
1204				if (copy_from_user (dev->req->buf, buf, len))
1205					retval = -EFAULT;
1206				else {
1207					if (len < dev->setup_wLength)
1208						dev->req->zero = 1;
1209					retval = usb_ep_queue (
1210						dev->gadget->ep0, dev->req,
1211						GFP_KERNEL);
1212				}
1213				if (retval < 0) {
1214					spin_lock_irq (&dev->lock);
1215					clean_req (dev->gadget->ep0, dev->req);
1216					spin_unlock_irq (&dev->lock);
1217				} else
1218					retval = len;
1219
1220				return retval;
1221			}
1222
1223		/* can stall some OUT transfers */
1224		} else if (dev->setup_can_stall) {
1225			VDEBUG(dev, "ep0out stall\n");
1226			(void) usb_ep_set_halt (dev->gadget->ep0);
1227			retval = -EL2HLT;
1228			dev->state = STATE_DEV_CONNECTED;
1229		} else {
1230			DBG(dev, "bogus ep0out stall!\n");
1231		}
1232	} else
1233		DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1234
1235	spin_unlock_irq (&dev->lock);
1236	return retval;
1237}
1238
1239static int
1240ep0_fasync (int f, struct file *fd, int on)
1241{
1242	struct dev_data		*dev = fd->private_data;
1243	// caller must F_SETOWN before signal delivery happens
1244	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1245	return fasync_helper (f, fd, on, &dev->fasync);
1246}
1247
1248static struct usb_gadget_driver gadgetfs_driver;
1249
1250static int
1251dev_release (struct inode *inode, struct file *fd)
1252{
1253	struct dev_data		*dev = fd->private_data;
1254
1255	/* closing ep0 === shutdown all */
1256
1257	usb_gadget_unregister_driver (&gadgetfs_driver);
1258
1259	/* at this point "good" hardware has disconnected the
1260	 * device from USB; the host won't see it any more.
1261	 * alternatively, all host requests will time out.
1262	 */
1263
1264	kfree (dev->buf);
1265	dev->buf = NULL;
1266
1267	/* other endpoints were all decoupled from this device */
1268	spin_lock_irq(&dev->lock);
1269	dev->state = STATE_DEV_DISABLED;
1270	spin_unlock_irq(&dev->lock);
1271
1272	put_dev (dev);
1273	return 0;
1274}
1275
1276static unsigned int
1277ep0_poll (struct file *fd, poll_table *wait)
1278{
1279	struct dev_data		*dev = fd->private_data;
1280	int			mask = 0;
1281
1282	poll_wait(fd, &dev->wait, wait);
1283
1284	spin_lock_irq (&dev->lock);
1285
1286	/* report fd mode change before acting on it */
1287	if (dev->setup_abort) {
1288		dev->setup_abort = 0;
1289		mask = POLLHUP;
1290		goto out;
1291	}
1292
1293	if (dev->state == STATE_DEV_SETUP) {
1294		if (dev->setup_in || dev->setup_can_stall)
1295			mask = POLLOUT;
1296	} else {
1297		if (dev->ev_next != 0)
1298			mask = POLLIN;
1299	}
1300out:
1301	spin_unlock_irq(&dev->lock);
1302	return mask;
1303}
1304
1305static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1306{
1307	struct dev_data		*dev = fd->private_data;
1308	struct usb_gadget	*gadget = dev->gadget;
1309	long ret = -ENOTTY;
1310
1311	if (gadget->ops->ioctl)
1312		ret = gadget->ops->ioctl (gadget, code, value);
1313
1314	return ret;
1315}
1316
1317/* used after device configuration */
1318static const struct file_operations ep0_io_operations = {
1319	.owner =	THIS_MODULE,
1320	.llseek =	no_llseek,
1321
1322	.read =		ep0_read,
1323	.write =	ep0_write,
1324	.fasync =	ep0_fasync,
1325	.poll =		ep0_poll,
1326	.unlocked_ioctl =	dev_ioctl,
1327	.release =	dev_release,
1328};
1329
1330/*----------------------------------------------------------------------*/
1331
1332/* The in-kernel gadget driver handles most ep0 issues, in particular
1333 * enumerating the single configuration (as provided from user space).
1334 *
1335 * Unrecognized ep0 requests may be handled in user space.
1336 */
1337
1338static void make_qualifier (struct dev_data *dev)
1339{
1340	struct usb_qualifier_descriptor		qual;
1341	struct usb_device_descriptor		*desc;
1342
1343	qual.bLength = sizeof qual;
1344	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1345	qual.bcdUSB = cpu_to_le16 (0x0200);
1346
1347	desc = dev->dev;
1348	qual.bDeviceClass = desc->bDeviceClass;
1349	qual.bDeviceSubClass = desc->bDeviceSubClass;
1350	qual.bDeviceProtocol = desc->bDeviceProtocol;
1351
1352	/* assumes ep0 uses the same value for both speeds ... */
1353	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1354
1355	qual.bNumConfigurations = 1;
1356	qual.bRESERVED = 0;
1357
1358	memcpy (dev->rbuf, &qual, sizeof qual);
1359}
1360
1361static int
1362config_buf (struct dev_data *dev, u8 type, unsigned index)
1363{
1364	int		len;
1365	int		hs = 0;
1366
1367	/* only one configuration */
1368	if (index > 0)
1369		return -EINVAL;
1370
1371	if (gadget_is_dualspeed(dev->gadget)) {
1372		hs = (dev->gadget->speed == USB_SPEED_HIGH);
1373		if (type == USB_DT_OTHER_SPEED_CONFIG)
1374			hs = !hs;
1375	}
1376	if (hs) {
1377		dev->req->buf = dev->hs_config;
1378		len = le16_to_cpu(dev->hs_config->wTotalLength);
1379	} else {
1380		dev->req->buf = dev->config;
1381		len = le16_to_cpu(dev->config->wTotalLength);
1382	}
1383	((u8 *)dev->req->buf) [1] = type;
1384	return len;
1385}
1386
1387static int
1388gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1389{
1390	struct dev_data			*dev = get_gadget_data (gadget);
1391	struct usb_request		*req = dev->req;
1392	int				value = -EOPNOTSUPP;
1393	struct usb_gadgetfs_event	*event;
1394	u16				w_value = le16_to_cpu(ctrl->wValue);
1395	u16				w_length = le16_to_cpu(ctrl->wLength);
1396
1397	spin_lock (&dev->lock);
1398	dev->setup_abort = 0;
1399	if (dev->state == STATE_DEV_UNCONNECTED) {
1400		if (gadget_is_dualspeed(gadget)
1401				&& gadget->speed == USB_SPEED_HIGH
1402				&& dev->hs_config == NULL) {
1403			spin_unlock(&dev->lock);
1404			ERROR (dev, "no high speed config??\n");
1405			return -EINVAL;
1406		}
1407
1408		dev->state = STATE_DEV_CONNECTED;
1409
1410		INFO (dev, "connected\n");
1411		event = next_event (dev, GADGETFS_CONNECT);
1412		event->u.speed = gadget->speed;
1413		ep0_readable (dev);
1414
1415	/* host may have given up waiting for response.  we can miss control
1416	 * requests handled lower down (device/endpoint status and features);
1417	 * then ep0_{read,write} will report the wrong status. controller
1418	 * driver will have aborted pending i/o.
1419	 */
1420	} else if (dev->state == STATE_DEV_SETUP)
1421		dev->setup_abort = 1;
1422
1423	req->buf = dev->rbuf;
1424	req->context = NULL;
1425	value = -EOPNOTSUPP;
1426	switch (ctrl->bRequest) {
1427
1428	case USB_REQ_GET_DESCRIPTOR:
1429		if (ctrl->bRequestType != USB_DIR_IN)
1430			goto unrecognized;
1431		switch (w_value >> 8) {
1432
1433		case USB_DT_DEVICE:
1434			value = min (w_length, (u16) sizeof *dev->dev);
1435			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1436			req->buf = dev->dev;
1437			break;
1438		case USB_DT_DEVICE_QUALIFIER:
1439			if (!dev->hs_config)
1440				break;
1441			value = min (w_length, (u16)
1442				sizeof (struct usb_qualifier_descriptor));
1443			make_qualifier (dev);
1444			break;
1445		case USB_DT_OTHER_SPEED_CONFIG:
1446			// FALLTHROUGH
1447		case USB_DT_CONFIG:
1448			value = config_buf (dev,
1449					w_value >> 8,
1450					w_value & 0xff);
1451			if (value >= 0)
1452				value = min (w_length, (u16) value);
1453			break;
1454		case USB_DT_STRING:
1455			goto unrecognized;
1456
1457		default:		// all others are errors
1458			break;
1459		}
1460		break;
1461
1462	/* currently one config, two speeds */
1463	case USB_REQ_SET_CONFIGURATION:
1464		if (ctrl->bRequestType != 0)
1465			goto unrecognized;
1466		if (0 == (u8) w_value) {
1467			value = 0;
1468			dev->current_config = 0;
1469			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1470			// user mode expected to disable endpoints
1471		} else {
1472			u8	config, power;
1473
1474			if (gadget_is_dualspeed(gadget)
1475					&& gadget->speed == USB_SPEED_HIGH) {
1476				config = dev->hs_config->bConfigurationValue;
1477				power = dev->hs_config->bMaxPower;
1478			} else {
1479				config = dev->config->bConfigurationValue;
1480				power = dev->config->bMaxPower;
1481			}
1482
1483			if (config == (u8) w_value) {
1484				value = 0;
1485				dev->current_config = config;
1486				usb_gadget_vbus_draw(gadget, 2 * power);
1487			}
1488		}
1489
1490		/* report SET_CONFIGURATION like any other control request,
1491		 * except that usermode may not stall this.  the next
1492		 * request mustn't be allowed to start until this finishes:
1493		 * endpoints and threads set up, etc.
1494		 *
1495		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
1496		 * has bad/racey automagic that prevents synchronizing here.
1497		 * has bad/racy automagic that prevents synchronizing here.
1498		 */
1499		if (value == 0) {
1500			INFO (dev, "configuration #%d\n", dev->current_config);
1501			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1502			if (dev->usermode_setup) {
1503				dev->setup_can_stall = 0;
1504				goto delegate;
1505			}
1506		}
1507		break;
1508
1509#ifndef	CONFIG_USB_PXA25X
1510	/* PXA automagically handles this request too */
1511	case USB_REQ_GET_CONFIGURATION:
1512		if (ctrl->bRequestType != 0x80)
1513			goto unrecognized;
1514		*(u8 *)req->buf = dev->current_config;
1515		value = min (w_length, (u16) 1);
1516		break;
1517#endif
1518
1519	default:
1520unrecognized:
1521		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1522			dev->usermode_setup ? "delegate" : "fail",
1523			ctrl->bRequestType, ctrl->bRequest,
1524			w_value, le16_to_cpu(ctrl->wIndex), w_length);
1525
1526		/* if there's an ep0 reader, don't stall */
1527		if (dev->usermode_setup) {
1528			dev->setup_can_stall = 1;
1529delegate:
1530			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1531						? 1 : 0;
1532			dev->setup_wLength = w_length;
1533			dev->setup_out_ready = 0;
1534			dev->setup_out_error = 0;
1535			value = 0;
1536
1537			/* read DATA stage for OUT right away */
1538			if (unlikely (!dev->setup_in && w_length)) {
1539				value = setup_req (gadget->ep0, dev->req,
1540							w_length);
1541				if (value < 0)
1542					break;
1543				value = usb_ep_queue (gadget->ep0, dev->req,
1544							GFP_ATOMIC);
1545				if (value < 0) {
1546					clean_req (gadget->ep0, dev->req);
1547					break;
1548				}
1549
1550				/* we can't currently stall these */
1551				dev->setup_can_stall = 0;
1552			}
1553
1554			/* state changes when reader collects event */
1555			event = next_event (dev, GADGETFS_SETUP);
1556			event->u.setup = *ctrl;
1557			ep0_readable (dev);
1558			spin_unlock (&dev->lock);
1559			return 0;
1560		}
1561	}
1562
1563	/* proceed with data transfer and status phases? */
1564	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1565		req->length = value;
1566		req->zero = value < w_length;
1567		value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
1568		if (value < 0) {
1569			DBG (dev, "ep_queue --> %d\n", value);
1570			req->status = 0;
1571		}
1572	}
1573
1574	/* device stalls when value < 0 */
1575	spin_unlock (&dev->lock);
1576	return value;
1577}
1578
1579static void destroy_ep_files (struct dev_data *dev)
1580{
1581	DBG (dev, "%s %d\n", __func__, dev->state);
1582
1583	/* dev->state must prevent interference */
1584	spin_lock_irq (&dev->lock);
1585	while (!list_empty(&dev->epfiles)) {
1586		struct ep_data	*ep;
1587		struct inode	*parent;
1588		struct dentry	*dentry;
1589
1590		/* break link to FS */
1591		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1592		list_del_init (&ep->epfiles);
1593		dentry = ep->dentry;
1594		ep->dentry = NULL;
1595		parent = dentry->d_parent->d_inode;
1596
1597		/* break link to controller */
1598		if (ep->state == STATE_EP_ENABLED)
1599			(void) usb_ep_disable (ep->ep);
1600		ep->state = STATE_EP_UNBOUND;
1601		usb_ep_free_request (ep->ep, ep->req);
1602		ep->ep = NULL;
1603		wake_up (&ep->wait);
1604		put_ep (ep);
1605
1606		spin_unlock_irq (&dev->lock);
1607
1608		/* break link to dcache */
1609		mutex_lock (&parent->i_mutex);
1610		d_delete (dentry);
1611		dput (dentry);
1612		mutex_unlock (&parent->i_mutex);
1613
1614		spin_lock_irq (&dev->lock);
1615	}
1616	spin_unlock_irq (&dev->lock);
1617}
1618
1619
1620static struct dentry *
1621gadgetfs_create_file (struct super_block *sb, char const *name,
1622		void *data, const struct file_operations *fops);
1623
1624static int activate_ep_files (struct dev_data *dev)
1625{
1626	struct usb_ep	*ep;
1627	struct ep_data	*data;
1628
1629	gadget_for_each_ep (ep, dev->gadget) {
1630
1631		data = kzalloc(sizeof(*data), GFP_KERNEL);
1632		if (!data)
1633			goto enomem0;
1634		data->state = STATE_EP_DISABLED;
1635		mutex_init(&data->lock);
1636		init_waitqueue_head (&data->wait);
1637
1638		strncpy (data->name, ep->name, sizeof (data->name) - 1);
1639		atomic_set (&data->count, 1);
1640		data->dev = dev;
1641		get_dev (dev);
1642
1643		data->ep = ep;
1644		ep->driver_data = data;
1645
1646		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1647		if (!data->req)
1648			goto enomem1;
1649
1650		data->dentry = gadgetfs_create_file (dev->sb, data->name,
1651				data, &ep_config_operations);
1652		if (!data->dentry)
1653			goto enomem2;
1654		list_add_tail (&data->epfiles, &dev->epfiles);
1655	}
1656	return 0;
1657
1658enomem2:
1659	usb_ep_free_request (ep, data->req);
1660enomem1:
1661	put_dev (dev);
1662	kfree (data);
1663enomem0:
1664	DBG (dev, "%s enomem\n", __func__);
1665	destroy_ep_files (dev);
1666	return -ENOMEM;
1667}
1668
1669static void
1670gadgetfs_unbind (struct usb_gadget *gadget)
1671{
1672	struct dev_data		*dev = get_gadget_data (gadget);
1673
1674	DBG (dev, "%s\n", __func__);
1675
1676	spin_lock_irq (&dev->lock);
1677	dev->state = STATE_DEV_UNBOUND;
1678	spin_unlock_irq (&dev->lock);
1679
1680	destroy_ep_files (dev);
1681	gadget->ep0->driver_data = NULL;
1682	set_gadget_data (gadget, NULL);
1683
1684	/* we've already been disconnected ... no i/o is active */
1685	if (dev->req)
1686		usb_ep_free_request (gadget->ep0, dev->req);
1687	DBG (dev, "%s done\n", __func__);
1688	put_dev (dev);
1689}
1690
1691static struct dev_data		*the_device;
1692
1693static int gadgetfs_bind(struct usb_gadget *gadget,
1694		struct usb_gadget_driver *driver)
1695{
1696	struct dev_data		*dev = the_device;
1697
1698	if (!dev)
1699		return -ESRCH;
1700	if (0 != strcmp (CHIP, gadget->name)) {
1701		pr_err("%s expected %s controller not %s\n",
1702			shortname, CHIP, gadget->name);
1703		return -ENODEV;
1704	}
1705
1706	set_gadget_data (gadget, dev);
1707	dev->gadget = gadget;
1708	gadget->ep0->driver_data = dev;
1709
1710	/* preallocate control response and buffer */
1711	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1712	if (!dev->req)
1713		goto enomem;
1714	dev->req->context = NULL;
1715	dev->req->complete = epio_complete;
1716
1717	if (activate_ep_files (dev) < 0)
1718		goto enomem;
1719
1720	INFO (dev, "bound to %s driver\n", gadget->name);
1721	spin_lock_irq(&dev->lock);
1722	dev->state = STATE_DEV_UNCONNECTED;
1723	spin_unlock_irq(&dev->lock);
1724	get_dev (dev);
1725	return 0;
1726
1727enomem:
1728	gadgetfs_unbind (gadget);
1729	return -ENOMEM;
1730}
1731
1732static void
1733gadgetfs_disconnect (struct usb_gadget *gadget)
1734{
1735	struct dev_data		*dev = get_gadget_data (gadget);
1736	unsigned long		flags;
1737
1738	spin_lock_irqsave (&dev->lock, flags);
1739	if (dev->state == STATE_DEV_UNCONNECTED)
1740		goto exit;
1741	dev->state = STATE_DEV_UNCONNECTED;
1742
1743	INFO (dev, "disconnected\n");
1744	next_event (dev, GADGETFS_DISCONNECT);
1745	ep0_readable (dev);
1746exit:
1747	spin_unlock_irqrestore (&dev->lock, flags);
1748}
1749
1750static void
1751gadgetfs_suspend (struct usb_gadget *gadget)
1752{
1753	struct dev_data		*dev = get_gadget_data (gadget);
1754
1755	INFO (dev, "suspended from state %d\n", dev->state);
1756	spin_lock (&dev->lock);
1757	switch (dev->state) {
1758	case STATE_DEV_SETUP:		// VERY odd... host died??
1759	case STATE_DEV_CONNECTED:
1760	case STATE_DEV_UNCONNECTED:
1761		next_event (dev, GADGETFS_SUSPEND);
1762		ep0_readable (dev);
1763		/* FALLTHROUGH */
1764	default:
1765		break;
1766	}
1767	spin_unlock (&dev->lock);
1768}
1769
1770static struct usb_gadget_driver gadgetfs_driver = {
1771	.function	= (char *) driver_desc,
1772	.bind		= gadgetfs_bind,
1773	.unbind		= gadgetfs_unbind,
1774	.setup		= gadgetfs_setup,
1775	.reset		= gadgetfs_disconnect,
1776	.disconnect	= gadgetfs_disconnect,
1777	.suspend	= gadgetfs_suspend,
1778
1779	.driver	= {
1780		.name		= (char *) shortname,
1781	},
1782};
1783
1784/*----------------------------------------------------------------------*/
1785
1786static void gadgetfs_nop(struct usb_gadget *arg) { }
1787
1788static int gadgetfs_probe(struct usb_gadget *gadget,
1789		struct usb_gadget_driver *driver)
1790{
1791	CHIP = gadget->name;
1792	return -EISNAM;
1793}
1794
1795static struct usb_gadget_driver probe_driver = {
1796	.max_speed	= USB_SPEED_HIGH,
1797	.bind		= gadgetfs_probe,
1798	.unbind		= gadgetfs_nop,
1799	.setup		= (void *)gadgetfs_nop,
1800	.disconnect	= gadgetfs_nop,
1801	.driver	= {
1802		.name		= "nop",
1803	},
1804};
1805
1806
1807/* DEVICE INITIALIZATION
1808 *
1809 *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
1810 *     status = write (fd, descriptors, sizeof descriptors)
1811 *
1812 * That write establishes the device configuration, so the kernel can
1813 * bind to the controller ... guaranteeing it can handle enumeration
1814 * at all necessary speeds.  Descriptor order is:
1815 *
1816 * . message tag (u32, host order) ... for now, must be zero; it
1817 *	would change to support features like multi-config devices
1818 * . full/low speed config ... all wTotalLength bytes (with interface,
1819 *	class, altsetting, endpoint, and other descriptors)
1820 * . high speed config ... all descriptors, for high speed operation;
1821 *	this one's optional except for high-speed hardware
1822 * . device descriptor
1823 *
1824 * Endpoints are not yet enabled. Drivers must wait until device
1825 * configuration and interface altsetting changes create
1826 * the need to configure (or unconfigure) them.
1827 *
1828 * After initialization, the device stays active for as long as that
1829 * $CHIP file is open.  Events must then be read from that descriptor,
1830 * such as configuration notifications.
1831 */
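/* A sketch of that initialization write, assuming "config", "hs_config" and
 * "device_desc" point at descriptors built elsewhere (each config's
 * wTotalLength must cover the interface/endpoint descriptors following it):
 *
 *	__u32 tag = 0;
 *	char buf[4096], *p = buf;
 *
 *	memcpy(p, &tag, 4);
 *	p += 4;
 *	memcpy(p, config, __le16_to_cpu(config->wTotalLength));
 *	p += __le16_to_cpu(config->wTotalLength);
 *	memcpy(p, hs_config, __le16_to_cpu(hs_config->wTotalLength));
 *	p += __le16_to_cpu(hs_config->wTotalLength);
 *	memcpy(p, device_desc, USB_DT_DEVICE_SIZE);
 *	p += USB_DT_DEVICE_SIZE;
 *
 *	int ep0 = open("/dev/gadget/$CHIP", O_RDWR);
 *	write(ep0, buf, p - buf);
 */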
1832
1833static int is_valid_config (struct usb_config_descriptor *config)
1834{
1835	return config->bDescriptorType == USB_DT_CONFIG
1836		&& config->bLength == USB_DT_CONFIG_SIZE
1837		&& config->bConfigurationValue != 0
1838		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1839		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1840	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1841	/* FIXME check lengths: walk to end */
1842}
1843
1844static ssize_t
1845dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1846{
1847	struct dev_data		*dev = fd->private_data;
1848	ssize_t			value = len, length = len;
1849	unsigned		total;
1850	u32			tag;
1851	char			*kbuf;
1852
1853	if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
1854		return -EINVAL;
1855
1856	/* we might need to change message format someday */
1857	if (copy_from_user (&tag, buf, 4))
1858		return -EFAULT;
1859	if (tag != 0)
1860		return -EINVAL;
1861	buf += 4;
1862	length -= 4;
1863
1864	kbuf = memdup_user(buf, length);
1865	if (IS_ERR(kbuf))
1866		return PTR_ERR(kbuf);
1867
1868	spin_lock_irq (&dev->lock);
1869	value = -EINVAL;
1870	if (dev->buf)
1871		goto fail;
1872	dev->buf = kbuf;
1873
1874	/* full or low speed config */
1875	dev->config = (void *) kbuf;
1876	total = le16_to_cpu(dev->config->wTotalLength);
1877	if (!is_valid_config (dev->config) || total >= length)
1878		goto fail;
1879	kbuf += total;
1880	length -= total;
1881
1882	/* optional high speed config */
1883	if (kbuf [1] == USB_DT_CONFIG) {
1884		dev->hs_config = (void *) kbuf;
1885		total = le16_to_cpu(dev->hs_config->wTotalLength);
1886		if (!is_valid_config (dev->hs_config) || total >= length)
1887			goto fail;
1888		kbuf += total;
1889		length -= total;
1890	}
1891
1892	/* could support multiple configs, using another encoding! */
1893
1894	/* device descriptor (tweaked for paranoia) */
1895	if (length != USB_DT_DEVICE_SIZE)
1896		goto fail;
1897	dev->dev = (void *)kbuf;
1898	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1899			|| dev->dev->bDescriptorType != USB_DT_DEVICE
1900			|| dev->dev->bNumConfigurations != 1)
1901		goto fail;
1902	dev->dev->bNumConfigurations = 1;
1903	dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1904
1905	/* triggers gadgetfs_bind(); then we can enumerate. */
1906	spin_unlock_irq (&dev->lock);
1907	if (dev->hs_config)
1908		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1909	else
1910		gadgetfs_driver.max_speed = USB_SPEED_FULL;
1911
1912	value = usb_gadget_probe_driver(&gadgetfs_driver);
1913	if (value != 0) {
1914		kfree (dev->buf);
1915		dev->buf = NULL;
1916	} else {
1917		/* at this point "good" hardware has for the first time
1918		 * let the USB host see us.  alternatively, if users
1919		 * unplug/replug that will clear all the error state.
1920		 *
1921		 * note:  everything running before here was guaranteed
1922		 * to choke driver model style diagnostics.  from here
1923		 * on, they can work ... except in cleanup paths that
1924		 * kick in after the ep0 descriptor is closed.
1925		 */
1926		fd->f_op = &ep0_io_operations;
1927		value = len;
1928	}
1929	return value;
1930
1931fail:
1932	spin_unlock_irq (&dev->lock);
1933	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1934	kfree (dev->buf);
1935	dev->buf = NULL;
1936	return value;
1937}
1938
1939static int
1940dev_open (struct inode *inode, struct file *fd)
1941{
1942	struct dev_data		*dev = inode->i_private;
1943	int			value = -EBUSY;
1944
1945	spin_lock_irq(&dev->lock);
1946	if (dev->state == STATE_DEV_DISABLED) {
1947		dev->ev_next = 0;
1948		dev->state = STATE_DEV_OPENED;
1949		fd->private_data = dev;
1950		get_dev (dev);
1951		value = 0;
1952	}
1953	spin_unlock_irq(&dev->lock);
1954	return value;
1955}
1956
1957static const struct file_operations dev_init_operations = {
1958	.llseek =	no_llseek,
1959
1960	.open =		dev_open,
1961	.write =	dev_config,
1962	.fasync =	ep0_fasync,
1963	.unlocked_ioctl = dev_ioctl,
1964	.release =	dev_release,
1965};
1966
1967/*----------------------------------------------------------------------*/
1968
1969/* FILESYSTEM AND SUPERBLOCK OPERATIONS
1970 *
1971 * Mounting the filesystem creates a controller file, used first for
1972 * device configuration then later for event monitoring.
1973 */
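/* A sketch of the userspace setup (the mount point is whatever the user
 * chooses; "none" is just a conventional dummy source):
 *
 *	mkdir("/dev/gadget", 0700);
 *	mount("none", "/dev/gadget", "gadgetfs", 0, NULL);
 *	// /dev/gadget/$CHIP now exists and can be opened for configuration
 */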
1974
1975
1976/* FIXME PAM etc could set this security policy without mount options
1977 * if epfiles inherited ownership and permissions from ep0 ...
1978 */
1979
1980static unsigned default_uid;
1981static unsigned default_gid;
1982static unsigned default_perm = S_IRUSR | S_IWUSR;
1983
1984module_param (default_uid, uint, 0644);
1985module_param (default_gid, uint, 0644);
1986module_param (default_perm, uint, 0644);
1987
1988
1989static struct inode *
1990gadgetfs_make_inode (struct super_block *sb,
1991		void *data, const struct file_operations *fops,
1992		int mode)
1993{
1994	struct inode *inode = new_inode (sb);
1995
1996	if (inode) {
1997		inode->i_ino = get_next_ino();
1998		inode->i_mode = mode;
1999		inode->i_uid = make_kuid(&init_user_ns, default_uid);
2000		inode->i_gid = make_kgid(&init_user_ns, default_gid);
2001		inode->i_atime = inode->i_mtime = inode->i_ctime
2002				= CURRENT_TIME;
2003		inode->i_private = data;
2004		inode->i_fop = fops;
2005	}
2006	return inode;
2007}
2008
2009/* creates in fs root directory, so non-renamable and non-linkable.
2010 * so inode and dentry are paired, until device reconfig.
2011 */
2012static struct dentry *
2013gadgetfs_create_file (struct super_block *sb, char const *name,
2014		void *data, const struct file_operations *fops)
2015{
2016	struct dentry	*dentry;
2017	struct inode	*inode;
2018
2019	dentry = d_alloc_name(sb->s_root, name);
2020	if (!dentry)
2021		return NULL;
2022
2023	inode = gadgetfs_make_inode (sb, data, fops,
2024			S_IFREG | (default_perm & S_IRWXUGO));
2025	if (!inode) {
2026		dput(dentry);
2027		return NULL;
2028	}
2029	d_add (dentry, inode);
2030	return dentry;
2031}
2032
2033static const struct super_operations gadget_fs_operations = {
2034	.statfs =	simple_statfs,
2035	.drop_inode =	generic_delete_inode,
2036};
2037
2038static int
2039gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
2040{
2041	struct inode	*inode;
2042	struct dev_data	*dev;
2043
2044	if (the_device)
2045		return -ESRCH;
2046
2047	/* fake probe to determine $CHIP */
2048	CHIP = NULL;
2049	usb_gadget_probe_driver(&probe_driver);
2050	if (!CHIP)
2051		return -ENODEV;
2052
2053	/* superblock */
2054	sb->s_blocksize = PAGE_CACHE_SIZE;
2055	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2056	sb->s_magic = GADGETFS_MAGIC;
2057	sb->s_op = &gadget_fs_operations;
2058	sb->s_time_gran = 1;
2059
2060	/* root inode */
2061	inode = gadgetfs_make_inode (sb,
2062			NULL, &simple_dir_operations,
2063			S_IFDIR | S_IRUGO | S_IXUGO);
2064	if (!inode)
2065		goto Enomem;
2066	inode->i_op = &simple_dir_inode_operations;
2067	if (!(sb->s_root = d_make_root (inode)))
2068		goto Enomem;
2069
2070	/* the ep0 file is named after the controller we expect;
2071	 * user mode code can use it for sanity checks, like we do.
2072	 */
2073	dev = dev_new ();
2074	if (!dev)
2075		goto Enomem;
2076
2077	dev->sb = sb;
2078	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations);
2079	if (!dev->dentry) {
2080		put_dev(dev);
2081		goto Enomem;
2082	}
2083
2084	/* other endpoint files are available after hardware setup,
2085	 * from binding to a controller.
2086	 */
2087	the_device = dev;
2088	return 0;
2089
2090Enomem:
2091	return -ENOMEM;
2092}
2093
2094/* "mount -t gadgetfs path /dev/gadget" ends up here */
2095static struct dentry *
2096gadgetfs_mount (struct file_system_type *t, int flags,
2097		const char *path, void *opts)
2098{
2099	return mount_single (t, flags, opts, gadgetfs_fill_super);
2100}
2101
2102static void
2103gadgetfs_kill_sb (struct super_block *sb)
2104{
2105	kill_litter_super (sb);
2106	if (the_device) {
2107		put_dev (the_device);
2108		the_device = NULL;
2109	}
2110}
2111
2112/*----------------------------------------------------------------------*/
2113
2114static struct file_system_type gadgetfs_type = {
2115	.owner		= THIS_MODULE,
2116	.name		= shortname,
2117	.mount		= gadgetfs_mount,
2118	.kill_sb	= gadgetfs_kill_sb,
2119};
2120MODULE_ALIAS_FS("gadgetfs");
2121
2122/*----------------------------------------------------------------------*/
2123
2124static int __init init (void)
2125{
2126	int status;
2127
2128	status = register_filesystem (&gadgetfs_type);
2129	if (status == 0)
2130		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2131			shortname, driver_desc);
2132	return status;
2133}
2134module_init (init);
2135
2136static void __exit cleanup (void)
2137{
2138	pr_debug ("unregister %s\n", shortname);
2139	unregister_filesystem (&gadgetfs_type);
2140}
2141module_exit (cleanup);
2142
2143