
1/*
2 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
3 * Author: Chao Xie <chao.xie@marvell.com>
4 *	   Neil Zhang <zhangwm@marvell.com>
5 *
6 * This program is free software; you can redistribute  it and/or modify it
7 * under  the terms of  the GNU General  Public License as published by the
8 * Free Software Foundation;  either version 2 of the  License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/dmapool.h>
16#include <linux/kernel.h>
17#include <linux/delay.h>
18#include <linux/ioport.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/errno.h>
22#include <linux/err.h>
23#include <linux/timer.h>
24#include <linux/list.h>
25#include <linux/interrupt.h>
26#include <linux/moduleparam.h>
27#include <linux/device.h>
28#include <linux/usb/ch9.h>
29#include <linux/usb/gadget.h>
30#include <linux/usb/otg.h>
31#include <linux/pm.h>
32#include <linux/io.h>
33#include <linux/irq.h>
34#include <linux/platform_device.h>
35#include <linux/clk.h>
36#include <linux/platform_data/mv_usb.h>
37#include <asm/unaligned.h>
38
39#include "mv_udc.h"
40
41#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
42#define DRIVER_VERSION		"8 Nov 2010"
43
44#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
45				((ep)->udc->ep0_dir) : ((ep)->direction))
46
47/* timeout value -- usec */
48#define RESET_TIMEOUT		10000
49#define FLUSH_TIMEOUT		10000
50#define EPSTATUS_TIMEOUT	10000
51#define PRIME_TIMEOUT		10000
52#define READSAFE_TIMEOUT	1000
53
54#define LOOPS_USEC_SHIFT	1
55#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
56#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
57
58static DECLARE_COMPLETION(release_done);
59
60static const char driver_name[] = "mv_udc";
61static const char driver_desc[] = DRIVER_DESC;
62
63static void nuke(struct mv_ep *ep, int status);
64static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
65
/* for endpoint 0 operations */
/* control endpoint descriptor used when (re)initializing ep0 */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
};
74
/*
 * ep0_reset - reinitialize both ep0 queue heads (index 0 = RX/OUT,
 * index 1 = TX/IN) and re-enable control transfers on endpoint 0.
 */
static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;

	/* ep0 in and out */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;

		/* ep0 dQH */
		ep->dqh = &udc->ep_dqh[i];

		/* configure ep0 endpoint capabilities in dQH */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;

		/* no dTD queued yet: mark next pointer as terminated */
		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);

		} else {	/* RX */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}

		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}
111
112/* protocol ep0 stall, will automatically be cleared on new transaction */
113static void ep0_stall(struct mv_udc *udc)
114{
115	u32	epctrlx;
116
117	/* set TX and RX to stall */
118	epctrlx = readl(&udc->op_regs->epctrlx[0]);
119	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
120	writel(epctrlx, &udc->op_regs->epctrlx[0]);
121
122	/* update ep0 state */
123	udc->ep0_state = WAIT_FOR_SETUP;
124	udc->ep0_dir = EP_DIR_OUT;
125}
126
/*
 * process_ep_req - walk the dTD chain of @curr_req and collect its result.
 * @index:    dQH index the request was queued on (ep_num * 2 + direction).
 * @curr_req: the request being completed.
 *
 * Returns 1 if a dTD is still owned by hardware (request in flight),
 * a negative errno when the controller flagged an error, or 0 on
 * success, in which case curr_req->req.actual is updated.
 */
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd	*curr_dtd;
	struct mv_dqh	*curr_dqh;
	int td_complete, actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;	/* even index = RX/OUT, odd = TX/IN */

	curr_dtd = curr_req->head;
	td_complete = 0;
	/* start from the requested length, subtract what was not moved */
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		/* hardware still owns this dTD: not done yet */
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts	& DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					/* leftover data on TX is a protocol error */
					dev_dbg(&udc->dev->dev,
						"TX dTD remains data\n");
					retval = -EPROTO;
					break;
				} else
					/* short OUT packet ends the transfer */
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	/*
	 * Spin until the queue head has moved past the last dTD of this
	 * request, so hardware is finished with the chain before it is
	 * handed back to the caller (and eventually freed).
	 */
	while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}
207
/*
 * done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 * request is still in progress.
 *
 * Unlinks the request from the endpoint queue, returns its dTDs to the
 * DMA pool, unmaps the buffer and invokes the gadget completion callback
 * with the controller lock temporarily dropped.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Removed the req from fsl_ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		/* read the next-virtual link before freeing the current dTD */
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	/* -ESHUTDOWN is routine teardown and not worth logging */
	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* keep the ep stopped while the callback runs without the lock */
	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);

	usb_gadget_giveback_request(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}
257
/*
 * queue_dtd - hand the dTD chain of @req to the hardware.
 *
 * If the endpoint queue is empty the dQH is pointed at the new chain and
 * the endpoint is primed.  Otherwise the chain is appended to the last
 * queued request, using the ATDTW tripwire protocol to detect whether the
 * controller consumed the list while we were linking into it.
 * Returns 0 on success or -ETIME if the tripwire never held.
 */
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		/* append the new chain to the tail of the pending one */
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		/* make the link visible before probing the hardware */
		wmb();

		/* already primed: hardware will pick up the new link */
		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared. When hardware see a hazard, it will clear
			 * the bit or else we remain set to 1 and we can
			 * proceed with priming of endpoint if not already
			 * primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		/* endpoint was active during the window: no reprime needed */
		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}
340
/*
 * build_dtd - allocate and fill one dTD for the next slice of @req.
 * @length:  out, number of bytes this dTD covers.
 * @dma:     out, DMA address of the new dTD.
 * @is_last: out, non-zero when this dTD completes the request.
 *
 * Advances req->req.actual by the slice size.  Returns the dTD or NULL
 * if the DMA pool is exhausted.
 */
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	struct mv_dtd *dtd;
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 temp, mult = 0;

	/* how big will this transfer be? */
	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
		/* high-bandwidth iso: up to mult packets per microframe */
		dqh = req->ep->dqh;
		mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
				& 0x3;
		*length = min(req->req.length - req->req.actual,
				(unsigned)(mult * req->ep->ep.maxpacket));
	} else
		*length = min(req->req.length - req->req.actual,
				(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we can not use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	/* remaining page pointers are 4K-aligned continuations of ptr0 */
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	/* MultO field: transactions per microframe for iso endpoints */
	temp |= mult << 10;

	dtd->size_ioc_sts = temp;

	/* dTD must be fully written before it can be linked/primed */
	mb();

	return dtd;
}
408
/* generate dTD linked list for a request */
/*
 * Builds one dTD per EP_MAX_LENGTH_TRANSFER slice (or per iso burst),
 * linking them by both DMA and virtual pointers.  Sets req->head,
 * req->tail and req->dtd_count.  Returns 0 or -ENOMEM.
 * NOTE(review): on -ENOMEM any dTDs already allocated are not freed
 * here; presumably the caller's error path releases them — verify.
 */
static int req_to_dtd(struct mv_req *req)
{
	unsigned count;
	int is_last, is_first = 1;
	struct mv_dtd *dtd, *last_dtd = NULL;
	struct mv_udc *udc;
	dma_addr_t dma;

	udc = req->ep->udc;

	do {
		dtd = build_dtd(req, &count, &dma, &is_last);
		if (dtd == NULL)
			return -ENOMEM;

		if (is_first) {
			is_first = 0;
			req->head = dtd;
		} else {
			/* chain onto the previous dTD (hw and sw links) */
			last_dtd->dtd_next = dma;
			last_dtd->next_dtd_virt = dtd;
		}
		last_dtd = dtd;
		req->dtd_count++;
	} while (!is_last);

	/* set terminate bit to 1 for the last dTD */
	dtd->dtd_next = DTD_NEXT_TERMINATE;

	req->tail = dtd;

	return 0;
}
443
444static int mv_ep_enable(struct usb_ep *_ep,
445		const struct usb_endpoint_descriptor *desc)
446{
447	struct mv_udc *udc;
448	struct mv_ep *ep;
449	struct mv_dqh *dqh;
450	u16 max = 0;
451	u32 bit_pos, epctrlx, direction;
452	unsigned char zlt = 0, ios = 0, mult = 0;
453	unsigned long flags;
454
455	ep = container_of(_ep, struct mv_ep, ep);
456	udc = ep->udc;
457
458	if (!_ep || !desc
459			|| desc->bDescriptorType != USB_DT_ENDPOINT)
460		return -EINVAL;
461
462	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
463		return -ESHUTDOWN;
464
465	direction = ep_dir(ep);
466	max = usb_endpoint_maxp(desc);
467
468	/*
469	 * disable HW zero length termination select
470	 * driver handles zero length packet through req->req.zero
471	 */
472	zlt = 1;
473
474	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
475
476	/* Check if the Endpoint is Primed */
477	if ((readl(&udc->op_regs->epprime) & bit_pos)
478		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
479		dev_info(&udc->dev->dev,
480			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
481			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
482			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
483			(unsigned)readl(&udc->op_regs->epprime),
484			(unsigned)readl(&udc->op_regs->epstatus),
485			(unsigned)bit_pos);
486		goto en_done;
487	}
488	/* Set the max packet length, interrupt on Setup and Mult fields */
489	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
490	case USB_ENDPOINT_XFER_BULK:
491		zlt = 1;
492		mult = 0;
493		break;
494	case USB_ENDPOINT_XFER_CONTROL:
495		ios = 1;
496	case USB_ENDPOINT_XFER_INT:
497		mult = 0;
498		break;
499	case USB_ENDPOINT_XFER_ISOC:
500		/* Calculate transactions needed for high bandwidth iso */
501		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
502		max = max & 0x7ff;	/* bit 0~10 */
503		/* 3 transactions at most */
504		if (mult > 3)
505			goto en_done;
506		break;
507	default:
508		goto en_done;
509	}
510
511	spin_lock_irqsave(&udc->lock, flags);
512	/* Get the endpoint queue head address */
513	dqh = ep->dqh;
514	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
515		| (mult << EP_QUEUE_HEAD_MULT_POS)
516		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
517		| (ios ? EP_QUEUE_HEAD_IOS : 0);
518	dqh->next_dtd_ptr = 1;
519	dqh->size_ioc_int_sts = 0;
520
521	ep->ep.maxpacket = max;
522	ep->ep.desc = desc;
523	ep->stopped = 0;
524
525	/* Enable the endpoint for Rx or Tx and set the endpoint type */
526	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
527	if (direction == EP_DIR_IN) {
528		epctrlx &= ~EPCTRL_TX_ALL_MASK;
529		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
530			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
531				<< EPCTRL_TX_EP_TYPE_SHIFT);
532	} else {
533		epctrlx &= ~EPCTRL_RX_ALL_MASK;
534		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
535			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
536				<< EPCTRL_RX_EP_TYPE_SHIFT);
537	}
538	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
539
540	/*
541	 * Implement Guideline (GL# USB-7) The unused endpoint type must
542	 * be programmed to bulk.
543	 */
544	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
545	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
546		epctrlx |= (USB_ENDPOINT_XFER_BULK
547				<< EPCTRL_RX_EP_TYPE_SHIFT);
548		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
549	}
550
551	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
552	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
553		epctrlx |= (USB_ENDPOINT_XFER_BULK
554				<< EPCTRL_TX_EP_TYPE_SHIFT);
555		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
556	}
557
558	spin_unlock_irqrestore(&udc->lock, flags);
559
560	return 0;
561en_done:
562	return -EINVAL;
563}
564
/*
 * mv_ep_disable - shut an endpoint down: clear its queue head, disable
 * its direction in EPCTRL, and fail all pending requests with -ESHUTDOWN.
 */
static int  mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 bit_pos, epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->ep.desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	ep->ep.desc = NULL;
	ep->stopped = 1;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
607
608static struct usb_request *
609mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
610{
611	struct mv_req *req = NULL;
612
613	req = kzalloc(sizeof *req, gfp_flags);
614	if (!req)
615		return NULL;
616
617	req->req.dma = DMA_ADDR_INVALID;
618	INIT_LIST_HEAD(&req->queue);
619
620	return &req->req;
621}
622
623static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
624{
625	struct mv_req *req = NULL;
626
627	req = container_of(_req, struct mv_req, req);
628
629	if (_req)
630		kfree(req);
631}
632
/*
 * mv_ep_fifo_flush - flush an endpoint's primed transfers.
 *
 * Writes the endpoint's bit(s) to ENDPTFLUSH (both directions for ep0)
 * and waits for the flush to complete, retrying while ENDPTSTATUS still
 * shows the endpoint active.  Logs and bails out on timeout.
 */
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->ep.desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	/* ep0 is bidirectional: flush RX (bit 0) and TX (bit 16) */
	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
692
693/* queues (submits) an I/O request to an endpoint */
694static int
695mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
696{
697	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
698	struct mv_req *req = container_of(_req, struct mv_req, req);
699	struct mv_udc *udc = ep->udc;
700	unsigned long flags;
701	int retval;
702
703	/* catch various bogus parameters */
704	if (!_req || !req->req.complete || !req->req.buf
705			|| !list_empty(&req->queue)) {
706		dev_err(&udc->dev->dev, "%s, bad params", __func__);
707		return -EINVAL;
708	}
709	if (unlikely(!_ep || !ep->ep.desc)) {
710		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
711		return -EINVAL;
712	}
713
714	udc = ep->udc;
715	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
716		return -ESHUTDOWN;
717
718	req->ep = ep;
719
720	/* map virtual address to hardware */
721	retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
722	if (retval)
723		return retval;
724
725	req->req.status = -EINPROGRESS;
726	req->req.actual = 0;
727	req->dtd_count = 0;
728
729	spin_lock_irqsave(&udc->lock, flags);
730
731	/* build dtds and push them to device queue */
732	if (!req_to_dtd(req)) {
733		retval = queue_dtd(ep, req);
734		if (retval) {
735			spin_unlock_irqrestore(&udc->lock, flags);
736			dev_err(&udc->dev->dev, "Failed to queue dtd\n");
737			goto err_unmap_dma;
738		}
739	} else {
740		spin_unlock_irqrestore(&udc->lock, flags);
741		dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
742		retval = -ENOMEM;
743		goto err_unmap_dma;
744	}
745
746	/* Update ep0 state */
747	if (ep->ep_num == 0)
748		udc->ep0_state = DATA_STATE_XMIT;
749
750	/* irq handler advances the queue */
751	list_add_tail(&req->queue, &ep->queue);
752	spin_unlock_irqrestore(&udc->lock, flags);
753
754	return 0;
755
756err_unmap_dma:
757	usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
758
759	return retval;
760}
761
/*
 * mv_prime_ep - point the endpoint's queue head at @req's first dTD and
 * prime the endpoint so the controller starts processing it.
 */
static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}
782
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
/*
 * Temporarily disables the endpoint, unlinks @_req from the hardware
 * dTD chain (repriming or patching links as needed), completes it with
 * -ECONNRESET, then re-enables the endpoint.
 */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
				struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			struct mv_dqh *qh;

			/* queue is now empty: reset the queue head */
			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		/* link the previous request's tail past the removed chain */
		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);

	}

	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
865
866static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
867{
868	u32 epctrlx;
869
870	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
871
872	if (stall) {
873		if (direction == EP_DIR_IN)
874			epctrlx |= EPCTRL_TX_EP_STALL;
875		else
876			epctrlx |= EPCTRL_RX_EP_STALL;
877	} else {
878		if (direction == EP_DIR_IN) {
879			epctrlx &= ~EPCTRL_TX_EP_STALL;
880			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
881		} else {
882			epctrlx &= ~EPCTRL_RX_EP_STALL;
883			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
884		}
885	}
886	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
887}
888
889static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
890{
891	u32 epctrlx;
892
893	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
894
895	if (direction == EP_DIR_OUT)
896		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
897	else
898		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
899}
900
901static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
902{
903	struct mv_ep *ep;
904	unsigned long flags = 0;
905	int status = 0;
906	struct mv_udc *udc;
907
908	ep = container_of(_ep, struct mv_ep, ep);
909	udc = ep->udc;
910	if (!_ep || !ep->ep.desc) {
911		status = -EINVAL;
912		goto out;
913	}
914
915	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
916		status = -EOPNOTSUPP;
917		goto out;
918	}
919
920	/*
921	 * Attempt to halt IN ep will fail if any transfer requests
922	 * are still queue
923	 */
924	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
925		status = -EAGAIN;
926		goto out;
927	}
928
929	spin_lock_irqsave(&ep->udc->lock, flags);
930	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
931	if (halt && wedge)
932		ep->wedge = 1;
933	else if (!halt)
934		ep->wedge = 0;
935	spin_unlock_irqrestore(&ep->udc->lock, flags);
936
937	if (ep->ep_num == 0) {
938		udc->ep0_state = WAIT_FOR_SETUP;
939		udc->ep0_dir = EP_DIR_OUT;
940	}
941out:
942	return status;
943}
944
/* usb_ep_ops.set_halt: halt/resume without wedging */
static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}
949
/* usb_ep_ops.set_wedge: halt and wedge the endpoint */
static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}
954
955static struct usb_ep_ops mv_ep_ops = {
956	.enable		= mv_ep_enable,
957	.disable	= mv_ep_disable,
958
959	.alloc_request	= mv_alloc_request,
960	.free_request	= mv_free_request,
961
962	.queue		= mv_ep_queue,
963	.dequeue	= mv_ep_dequeue,
964
965	.set_wedge	= mv_ep_set_wedge,
966	.set_halt	= mv_ep_set_halt,
967	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
968};
969
/* prepare and enable the controller's functional clock */
static void udc_clock_enable(struct mv_udc *udc)
{
	clk_prepare_enable(udc->clk);
}
974
/* disable and unprepare the controller's functional clock */
static void udc_clock_disable(struct mv_udc *udc)
{
	clk_disable_unprepare(udc->clk);
}
979
/* mask the controller's interrupts and clear the Run/Stop bit */
static void udc_stop(struct mv_udc *udc)
{
	u32 tmp;

	/* Disable interrupts */
	tmp = readl(&udc->op_regs->usbintr);
	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
	writel(tmp, &udc->op_regs->usbintr);

	udc->stopped = 1;

	/* Clear the Run bit in the command register to stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);
}
997
/* unmask the interrupts we handle and set the Run/Stop bit */
static void udc_start(struct mv_udc *udc)
{
	u32 usbintr;

	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
		| USBINTR_PORT_CHANGE_DETECT_EN
		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
	/* Enable interrupts */
	writel(usbintr, &udc->op_regs->usbintr);

	udc->stopped = 0;

	/* Set the Run bit in the command register */
	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}
1013
1014static int udc_reset(struct mv_udc *udc)
1015{
1016	unsigned int loops;
1017	u32 tmp, portsc;
1018
1019	/* Stop the controller */
1020	tmp = readl(&udc->op_regs->usbcmd);
1021	tmp &= ~USBCMD_RUN_STOP;
1022	writel(tmp, &udc->op_regs->usbcmd);
1023
1024	/* Reset the controller to get default values */
1025	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
1026
1027	/* wait for reset to complete */
1028	loops = LOOPS(RESET_TIMEOUT);
1029	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
1030		if (loops == 0) {
1031			dev_err(&udc->dev->dev,
1032				"Wait for RESET completed TIMEOUT\n");
1033			return -ETIMEDOUT;
1034		}
1035		loops--;
1036		udelay(LOOPS_USEC);
1037	}
1038
1039	/* set controller to device mode */
1040	tmp = readl(&udc->op_regs->usbmode);
1041	tmp |= USBMODE_CTRL_MODE_DEVICE;
1042
1043	/* turn setup lockout off, require setup tripwire in usbcmd */
1044	tmp |= USBMODE_SETUP_LOCK_OFF;
1045
1046	writel(tmp, &udc->op_regs->usbmode);
1047
1048	writel(0x0, &udc->op_regs->epsetupstat);
1049
1050	/* Configure the Endpoint List Address */
1051	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
1052		&udc->op_regs->eplistaddr);
1053
1054	portsc = readl(&udc->op_regs->portsc[0]);
1055	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
1056		portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);
1057
1058	if (udc->force_fs)
1059		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
1060	else
1061		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
1062
1063	writel(portsc, &udc->op_regs->portsc[0]);
1064
1065	tmp = readl(&udc->op_regs->epctrlx[0]);
1066	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
1067	writel(tmp, &udc->op_regs->epctrlx[0]);
1068
1069	return 0;
1070}
1071
/*
 * mv_udc_enable_internal - power the controller up: enable its clock and
 * initialize the PHY.  Idempotent via udc->active.  Returns 0 or the
 * PHY init error (clock is rolled back on failure).
 */
static int mv_udc_enable_internal(struct mv_udc *udc)
{
	int retval;

	if (udc->active)
		return 0;

	dev_dbg(&udc->dev->dev, "enable udc\n");
	udc_clock_enable(udc);
	if (udc->pdata->phy_init) {
		retval = udc->pdata->phy_init(udc->phy_regs);
		if (retval) {
			dev_err(&udc->dev->dev,
				"init phy error %d\n", retval);
			/* undo the clock enable on PHY failure */
			udc_clock_disable(udc);
			return retval;
		}
	}
	udc->active = 1;

	return 0;
}
1094
1095static int mv_udc_enable(struct mv_udc *udc)
1096{
1097	if (udc->clock_gating)
1098		return mv_udc_enable_internal(udc);
1099
1100	return 0;
1101}
1102
1103static void mv_udc_disable_internal(struct mv_udc *udc)
1104{
1105	if (udc->active) {
1106		dev_dbg(&udc->dev->dev, "disable udc\n");
1107		if (udc->pdata->phy_deinit)
1108			udc->pdata->phy_deinit(udc->phy_regs);
1109		udc_clock_disable(udc);
1110		udc->active = 0;
1111	}
1112}
1113
/* power the controller down only when clock gating is supported */
static void mv_udc_disable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
}
1119
1120static int mv_udc_get_frame(struct usb_gadget *gadget)
1121{
1122	struct mv_udc *udc;
1123	u16	retval;
1124
1125	if (!gadget)
1126		return -ENODEV;
1127
1128	udc = container_of(gadget, struct mv_udc, gadget);
1129
1130	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1131
1132	return retval;
1133}
1134
/* Tries to wake up the host connected to this gadget */
static int mv_udc_wakeup(struct usb_gadget *gadget)
{
	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
	u32 portsc;

	/* Remote wakeup feature not enabled by host */
	if (!udc->remote_wakeup)
		return -ENOTSUPP;

	/* &portsc and &portsc[0] are the same address; [0] used below */
	portsc = readl(&udc->op_regs->portsc);
	/* not suspended? */
	if (!(portsc & PORTSCX_PORT_SUSPEND))
		return 0;
	/* trigger force resume */
	portsc |= PORTSCX_PORT_FORCE_RESUME;
	writel(portsc, &udc->op_regs->portsc[0]);
	return 0;
}
1154
/*
 * mv_udc_vbus_session - usb_gadget_ops.vbus_session callback.
 * Tracks VBUS state; brings the controller up when a driver is bound,
 * softconnect is on and VBUS is present, and tears it down when VBUS
 * drops while still soft-connected.
 */
static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		/* already powered down: nothing to stop */
		if (!udc->active)
			goto out;

		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
1191
1192static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
1193{
1194	struct mv_udc *udc;
1195	unsigned long flags;
1196	int retval = 0;
1197
1198	udc = container_of(gadget, struct mv_udc, gadget);
1199	spin_lock_irqsave(&udc->lock, flags);
1200
1201	udc->softconnect = (is_on != 0);
1202
1203	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
1204			__func__, udc->softconnect, udc->vbus_active);
1205
1206	if (udc->driver && udc->softconnect && udc->vbus_active) {
1207		retval = mv_udc_enable(udc);
1208		if (retval == 0) {
1209			/* Clock is disabled, need re-init registers */
1210			udc_reset(udc);
1211			ep0_reset(udc);
1212			udc_start(udc);
1213		}
1214	} else if (udc->driver && udc->vbus_active) {
1215		/* stop all the transfer in queue*/
1216		stop_activity(udc, udc->driver);
1217		udc_stop(udc);
1218		mv_udc_disable(udc);
1219	}
1220
1221	spin_unlock_irqrestore(&udc->lock, flags);
1222	return retval;
1223}
1224
/* forward declarations; definitions follow after the ep0 helpers */
static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
static int mv_udc_stop(struct usb_gadget *, struct usb_gadget_driver *);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	.udc_start	= mv_udc_start,
	.udc_stop	= mv_udc_stop,
};
1244
1245static int eps_init(struct mv_udc *udc)
1246{
1247	struct mv_ep	*ep;
1248	char name[14];
1249	int i;
1250
1251	/* initialize ep0 */
1252	ep = &udc->eps[0];
1253	ep->udc = udc;
1254	strncpy(ep->name, "ep0", sizeof(ep->name));
1255	ep->ep.name = ep->name;
1256	ep->ep.ops = &mv_ep_ops;
1257	ep->wedge = 0;
1258	ep->stopped = 0;
1259	usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
1260	ep->ep_num = 0;
1261	ep->ep.desc = &mv_ep0_desc;
1262	INIT_LIST_HEAD(&ep->queue);
1263
1264	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1265
1266	/* initialize other endpoints */
1267	for (i = 2; i < udc->max_eps * 2; i++) {
1268		ep = &udc->eps[i];
1269		if (i % 2) {
1270			snprintf(name, sizeof(name), "ep%din", i / 2);
1271			ep->direction = EP_DIR_IN;
1272		} else {
1273			snprintf(name, sizeof(name), "ep%dout", i / 2);
1274			ep->direction = EP_DIR_OUT;
1275		}
1276		ep->udc = udc;
1277		strncpy(ep->name, name, sizeof(ep->name));
1278		ep->ep.name = ep->name;
1279
1280		ep->ep.ops = &mv_ep_ops;
1281		ep->stopped = 0;
1282		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
1283		ep->ep_num = i / 2;
1284
1285		INIT_LIST_HEAD(&ep->queue);
1286		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1287
1288		ep->dqh = &udc->ep_dqh[i];
1289	}
1290
1291	return 0;
1292}
1293
1294/* delete all endpoint requests, called with spinlock held */
1295static void nuke(struct mv_ep *ep, int status)
1296{
1297	/* called with spinlock held */
1298	ep->stopped = 1;
1299
1300	/* endpoint fifo flush */
1301	mv_ep_fifo_flush(&ep->ep);
1302
1303	while (!list_empty(&ep->queue)) {
1304		struct mv_req *req = NULL;
1305		req = list_entry(ep->queue.next, struct mv_req, queue);
1306		done(ep, req, status);
1307	}
1308}
1309
1310/* stop all USB activities */
1311static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
1312{
1313	struct mv_ep	*ep;
1314
1315	nuke(&udc->eps[0], -ESHUTDOWN);
1316
1317	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1318		nuke(ep, -ESHUTDOWN);
1319	}
1320
1321	/* report disconnect; the driver is already quiesced */
1322	if (driver) {
1323		spin_unlock(&udc->lock);
1324		driver->disconnect(&udc->gadget);
1325		spin_lock(&udc->lock);
1326	}
1327}
1328
1329static int mv_udc_start(struct usb_gadget *gadget,
1330		struct usb_gadget_driver *driver)
1331{
1332	struct mv_udc *udc;
1333	int retval = 0;
1334	unsigned long flags;
1335
1336	udc = container_of(gadget, struct mv_udc, gadget);
1337
1338	if (udc->driver)
1339		return -EBUSY;
1340
1341	spin_lock_irqsave(&udc->lock, flags);
1342
1343	/* hook up the driver ... */
1344	driver->driver.bus = NULL;
1345	udc->driver = driver;
1346
1347	udc->usb_state = USB_STATE_ATTACHED;
1348	udc->ep0_state = WAIT_FOR_SETUP;
1349	udc->ep0_dir = EP_DIR_OUT;
1350
1351	spin_unlock_irqrestore(&udc->lock, flags);
1352
1353	if (udc->transceiver) {
1354		retval = otg_set_peripheral(udc->transceiver->otg,
1355					&udc->gadget);
1356		if (retval) {
1357			dev_err(&udc->dev->dev,
1358				"unable to register peripheral to otg\n");
1359			udc->driver = NULL;
1360			return retval;
1361		}
1362	}
1363
1364	/* pullup is always on */
1365	mv_udc_pullup(&udc->gadget, 1);
1366
1367	/* When boot with cable attached, there will be no vbus irq occurred */
1368	if (udc->qwork)
1369		queue_work(udc->qwork, &udc->vbus_work);
1370
1371	return 0;
1372}
1373
1374static int mv_udc_stop(struct usb_gadget *gadget,
1375		struct usb_gadget_driver *driver)
1376{
1377	struct mv_udc *udc;
1378	unsigned long flags;
1379
1380	udc = container_of(gadget, struct mv_udc, gadget);
1381
1382	spin_lock_irqsave(&udc->lock, flags);
1383
1384	mv_udc_enable(udc);
1385	udc_stop(udc);
1386
1387	/* stop all usb activities */
1388	udc->gadget.speed = USB_SPEED_UNKNOWN;
1389	stop_activity(udc, driver);
1390	mv_udc_disable(udc);
1391
1392	spin_unlock_irqrestore(&udc->lock, flags);
1393
1394	/* unbind gadget driver */
1395	udc->driver = NULL;
1396
1397	return 0;
1398}
1399
1400static void mv_set_ptc(struct mv_udc *udc, u32 mode)
1401{
1402	u32 portsc;
1403
1404	portsc = readl(&udc->op_regs->portsc[0]);
1405	portsc |= mode << 16;
1406	writel(portsc, &udc->op_regs->portsc[0]);
1407}
1408
1409static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
1410{
1411	struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
1412	struct mv_req *req = container_of(_req, struct mv_req, req);
1413	struct mv_udc *udc;
1414	unsigned long flags;
1415
1416	udc = mvep->udc;
1417
1418	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
1419
1420	spin_lock_irqsave(&udc->lock, flags);
1421	if (req->test_mode) {
1422		mv_set_ptc(udc, req->test_mode);
1423		req->test_mode = 0;
1424	}
1425	spin_unlock_irqrestore(&udc->lock, flags);
1426}
1427
/*
 * Queue a status-stage request on ep0 using the pre-allocated
 * udc->status_req.
 *
 * @direction: EP_DIR_IN or EP_DIR_OUT for the status transfer.
 * @status:    16-bit value to send when @empty is false (GET_STATUS reply).
 * @empty:     true for a zero-length status packet.
 *
 * Returns 0 on success or a negative errno; on failure the request is
 * unmapped before returning.
 */
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the request structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		/* enter the pending test mode once this status stage
		 * completes -- see prime_status_complete() */
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	/* map the buffer for DMA unless it is already mapped */
	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			dev_err(&udc->dev->dev,
				"Failed to queue dtd when prime status\n");
			goto out;
		}
	} else{	/* no mem */
		retval = -ENOMEM;
		dev_err(&udc->dev->dev,
			"Failed to dma_pool_alloc when prime status\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	return retval;
}
1489
1490static void mv_udc_testmode(struct mv_udc *udc, u16 index)
1491{
1492	if (index <= TEST_FORCE_EN) {
1493		udc->test_mode = index;
1494		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1495			ep0_stall(udc);
1496	} else
1497		dev_err(&udc->dev->dev,
1498			"This test mode(%d) is not supported\n", index);
1499}
1500
1501static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1502{
1503	udc->dev_addr = (u8)setup->wValue;
1504
1505	/* update usb state */
1506	udc->usb_state = USB_STATE_ADDRESS;
1507
1508	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1509		ep0_stall(udc);
1510}
1511
1512static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1513	struct usb_ctrlrequest *setup)
1514{
1515	u16 status = 0;
1516	int retval;
1517
1518	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1519		!= (USB_DIR_IN | USB_TYPE_STANDARD))
1520		return;
1521
1522	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1523		status = 1 << USB_DEVICE_SELF_POWERED;
1524		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1525	} else if ((setup->bRequestType & USB_RECIP_MASK)
1526			== USB_RECIP_INTERFACE) {
1527		/* get interface status */
1528		status = 0;
1529	} else if ((setup->bRequestType & USB_RECIP_MASK)
1530			== USB_RECIP_ENDPOINT) {
1531		u8 ep_num, direction;
1532
1533		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1534		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1535				? EP_DIR_IN : EP_DIR_OUT;
1536		status = ep_is_stall(udc, ep_num, direction)
1537				<< USB_ENDPOINT_HALT;
1538	}
1539
1540	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1541	if (retval)
1542		ep0_stall(udc);
1543	else
1544		udc->ep0_state = DATA_STATE_XMIT;
1545}
1546
/*
 * CLEAR_FEATURE: clear remote-wakeup (device recipient) or an endpoint
 * halt (endpoint recipient), then prime an empty IN status stage.
 * Unsupported recipients/features are silently ignored.
 */
static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			/* NOTE(review): bound check is 'ep_num > max_eps';
			 * valid indices appear to be 0..max_eps-1, so
			 * ep_num == max_eps may slip through -- confirm. */
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			/* a wedged endpoint stays halted until reset */
			if (ep->wedge == 1)
				break;
			/* ep_set_stall may sleep/poll; drop the lock */
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
1590
/*
 * SET_FEATURE: enable remote-wakeup or enter a test mode (device
 * recipient), or halt an endpoint (endpoint recipient), then prime an
 * empty IN status stage.  Unsupported requests are silently ignored.
 */
static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			/* test mode only valid at high speed with a zero
			 * low byte in wIndex */
			if (setup->wIndex & 0xFF
				||  udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			/* NOTE(review): execution continues after the
			 * ep0_stall() calls above/below (no 'return');
			 * presumably intentional, but worth confirming. */
			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			/* ep_set_stall may sleep/poll; drop the lock */
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
1642
/*
 * Dispatch a SETUP packet: GET_STATUS, SET_ADDRESS, CLEAR_FEATURE and
 * SET_FEATURE are handled locally; everything else is delegated to the
 * gadget driver's setup() callback with the udc lock dropped around it.
 */
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	bool delegate = false;

	/* abort any stale OUT transfer before starting a new control xfer */
	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					?  EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					?  DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
1706
1707/* complete DATA or STATUS phase of ep0 prime status phase if needed */
1708static void ep0_req_complete(struct mv_udc *udc,
1709	struct mv_ep *ep0, struct mv_req *req)
1710{
1711	u32 new_addr;
1712
1713	if (udc->usb_state == USB_STATE_ADDRESS) {
1714		/* set the new address */
1715		new_addr = (u32)udc->dev_addr;
1716		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
1717			&udc->op_regs->deviceaddr);
1718	}
1719
1720	done(ep0, req, 0);
1721
1722	switch (udc->ep0_state) {
1723	case DATA_STATE_XMIT:
1724		/* receive status phase */
1725		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
1726			ep0_stall(udc);
1727		break;
1728	case DATA_STATE_RECV:
1729		/* send status phase */
1730		if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
1731			ep0_stall(udc);
1732		break;
1733	case WAIT_FOR_OUT_STATUS:
1734		udc->ep0_state = WAIT_FOR_SETUP;
1735		break;
1736	case WAIT_FOR_SETUP:
1737		dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
1738		break;
1739	default:
1740		ep0_stall(udc);
1741		break;
1742	}
1743}
1744
/*
 * Copy the 8-byte SETUP packet for @ep_num out of its dQH into
 * @buffer_ptr, using the hardware "setup tripwire" protocol: if a new
 * SETUP arrives while we are copying, the controller clears the
 * tripwire bit and the copy is retried.
 */
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* while a hazard exists when setup package arrives */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
1769
/*
 * Transfer-complete interrupt: first drain all pending SETUP packets
 * (they must be read ASAP), then walk ENDPTCOMPLETE and retire the
 * finished requests on each endpoint.
 */
static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep	*curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		/* OUT endpoints occupy bits 0..15, IN endpoints bits 16..31 */
		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		/* index 1 (ep0 IN) shares the mv_ep at index 0 with ep0 OUT */
		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}
1842
/*
 * Bus reset interrupt: clear the device address and ep0 state, ack
 * pending setup/complete status, wait for outstanding primes to drain,
 * then flush all endpoints.  If the port is still in reset this is a
 * genuine bus reset; otherwise the reset already finished and the
 * whole controller is re-initialised.
 */
static void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The address bits are past bit 25-31. Set the address */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);
	} else {
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc));

		/*
		 * re-initialize
		 * controller reset
		 */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}
1908
1909static void handle_bus_resume(struct mv_udc *udc)
1910{
1911	udc->usb_state = udc->resume_state;
1912	udc->resume_state = 0;
1913
1914	/* report resume to the driver */
1915	if (udc->driver) {
1916		if (udc->driver->resume) {
1917			spin_unlock(&udc->lock);
1918			udc->driver->resume(&udc->gadget);
1919			spin_lock(&udc->lock);
1920		}
1921	}
1922}
1923
1924static void irq_process_suspend(struct mv_udc *udc)
1925{
1926	udc->resume_state = udc->usb_state;
1927	udc->usb_state = USB_STATE_SUSPENDED;
1928
1929	if (udc->driver->suspend) {
1930		spin_unlock(&udc->lock);
1931		udc->driver->suspend(&udc->gadget);
1932		spin_lock(&udc->lock);
1933	}
1934}
1935
1936static void irq_process_port_change(struct mv_udc *udc)
1937{
1938	u32 portsc;
1939
1940	portsc = readl(&udc->op_regs->portsc[0]);
1941	if (!(portsc & PORTSCX_PORT_RESET)) {
1942		/* Get the speed */
1943		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
1944		switch (speed) {
1945		case PORTSCX_PORT_SPEED_HIGH:
1946			udc->gadget.speed = USB_SPEED_HIGH;
1947			break;
1948		case PORTSCX_PORT_SPEED_FULL:
1949			udc->gadget.speed = USB_SPEED_FULL;
1950			break;
1951		case PORTSCX_PORT_SPEED_LOW:
1952			udc->gadget.speed = USB_SPEED_LOW;
1953			break;
1954		default:
1955			udc->gadget.speed = USB_SPEED_UNKNOWN;
1956			break;
1957		}
1958	}
1959
1960	if (portsc & PORTSCX_PORT_SUSPEND) {
1961		udc->resume_state = udc->usb_state;
1962		udc->usb_state = USB_STATE_SUSPENDED;
1963		if (udc->driver->suspend) {
1964			spin_unlock(&udc->lock);
1965			udc->driver->suspend(&udc->gadget);
1966			spin_lock(&udc->lock);
1967		}
1968	}
1969
1970	if (!(portsc & PORTSCX_PORT_SUSPEND)
1971		&& udc->usb_state == USB_STATE_SUSPENDED) {
1972		handle_bus_resume(udc);
1973	}
1974
1975	if (!udc->resume_state)
1976		udc->usb_state = USB_STATE_DEFAULT;
1977}
1978
/* Bus-error interrupt: just count it; no recovery is attempted. */
static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}
1984
/*
 * Main controller interrupt handler: acknowledge the asserted, enabled
 * status bits and dispatch to the per-event helpers.  All processing
 * runs under udc->lock (helpers drop it around driver callbacks).
 */
static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* Disable ISR when stopped bit is set */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	/* only consider status bits whose interrupt is enabled */
	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Clear all the interrupts occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
2027
2028static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2029{
2030	struct mv_udc *udc = (struct mv_udc *)dev;
2031
2032	/* polling VBUS and init phy may cause too much time*/
2033	if (udc->qwork)
2034		queue_work(udc->qwork, &udc->vbus_work);
2035
2036	return IRQ_HANDLED;
2037}
2038
2039static void mv_udc_vbus_work(struct work_struct *work)
2040{
2041	struct mv_udc *udc;
2042	unsigned int vbus;
2043
2044	udc = container_of(work, struct mv_udc, vbus_work);
2045	if (!udc->pdata->vbus)
2046		return;
2047
2048	vbus = udc->pdata->vbus->poll();
2049	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2050
2051	if (vbus == VBUS_HIGH)
2052		mv_udc_vbus_session(&udc->gadget, 1);
2053	else if (vbus == VBUS_LOW)
2054		mv_udc_vbus_session(&udc->gadget, 0);
2055}
2056
2057/* release device structure */
2058static void gadget_release(struct device *_dev)
2059{
2060	struct mv_udc *udc;
2061
2062	udc = dev_get_drvdata(_dev);
2063
2064	complete(udc->done);
2065}
2066
2067static int mv_udc_remove(struct platform_device *pdev)
2068{
2069	struct mv_udc *udc;
2070
2071	udc = platform_get_drvdata(pdev);
2072
2073	usb_del_gadget_udc(&udc->gadget);
2074
2075	if (udc->qwork) {
2076		flush_workqueue(udc->qwork);
2077		destroy_workqueue(udc->qwork);
2078	}
2079
2080	/* free memory allocated in probe */
2081	if (udc->dtd_pool)
2082		dma_pool_destroy(udc->dtd_pool);
2083
2084	if (udc->ep_dqh)
2085		dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2086			udc->ep_dqh, udc->ep_dqh_dma);
2087
2088	mv_udc_disable(udc);
2089
2090	/* free dev, wait for the release() finished */
2091	wait_for_completion(udc->done);
2092
2093	return 0;
2094}
2095
/*
 * Probe: map capability/operational/PHY registers, allocate the dQH
 * block and dTD pool, set up endpoints and IRQs, and register the
 * gadget.  Clock gating is enabled when some form of VBUS detection
 * (OTG transceiver or a platform vbus poll callback) is available.
 */
static int mv_udc_probe(struct platform_device *pdev)
{
	struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct mv_udc *udc;
	int retval = 0;
	struct resource *r;
	size_t size;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "missing platform_data\n");
		return -ENODEV;
	}

	udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
	if (udc == NULL) {
		dev_err(&pdev->dev, "failed to allocate memory for udc\n");
		return -ENOMEM;
	}

	/* completed by gadget_release() so mv_udc_remove() can wait on it */
	udc->done = &release_done;
	udc->pdata = dev_get_platdata(&pdev->dev);
	spin_lock_init(&udc->lock);

	udc->dev = pdev;

	if (pdata->mode == MV_USB_MODE_OTG) {
		udc->transceiver = devm_usb_get_phy(&pdev->dev,
					USB_PHY_TYPE_USB2);
		if (IS_ERR(udc->transceiver)) {
			retval = PTR_ERR(udc->transceiver);

			if (retval == -ENXIO)
				return retval;

			/* PHY not ready yet: retry probing later */
			udc->transceiver = NULL;
			return -EPROBE_DEFER;
		}
	}

	/* udc only have one sysclk. */
	udc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(udc->clk))
		return PTR_ERR(udc->clk);

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
	if (r == NULL) {
		dev_err(&pdev->dev, "no I/O memory resource defined\n");
		return -ENODEV;
	}

	udc->cap_regs = (struct mv_cap_regs __iomem *)
		devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (udc->cap_regs == NULL) {
		dev_err(&pdev->dev, "failed to map I/O memory\n");
		return -EBUSY;
	}

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
	if (r == NULL) {
		dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
		return -ENODEV;
	}

	/* NOTE(review): plain ioremap() (not devm_); nothing in this file
	 * appears to iounmap it on later error paths or in remove --
	 * possible mapping leak, confirm. */
	udc->phy_regs = ioremap(r->start, resource_size(r));
	if (udc->phy_regs == NULL) {
		dev_err(&pdev->dev, "failed to map phy I/O memory\n");
		return -EBUSY;
	}

	/* we will access controller registers, so enable the clk */
	retval = mv_udc_enable_internal(udc);
	if (retval)
		return retval;

	/* operational registers start CAPLENGTH bytes past the cap regs */
	udc->op_regs =
		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
		+ (readl(&udc->cap_regs->caplength_hciversion)
			& CAPLENGTH_MASK));
	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;

	/*
	 * some platform will use usb to download image, it may not disconnect
	 * usb gadget before loading kernel. So first stop udc here.
	 */
	udc_stop(udc);
	writel(0xFFFFFFFF, &udc->op_regs->usbsts);

	/* dQH area: one queue head per endpoint/direction, aligned */
	size = udc->max_eps * sizeof(struct mv_dqh) *2;
	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
	udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
					&udc->ep_dqh_dma, GFP_KERNEL);

	if (udc->ep_dqh == NULL) {
		dev_err(&pdev->dev, "allocate dQH memory failed\n");
		retval = -ENOMEM;
		goto err_disable_clock;
	}
	udc->ep_dqh_size = size;

	/* create dTD dma_pool resource */
	udc->dtd_pool = dma_pool_create("mv_dtd",
			&pdev->dev,
			sizeof(struct mv_dtd),
			DTD_ALIGNMENT,
			DMA_BOUNDARY);

	if (!udc->dtd_pool) {
		retval = -ENOMEM;
		goto err_free_dma;
	}

	size = udc->max_eps * sizeof(struct mv_ep) *2;
	udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (udc->eps == NULL) {
		dev_err(&pdev->dev, "allocate ep memory failed\n");
		retval = -ENOMEM;
		goto err_destroy_dma;
	}

	/* initialize ep0 status request structure */
	udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
					GFP_KERNEL);
	if (!udc->status_req) {
		dev_err(&pdev->dev, "allocate status_req memory failed\n");
		retval = -ENOMEM;
		goto err_destroy_dma;
	}
	INIT_LIST_HEAD(&udc->status_req->queue);

	/* allocate a small amount of memory to get valid address */
	/* NOTE(review): kzalloc() result is unchecked; a failure here would
	 * later be dereferenced in udc_prime_status() -- confirm/fix. */
	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
	udc->status_req->req.dma = DMA_ADDR_INVALID;

	udc->resume_state = USB_STATE_NOTATTACHED;
	udc->usb_state = USB_STATE_POWERED;
	udc->ep0_dir = EP_DIR_OUT;
	udc->remote_wakeup = 0;

	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		retval = -ENODEV;
		goto err_destroy_dma;
	}
	udc->irq = r->start;
	if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
		IRQF_SHARED, driver_name, udc)) {
		dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
			udc->irq);
		retval = -ENODEV;
		goto err_destroy_dma;
	}

	/* initialize gadget structure */
	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */

	/* the "gadget" abstracts/virtualizes the controller */
	udc->gadget.name = driver_name;		/* gadget name */

	eps_init(udc);

	/* VBUS detect: we can disable/enable clock on demand.*/
	if (udc->transceiver)
		udc->clock_gating = 1;
	else if (pdata->vbus) {
		udc->clock_gating = 1;
		retval = devm_request_threaded_irq(&pdev->dev,
				pdata->vbus->irq, NULL,
				mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
		if (retval) {
			dev_info(&pdev->dev,
				"Can not request irq for VBUS, "
				"disable clock gating\n");
			udc->clock_gating = 0;
		}

		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
		if (!udc->qwork) {
			dev_err(&pdev->dev, "cannot create workqueue\n");
			retval = -ENOMEM;
			goto err_destroy_dma;
		}

		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
	}

	/*
	 * When clock gating is supported, we can disable clk and phy.
	 * If not, it means that VBUS detection is not supported, we
	 * have to enable vbus active all the time to let controller work.
	 */
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
	else
		udc->vbus_active = 1;

	retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
			gadget_release);
	if (retval)
		goto err_create_workqueue;

	platform_set_drvdata(pdev, udc);
	dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
		udc->clock_gating ? "with" : "without");

	return 0;

err_create_workqueue:
	destroy_workqueue(udc->qwork);
err_destroy_dma:
	dma_pool_destroy(udc->dtd_pool);
err_free_dma:
	dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
			udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
	mv_udc_disable_internal(udc);

	return retval;
}
2319
2320#ifdef CONFIG_PM
2321static int mv_udc_suspend(struct device *dev)
2322{
2323	struct mv_udc *udc;
2324
2325	udc = dev_get_drvdata(dev);
2326
2327	/* if OTG is enabled, the following will be done in OTG driver*/
2328	if (udc->transceiver)
2329		return 0;
2330
2331	if (udc->pdata->vbus && udc->pdata->vbus->poll)
2332		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2333			dev_info(&udc->dev->dev, "USB cable is connected!\n");
2334			return -EAGAIN;
2335		}
2336
2337	/*
2338	 * only cable is unplugged, udc can suspend.
2339	 * So do not care about clock_gating == 1.
2340	 */
2341	if (!udc->clock_gating) {
2342		udc_stop(udc);
2343
2344		spin_lock_irq(&udc->lock);
2345		/* stop all usb activities */
2346		stop_activity(udc, udc->driver);
2347		spin_unlock_irq(&udc->lock);
2348
2349		mv_udc_disable_internal(udc);
2350	}
2351
2352	return 0;
2353}
2354
2355static int mv_udc_resume(struct device *dev)
2356{
2357	struct mv_udc *udc;
2358	int retval;
2359
2360	udc = dev_get_drvdata(dev);
2361
2362	/* if OTG is enabled, the following will be done in OTG driver*/
2363	if (udc->transceiver)
2364		return 0;
2365
2366	if (!udc->clock_gating) {
2367		retval = mv_udc_enable_internal(udc);
2368		if (retval)
2369			return retval;
2370
2371		if (udc->driver && udc->softconnect) {
2372			udc_reset(udc);
2373			ep0_reset(udc);
2374			udc_start(udc);
2375		}
2376	}
2377
2378	return 0;
2379}
2380
/* System sleep callbacks; hooked into udc_driver.driver.pm under CONFIG_PM. */
static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend	= mv_udc_suspend,
	.resume		= mv_udc_resume,
};
2385#endif
2386
2387static void mv_udc_shutdown(struct platform_device *pdev)
2388{
2389	struct mv_udc *udc;
2390	u32 mode;
2391
2392	udc = platform_get_drvdata(pdev);
2393	/* reset controller mode to IDLE */
2394	mv_udc_enable(udc);
2395	mode = readl(&udc->op_regs->usbmode);
2396	mode &= ~3;
2397	writel(mode, &udc->op_regs->usbmode);
2398	mv_udc_disable(udc);
2399}
2400
2401static struct platform_driver udc_driver = {
2402	.probe		= mv_udc_probe,
2403	.remove		= mv_udc_remove,
2404	.shutdown	= mv_udc_shutdown,
2405	.driver		= {
2406		.owner	= THIS_MODULE,
2407		.name	= "mv-udc",
2408#ifdef CONFIG_PM
2409		.pm	= &mv_udc_pm_ops,
2410#endif
2411	},
2412};
2413
/* Generate module init/exit that register/unregister udc_driver. */
module_platform_driver(udc_driver);
/* Enable module autoloading when the "mv-udc" platform device appears. */
MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
2420