/*
 *  Driver for AMBA serial ports
 *
 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 *  Copyright 1999 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)
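
/*
 * AMBA_ISR_PASS_LIMIT bounds the number of passes of the interrupt
 * handler loop in pl011_int(), so a constantly-asserted interrupt
 * cannot livelock the CPU.
 *
 * The PL011 data register returns the received character in bits 0-7
 * and the per-character error flags (frame/parity/break/overrun) in
 * bits 8-11, which UART_DR_ERROR collects. UART_DUMMY_DR_RX is a
 * software-only marker bit OR'd into every character taken from the
 * FIFO; setting it in port->ignore_status_mask (see pl011_set_termios)
 * makes uart_insert_char() discard all received characters when CREAD
 * is clear.
 */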

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	unsigned int		ifls;
	unsigned int		lcrh_tx;
	unsigned int		lcrh_rx;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};

static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}

static struct vendor_data vendor_arm = {
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.lcrh_tx		= UART011_LCRH,
	.lcrh_rx		= UART011_LCRH,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.get_fifosize		= get_fifosize_arm,
};

static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}

static struct vendor_data vendor_st = {
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.lcrh_tx		= ST_UART011_LCRH_TX,
	.lcrh_rx		= ST_UART011_LCRH_RX,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.get_fifosize		= get_fifosize_st,
};
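
/*
 * Note that the ST variant splits the line control register into
 * separate TX and RX registers (ST_UART011_LCRH_TX/RX), which is why
 * the driver always goes through the vendor-specific lcrh_tx/lcrh_rx
 * offsets (see pl011_write_lcr_h()) rather than writing UART011_LCRH
 * directly.
 */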

/* Deals with DMA transactions */

struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};

struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int last_residue;
	unsigned long last_jiffies;
	bool auto_poll_rate;
	unsigned int poll_rate;
	unsigned int poll_timeout;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		lcrh_tx;	/* vendor-specific */
	unsigned int		lcrh_rx;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	bool			autorts;
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
#endif
};
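
/*
 * Because the generic uart_port is embedded as a member, the uart_ops
 * callbacks below recover our state from the core's pointer with
 * container_of(), e.g.:
 *
 *	struct uart_amba_port *uap =
 *	    container_of(port, struct uart_amba_port, port);
 */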

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status, ch;
	unsigned int flag, max_count = 256;
	int fifotaken = 0;

	while (max_count--) {
		status = readw(uap->port.membase + UART01x_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = readw(uap->port.membase + UART01x_DR) |
			UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;
		fifotaken++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
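
/*
 * One page per DMA buffer: TX uses a single bounce buffer, while RX
 * double-buffers (sgbuf_a/sgbuf_b) so one buffer can be drained into
 * the TTY layer while the other is being filled by the DMA engine.
 */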

static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;

	return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}

static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase + UART01x_DR,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	chan = dma_request_slave_channel(dev, "tx");

	if (!chan) {
		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
						plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");
	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase + UART01x_DR,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};

		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * Default the poll rate to 100 ms if not
				 * specified. It will be adjusted to match
				 * the baud rate in set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* Default poll_timeout to 3 secs if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else {
			uap->dmarx.auto_poll_rate = false;
		}

		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}

#ifndef MODULE
/*
 * Stack up the UARTs and defer the DMA probe to device initcall time,
 * because the serial driver is registered as an arch initcall, at which
 * point the DMA subsystem has not yet been registered. Once the initcall
 * below has run, the driver switches over to using DMA where desired.
 */
struct dma_uap {
	struct list_head node;
	struct uart_amba_port *uap;
	struct device *dev;
};

static LIST_HEAD(pl011_dma_uarts);

static int __init pl011_dma_initcall(void)
{
	struct list_head *node, *tmp;

	list_for_each_safe(node, tmp, &pl011_dma_uarts) {
		struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
		pl011_dma_probe_initcall(dmau->dev, dmau->uap);
		list_del(node);
		kfree(dmau);
	}
	return 0;
}

device_initcall(pl011_dma_initcall);

static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
	struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
	if (dmau) {
		dmau->uap = uap;
		dmau->dev = dev;
		list_add_tail(&dmau->node, &pl011_dma_uarts);
	}
}
#else
static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
	pl011_dma_probe_initcall(dev, uap);
}
#endif

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	/* TODO: remove the initcall if it has not yet executed */
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare this for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the TX queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0) {
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent.  Re-enable the TX IRQ.
		 */
		uap->im |= UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second = xmit->head;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}
	return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}

/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				ret = true;
			} else {
				uap->im |= UART011_TXIM;
				ret = false;
			}
			writew(uap->im, uap->port.membase + UART011_IMSC);
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			writew(uap->dmacr,
				       uap->port.membase + UART011_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	writew(uap->port.x_char, uap->port.membase + UART01x_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	writew(dmacr, uap->port.membase + UART011_DMACR);

	return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	/* Avoid deadlock with the DMA engine callback */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);

	return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick up the remaining data from the DMA buffer */
	if (pending) {
		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
		       uap->port.membase + UART011_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}

static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = false;

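	/*
	 * The dmaengine residue counts the bytes the transfer has NOT yet
	 * written, so "length - residue" is how many bytes have landed in
	 * the buffer so far.
	 */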
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job, "
			"falling back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job, "
			"falling back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}

/*
 * Timer handler for RX DMA polling.
 * On each poll it checks the residue in the DMA buffer and transfers the
 * received data to the TTY. It also updates last_residue for the next poll.
 */
static void pl011_dma_rx_poll(unsigned long args)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)args;
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}

static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	/*
	 * The ST Micro variants have a specific DMA burst threshold
	 * compensation. Set this to 16 bytes, so bursts will only be
	 * issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			       uap->port.membase + ST_UART011_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, falling back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			init_timer(&uap->dmarx.timer);
			uap->dmarx.timer.function = pl011_dma_rx_poll;
			uap->dmarx.timer.data = (unsigned long)uap;
			mod_timer(&uap->dmarx.timer,
				jiffies +
				msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}

static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif

static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	pl011_dma_tx_stop(uap);
}

static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap)) {
		uap->im |= UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	writew(uap->im, uap->port.membase + UART011_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job, "
				"falling back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			writew(uap->im, uap->port.membase + UART011_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					jiffies +
					msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}

static void pl011_tx_chars(struct uart_amba_port *uap)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count;

	if (uap->port.x_char) {
		writew(uap->port.x_char, uap->port.membase + UART01x_DR);
		uap->port.icount.tx++;
		uap->port.x_char = 0;
		return;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return;

	count = uap->fifosize >> 1;
	do {
		writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uap->port.icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	} while (--count > 0);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit))
		pl011_stop_tx(&uap->port);
}

static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

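	/* XOR the old and new states: a set bit marks a signal that changed */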
	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & UART01x_FR_DSR)
		uap->port.icount.dsr++;

	if (delta & UART01x_FR_CTS)
		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;
	unsigned int dummy_read;

	spin_lock_irqsave(&uap->port.lock, flags);
	status = readw(uap->port.membase + UART011_MIS);
	if (status) {
		do {
			if (uap->vendor->cts_event_workaround) {
				/* workaround to make sure that all bits are unlocked */
				writew(0x00, uap->port.membase + UART011_ICR);

				/*
				 * WA: introduce a 26ns (1 UART clk) delay
				 * before W1C; a single APB access incurs a
				 * 2 pclk (133.12 MHz) delay, so add two
				 * dummy reads.
				 */
				dummy_read = readw(uap->port.membase + UART011_ICR);
				dummy_read = readw(uap->port.membase + UART011_ICR);
			}

			writew(status & ~(UART011_TXIS|UART011_RTIS|
					  UART011_RXIS),
			       uap->port.membase + UART011_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap);

			if (pass_counter-- == 0)
				break;

			status = readw(uap->port.membase + UART011_MIS);
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}

static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status = readw(uap->port.membase + UART01x_FR);
	return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
}

static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = readw(uap->port.membase + UART01x_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
	TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
	TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}

static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = readw(uap->port.membase + UART011_CR);

#define	TIOCMBIT(tiocmbit, uartbit)		\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (uap->autorts) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	writew(cr, uap->port.membase + UART011_CR);
}

static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = readw(uap->port.membase + uap->lcrh_tx);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned char __iomem *regs = uap->port.membase;

	writew(readw(regs + UART011_MIS), regs + UART011_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even without the race, e.g. if
	 * the controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including the TX queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
}

static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = readw(uap->port.membase + UART01x_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return readw(uap->port.membase + UART01x_DR);
}

static void pl011_put_poll_char(struct uart_port *port,
			 unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();

	writew(ch, uap->port.membase + UART01x_DR);
}

#endif /* CONFIG_CONSOLE_POLL */

static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
	       UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);

	/*
	 * Save the interrupt enable mask, and enable RX interrupts in case
	 * the interrupt is used for NMI entry.
	 */
	uap->im = readw(uap->port.membase + UART011_IMSC);
	writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}

static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	writew(lcr_h, uap->port.membase + uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx) {
		int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register; to get
		 * this delay, write a read-only register 10 times.
		 */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	}
}

static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr, lcr_h, fbrd, ibrd;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		goto clk_dis;

	writew(uap->im, uap->port.membase + UART011_IMSC);

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
	if (retval)
		goto clk_dis;

	writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);

	/*
	 * Provoke TX FIFO interrupt into asserting. Taking care to preserve
	 * baud rate and data format specified by FBRD, IBRD and LCRH as the
	 * UART may already be in use as a console.
	 */
	spin_lock_irq(&uap->port.lock);

	fbrd = readw(uap->port.membase + UART011_FBRD);
	ibrd = readw(uap->port.membase + UART011_IBRD);
	lcr_h = readw(uap->port.membase + uap->lcrh_rx);

	cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
	writew(cr, uap->port.membase + UART011_CR);
	writew(0, uap->port.membase + UART011_FBRD);
	writew(1, uap->port.membase + UART011_IBRD);
	pl011_write_lcr_h(uap, 0);
	writew(0, uap->port.membase + UART01x_DR);
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	writew(fbrd, uap->port.membase + UART011_FBRD);
	writew(ibrd, uap->port.membase + UART011_IBRD);
	pl011_write_lcr_h(uap, lcr_h);

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);

	spin_unlock_irq(&uap->port.lock);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	/*
	 * Finally, enable interrupts. When using DMA only the receive
	 * timeout interrupt is needed; if the initial RX DMA job failed,
	 * start in interrupt mode as well.
	 */
	spin_lock_irq(&uap->port.lock);
	/* Clear out any spuriously appearing RX interrupts */
	writew(UART011_RTIS | UART011_RXIS,
	       uap->port.membase + UART011_ICR);
	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	spin_unlock_irq(&uap->port.lock);

	return 0;

 clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}

static void pl011_shutdown_channel(struct uart_amba_port *uap,
					unsigned int lcrh)
{
	unsigned long val;

	val = readw(uap->port.membase + lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	writew(val, uap->port.membase + lcrh);
}

static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	/*
	 * disable all interrupts
	 */
	spin_lock_irq(&uap->port.lock);
	uap->im = 0;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	writew(0xffff, uap->port.membase + UART011_ICR);
	spin_unlock_irq(&uap->port.lock);

	pl011_dma_shutdown(uap);

	/*
	 * Free the interrupt
	 */
	free_irq(uap->port.irq, uap);

	/*
	 * Disable the port. It should not disable RTS and DTR.
	 * Also the RTS and DTR state should be preserved so that it
	 * can be restored during startup().
	 */
	uap->autorts = false;
	spin_lock_irq(&uap->port.lock);
	cr = readw(uap->port.membase + UART011_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);
	spin_unlock_irq(&uap->port.lock);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx)
		pl011_shutdown_channel(uap, uap->lcrh_tx);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}
}

static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		     struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: /* CS8 */
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = readw(port->membase + UART011_CR);
	writew(0, port->membase + UART011_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		uap->autorts = true;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		uap->autorts = false;
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate */
	writew(quot & 0x3f, port->membase + UART011_FBRD);
	writew(quot >> 6, port->membase + UART011_IBRD);
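
	/*
	 * quot is the baud divisor in units of 1/64, i.e.
	 * 64 * uartclk / (16 * baud) (or a /8 divider with oversampling),
	 * so the low 6 bits form the fractional part (FBRD) and the rest
	 * the integer part (IBRD). E.g. uartclk = 24 MHz, baud = 115200:
	 * quot = 833, giving IBRD = 13 and FBRD = 1.
	 */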
1826
1827	/*
1828	 * ----------v----------v----------v----------v-----
1829	 * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
1830	 * UART011_FBRD & UART011_IBRD.
1831	 * ----------^----------^----------^----------^-----
1832	 */
1833	pl011_write_lcr_h(uap, lcr_h);
1834	writew(old_cr, port->membase + UART011_CR);
1835
1836	spin_unlock_irqrestore(&port->lock, flags);
1837}

static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Release the memory region(s) being used by 'port'
 */
static void pl011_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl011_request_port(port);
	}
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}

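/*
 * The serial core drives the port through this table; the poll_*
 * hooks under CONFIG_CONSOLE_POLL are what allow kgdb (via kgdboc)
 * to use the port with interrupts disabled.
 */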
static struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init     = pl011_hwinit,
	.poll_get_char = pl011_get_poll_char,
	.poll_put_char = pl011_put_poll_char,
#endif
};

static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();
	writew(ch, uap->port.membase + UART01x_DR);
}

static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int status, old_cr, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
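	/*
	 * The port lock may already be held on this CPU: sysrq
	 * characters are handled from the interrupt path, and an oops
	 * can be printed from any context.  Skip or trylock in those
	 * cases rather than deadlocking the console.
	 */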
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 *	First save the CR, then disable CTS flow control and
	 *	make sure the UART and transmitter are enabled.
	 */
	old_cr = readw(uap->port.membase + UART011_CR);
	new_cr = old_cr & ~UART011_CR_CTSEN;
	new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(new_cr, uap->port.membase + UART011_CR);

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 *	Finally, wait for the transmitter to become empty
	 *	and restore the CR.
	 */
	do {
		status = readw(uap->port.membase + UART01x_FR);
	} while (status & UART01x_FR_BUSY);
	writew(old_cr, uap->port.membase + UART011_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}

static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
			     int *parity, int *bits)
{
	if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = readw(uap->port.membase + uap->lcrh_tx);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = readw(uap->port.membase + UART011_IBRD);
		fbrd = readw(uap->port.membase + UART011_FBRD);

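		/*
		 * Invert the divisor programming done in set_termios:
		 * baud = uartclk / (16 * (ibrd + fbrd / 64)), i.e.
		 * uartclk * 4 / (64 * ibrd + fbrd).
		 */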
		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (readw(uap->port.membase + UART011_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}

static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, fall back to the first port.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		pl011_console_get_options(uap, &baud, &parity, &bits);

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
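
/*
 * The console device is selected on the kernel command line, e.g.
 * "console=ttyAMA0,115200n8"; the string after the comma is the
 * options argument parsed above, and when it is absent the current
 * hardware state is read back instead.
 */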

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)

static void pl011_putc(struct uart_port *port, int c)
{
	/*
	 * Wait for space in the TX FIFO, send the character, then wait
	 * for it to drain completely so early output is not lost.
	 */
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}

static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;
	return 0;
}
EARLYCON_DECLARE(pl011, pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
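
/*
 * These declarations let "earlycon=pl011,<mmio-address>" on the
 * command line (or a matching "arm,pl011" node referenced by the
 * device tree's stdout-path) produce console output before the
 * driver proper has probed.
 */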

#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};

static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias;
	static bool seen_dev_without_alias;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}
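
/*
 * A device-tree snippet showing the alias this helper looks up
 * (node names here are illustrative only):
 *
 *	aliases {
 *		serial0 = &uart0;
 *	};
 *
 * A port with such an alias keeps the numbering the alias requests;
 * ports without one fall back to the first free slot.
 */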

static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	void __iomem *base;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			break;

	if (i == ARRAY_SIZE(amba_ports))
		return -EBUSY;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (uap == NULL)
		return -ENOMEM;

	i = pl011_probe_dt_alias(i, &dev->dev);

	base = devm_ioremap(&dev->dev, dev->res.start,
			    resource_size(&dev->res));
	if (!base)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->vendor = vendor;
	uap->lcrh_rx = vendor->lcrh_rx;
	uap->lcrh_tx = vendor->lcrh_tx;
	uap->old_cr = 0;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.dev = &dev->dev;
	uap->port.mapbase = dev->res.start;
	uap->port.membase = base;
	uap->port.iotype = UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.fifosize = uap->fifosize;
	uap->port.ops = &amba_pl011_pops;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = i;
	pl011_dma_probe(&dev->dev, uap);

	/* Ensure interrupts from this UART are masked and cleared */
	writew(0, uap->port.membase + UART011_IMSC);
	writew(0xffff, uap->port.membase + UART011_ICR);

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	amba_ports[i] = uap;

	amba_set_drvdata(dev, uap);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			pr_err("Failed to register AMBA-PL011 driver\n");
			amba_ports[i] = NULL;
			pl011_dma_remove(uap);
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret) {
		bool busy = false;

		amba_ports[i] = NULL;
		pl011_dma_remove(uap);

		/* Other ports may still be using the driver */
		for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
			if (amba_ports[i])
				busy = true;

		if (!busy)
			uart_unregister_driver(&amba_reg);
	}

	return ret;
}

static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);
	bool busy = false;
	int i;

	uart_remove_one_port(&amba_reg, &uap->port);

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;

	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

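/*
 * SIMPLE_DEV_PM_OPS only wires pl011_suspend/pl011_resume into the
 * resulting dev_pm_ops when CONFIG_PM_SLEEP is enabled, matching the
 * #ifdef around their definitions above.
 */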
static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);

static struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },
};
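
/*
 * The AMBA peripheral ID carries the part number in its low 12 bits
 * and the designer code above that (0x41 is ARM), so 0x00041011
 * matches ARM's PL011; the mask drops the revision bits so every
 * revision matches.  The second entry is the ST-Ericsson derivative,
 * which needs the vendor_st register layout and workarounds.
 */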

MODULE_DEVICE_TABLE(amba, pl011_ids);

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};

static int __init pl011_init(void)
{
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if built in it's most likely the
 * console, so leave module_exit() in place but hook module init to an
 * earlier initcall level than the default.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");