1/*
2 * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
3 *
4 *  Copyright (C) 2002 - 2011  Paul Mundt
5 *  Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
6 *
7 * based off of the old drivers/char/sh-sci.c by:
8 *
9 *   Copyright (C) 1999, 2000  Niibe Yutaka
10 *   Copyright (C) 2000  Sugioka Toshinobu
11 *   Modified to support multiple serial ports. Stuart Menefy (May 2000).
12 *   Modified to support SecureEdge. David McCullough (2002)
13 *   Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
14 *   Removed SH7300 support (Jul 2007).
15 *
16 * This file is subject to the terms and conditions of the GNU General Public
17 * License.  See the file "COPYING" in the main directory of this archive
18 * for more details.
19 */
20#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
21#define SUPPORT_SYSRQ
22#endif
23
24#undef DEBUG
25
26#include <linux/clk.h>
27#include <linux/console.h>
28#include <linux/ctype.h>
29#include <linux/cpufreq.h>
30#include <linux/delay.h>
31#include <linux/dmaengine.h>
32#include <linux/dma-mapping.h>
33#include <linux/err.h>
34#include <linux/errno.h>
35#include <linux/init.h>
36#include <linux/interrupt.h>
37#include <linux/ioport.h>
38#include <linux/major.h>
39#include <linux/module.h>
40#include <linux/mm.h>
41#include <linux/notifier.h>
42#include <linux/of.h>
43#include <linux/platform_device.h>
44#include <linux/pm_runtime.h>
45#include <linux/scatterlist.h>
46#include <linux/serial.h>
47#include <linux/serial_sci.h>
48#include <linux/sh_dma.h>
49#include <linux/slab.h>
50#include <linux/string.h>
51#include <linux/sysrq.h>
52#include <linux/timer.h>
53#include <linux/tty.h>
54#include <linux/tty_flip.h>
55
56#ifdef CONFIG_SUPERH
57#include <asm/sh_bios.h>
58#endif
59
60#include "sh-sci.h"
61
62/* Offsets into the sci_port->irqs array */
63enum {
64	SCIx_ERI_IRQ,
65	SCIx_RXI_IRQ,
66	SCIx_TXI_IRQ,
67	SCIx_BRI_IRQ,
68	SCIx_NR_IRQS,
69
70	SCIx_MUX_IRQ = SCIx_NR_IRQS,	/* special case */
71};
72
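/*
 * A port's interrupts are considered muxed onto a single line when the
 * ERI and RXI entries share one IRQ number, or when only the ERI entry
 * holds a valid IRQ number (RXI is negative); such ports are serviced
 * by the single sci_mpxed_interrupt() handler.
 */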
#define SCIx_IRQ_IS_MUXED(port)			\
	(((port)->irqs[SCIx_ERI_IRQ] ==	\
	  (port)->irqs[SCIx_RXI_IRQ]) ||	\
	 ((port)->irqs[SCIx_ERI_IRQ] &&	\
	  ((port)->irqs[SCIx_RXI_IRQ] < 0)))
78
79struct sci_port {
80	struct uart_port	port;
81
82	/* Platform configuration */
83	struct plat_sci_port	*cfg;
84	int			overrun_bit;
85	unsigned int		error_mask;
86	unsigned int		sampling_rate;
87
88
89	/* Break timer */
90	struct timer_list	break_timer;
91	int			break_flag;
92
93	/* Interface clock */
94	struct clk		*iclk;
95	/* Function clock */
96	struct clk		*fclk;
97
98	int			irqs[SCIx_NR_IRQS];
99	char			*irqstr[SCIx_NR_IRQS];
100
101	struct dma_chan			*chan_tx;
102	struct dma_chan			*chan_rx;
103
104#ifdef CONFIG_SERIAL_SH_SCI_DMA
105	struct dma_async_tx_descriptor	*desc_tx;
106	struct dma_async_tx_descriptor	*desc_rx[2];
107	dma_cookie_t			cookie_tx;
108	dma_cookie_t			cookie_rx[2];
109	dma_cookie_t			active_rx;
110	struct scatterlist		sg_tx;
111	unsigned int			sg_len_tx;
112	struct scatterlist		sg_rx[2];
113	size_t				buf_len_rx;
114	struct sh_dmae_slave		param_tx;
115	struct sh_dmae_slave		param_rx;
116	struct work_struct		work_tx;
117	struct work_struct		work_rx;
118	struct timer_list		rx_timer;
119	unsigned int			rx_timeout;
120#endif
121
122	struct notifier_block		freq_transition;
123};
124
125/* Function prototypes */
126static void sci_start_tx(struct uart_port *port);
127static void sci_stop_tx(struct uart_port *port);
128static void sci_start_rx(struct uart_port *port);
129
130#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
131
132static struct sci_port sci_ports[SCI_NPORTS];
133static struct uart_driver sci_uart_driver;
134
135static inline struct sci_port *
136to_sci_port(struct uart_port *uart)
137{
138	return container_of(uart, struct sci_port, port);
139}
140
141struct plat_sci_reg {
142	u8 offset, size;
143};
144
145/* Helper for invalidating specific entries of an inherited map. */
146#define sci_reg_invalid	{ .offset = 0, .size = 0 }
147
148static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
149	[SCIx_PROBE_REGTYPE] = {
150		[0 ... SCIx_NR_REGS - 1] = sci_reg_invalid,
151	},
152
153	/*
154	 * Common SCI definitions, dependent on the port's regshift
155	 * value.
156	 */
157	[SCIx_SCI_REGTYPE] = {
158		[SCSMR]		= { 0x00,  8 },
159		[SCBRR]		= { 0x01,  8 },
160		[SCSCR]		= { 0x02,  8 },
161		[SCxTDR]	= { 0x03,  8 },
162		[SCxSR]		= { 0x04,  8 },
163		[SCxRDR]	= { 0x05,  8 },
164		[SCFCR]		= sci_reg_invalid,
165		[SCFDR]		= sci_reg_invalid,
166		[SCTFDR]	= sci_reg_invalid,
167		[SCRFDR]	= sci_reg_invalid,
168		[SCSPTR]	= sci_reg_invalid,
169		[SCLSR]		= sci_reg_invalid,
170		[HSSRR]		= sci_reg_invalid,
171	},
172
173	/*
174	 * Common definitions for legacy IrDA ports, dependent on
175	 * regshift value.
176	 */
177	[SCIx_IRDA_REGTYPE] = {
178		[SCSMR]		= { 0x00,  8 },
179		[SCBRR]		= { 0x01,  8 },
180		[SCSCR]		= { 0x02,  8 },
181		[SCxTDR]	= { 0x03,  8 },
182		[SCxSR]		= { 0x04,  8 },
183		[SCxRDR]	= { 0x05,  8 },
184		[SCFCR]		= { 0x06,  8 },
185		[SCFDR]		= { 0x07, 16 },
186		[SCTFDR]	= sci_reg_invalid,
187		[SCRFDR]	= sci_reg_invalid,
188		[SCSPTR]	= sci_reg_invalid,
189		[SCLSR]		= sci_reg_invalid,
190		[HSSRR]		= sci_reg_invalid,
191	},
192
193	/*
194	 * Common SCIFA definitions.
195	 */
196	[SCIx_SCIFA_REGTYPE] = {
197		[SCSMR]		= { 0x00, 16 },
198		[SCBRR]		= { 0x04,  8 },
199		[SCSCR]		= { 0x08, 16 },
200		[SCxTDR]	= { 0x20,  8 },
201		[SCxSR]		= { 0x14, 16 },
202		[SCxRDR]	= { 0x24,  8 },
203		[SCFCR]		= { 0x18, 16 },
204		[SCFDR]		= { 0x1c, 16 },
205		[SCTFDR]	= sci_reg_invalid,
206		[SCRFDR]	= sci_reg_invalid,
207		[SCSPTR]	= sci_reg_invalid,
208		[SCLSR]		= sci_reg_invalid,
209		[HSSRR]		= sci_reg_invalid,
210	},
211
212	/*
213	 * Common SCIFB definitions.
214	 */
215	[SCIx_SCIFB_REGTYPE] = {
216		[SCSMR]		= { 0x00, 16 },
217		[SCBRR]		= { 0x04,  8 },
218		[SCSCR]		= { 0x08, 16 },
219		[SCxTDR]	= { 0x40,  8 },
220		[SCxSR]		= { 0x14, 16 },
221		[SCxRDR]	= { 0x60,  8 },
222		[SCFCR]		= { 0x18, 16 },
223		[SCFDR]		= sci_reg_invalid,
224		[SCTFDR]	= { 0x38, 16 },
225		[SCRFDR]	= { 0x3c, 16 },
226		[SCSPTR]	= sci_reg_invalid,
227		[SCLSR]		= sci_reg_invalid,
228		[HSSRR]		= sci_reg_invalid,
229	},
230
231	/*
232	 * Common SH-2(A) SCIF definitions for ports with FIFO data
233	 * count registers.
234	 */
235	[SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
236		[SCSMR]		= { 0x00, 16 },
237		[SCBRR]		= { 0x04,  8 },
238		[SCSCR]		= { 0x08, 16 },
239		[SCxTDR]	= { 0x0c,  8 },
240		[SCxSR]		= { 0x10, 16 },
241		[SCxRDR]	= { 0x14,  8 },
242		[SCFCR]		= { 0x18, 16 },
243		[SCFDR]		= { 0x1c, 16 },
244		[SCTFDR]	= sci_reg_invalid,
245		[SCRFDR]	= sci_reg_invalid,
246		[SCSPTR]	= { 0x20, 16 },
247		[SCLSR]		= { 0x24, 16 },
248		[HSSRR]		= sci_reg_invalid,
249	},
250
251	/*
252	 * Common SH-3 SCIF definitions.
253	 */
254	[SCIx_SH3_SCIF_REGTYPE] = {
255		[SCSMR]		= { 0x00,  8 },
256		[SCBRR]		= { 0x02,  8 },
257		[SCSCR]		= { 0x04,  8 },
258		[SCxTDR]	= { 0x06,  8 },
259		[SCxSR]		= { 0x08, 16 },
260		[SCxRDR]	= { 0x0a,  8 },
261		[SCFCR]		= { 0x0c,  8 },
262		[SCFDR]		= { 0x0e, 16 },
263		[SCTFDR]	= sci_reg_invalid,
264		[SCRFDR]	= sci_reg_invalid,
265		[SCSPTR]	= sci_reg_invalid,
266		[SCLSR]		= sci_reg_invalid,
267		[HSSRR]		= sci_reg_invalid,
268	},
269
270	/*
271	 * Common SH-4(A) SCIF(B) definitions.
272	 */
273	[SCIx_SH4_SCIF_REGTYPE] = {
274		[SCSMR]		= { 0x00, 16 },
275		[SCBRR]		= { 0x04,  8 },
276		[SCSCR]		= { 0x08, 16 },
277		[SCxTDR]	= { 0x0c,  8 },
278		[SCxSR]		= { 0x10, 16 },
279		[SCxRDR]	= { 0x14,  8 },
280		[SCFCR]		= { 0x18, 16 },
281		[SCFDR]		= { 0x1c, 16 },
282		[SCTFDR]	= sci_reg_invalid,
283		[SCRFDR]	= sci_reg_invalid,
284		[SCSPTR]	= { 0x20, 16 },
285		[SCLSR]		= { 0x24, 16 },
286		[HSSRR]		= sci_reg_invalid,
287	},
288
289	/*
290	 * Common HSCIF definitions.
291	 */
292	[SCIx_HSCIF_REGTYPE] = {
293		[SCSMR]		= { 0x00, 16 },
294		[SCBRR]		= { 0x04,  8 },
295		[SCSCR]		= { 0x08, 16 },
296		[SCxTDR]	= { 0x0c,  8 },
297		[SCxSR]		= { 0x10, 16 },
298		[SCxRDR]	= { 0x14,  8 },
299		[SCFCR]		= { 0x18, 16 },
300		[SCFDR]		= { 0x1c, 16 },
301		[SCTFDR]	= sci_reg_invalid,
302		[SCRFDR]	= sci_reg_invalid,
303		[SCSPTR]	= { 0x20, 16 },
304		[SCLSR]		= { 0x24, 16 },
305		[HSSRR]		= { 0x40, 16 },
306	},
307
308	/*
309	 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
310	 * register.
311	 */
312	[SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
313		[SCSMR]		= { 0x00, 16 },
314		[SCBRR]		= { 0x04,  8 },
315		[SCSCR]		= { 0x08, 16 },
316		[SCxTDR]	= { 0x0c,  8 },
317		[SCxSR]		= { 0x10, 16 },
318		[SCxRDR]	= { 0x14,  8 },
319		[SCFCR]		= { 0x18, 16 },
320		[SCFDR]		= { 0x1c, 16 },
321		[SCTFDR]	= sci_reg_invalid,
322		[SCRFDR]	= sci_reg_invalid,
323		[SCSPTR]	= sci_reg_invalid,
324		[SCLSR]		= { 0x24, 16 },
325		[HSSRR]		= sci_reg_invalid,
326	},
327
328	/*
329	 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
330	 * count registers.
331	 */
332	[SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
333		[SCSMR]		= { 0x00, 16 },
334		[SCBRR]		= { 0x04,  8 },
335		[SCSCR]		= { 0x08, 16 },
336		[SCxTDR]	= { 0x0c,  8 },
337		[SCxSR]		= { 0x10, 16 },
338		[SCxRDR]	= { 0x14,  8 },
339		[SCFCR]		= { 0x18, 16 },
340		[SCFDR]		= { 0x1c, 16 },
341		[SCTFDR]	= { 0x1c, 16 },	/* aliased to SCFDR */
342		[SCRFDR]	= { 0x20, 16 },
343		[SCSPTR]	= { 0x24, 16 },
344		[SCLSR]		= { 0x28, 16 },
345		[HSSRR]		= sci_reg_invalid,
346	},
347
348	/*
349	 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
350	 * registers.
351	 */
352	[SCIx_SH7705_SCIF_REGTYPE] = {
353		[SCSMR]		= { 0x00, 16 },
354		[SCBRR]		= { 0x04,  8 },
355		[SCSCR]		= { 0x08, 16 },
356		[SCxTDR]	= { 0x20,  8 },
357		[SCxSR]		= { 0x14, 16 },
358		[SCxRDR]	= { 0x24,  8 },
359		[SCFCR]		= { 0x18, 16 },
360		[SCFDR]		= { 0x1c, 16 },
361		[SCTFDR]	= sci_reg_invalid,
362		[SCRFDR]	= sci_reg_invalid,
363		[SCSPTR]	= sci_reg_invalid,
364		[SCLSR]		= sci_reg_invalid,
365		[HSSRR]		= sci_reg_invalid,
366	},
367};
368
369#define sci_getreg(up, offset)		(sci_regmap[to_sci_port(up)->cfg->regtype] + offset)
370
371/*
372 * The "offset" here is rather misleading, in that it refers to an enum
373 * value relative to the port mapping rather than the fixed offset
374 * itself, which needs to be manually retrieved from the platform's
375 * register map for the given port.
376 */
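/*
 * Illustrative example (assuming a regshift of 0): for a port using
 * SCIx_SH4_SCIF_REGTYPE, sci_getreg(port, SCFCR) resolves to the
 * { 0x18, 16 } entry in the table above, so SCFCR accesses become
 * 16-bit reads and writes at membase + 0x18.
 */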
377static unsigned int sci_serial_in(struct uart_port *p, int offset)
378{
379	struct plat_sci_reg *reg = sci_getreg(p, offset);
380
381	if (reg->size == 8)
382		return ioread8(p->membase + (reg->offset << p->regshift));
383	else if (reg->size == 16)
384		return ioread16(p->membase + (reg->offset << p->regshift));
385	else
386		WARN(1, "Invalid register access\n");
387
388	return 0;
389}
390
391static void sci_serial_out(struct uart_port *p, int offset, int value)
392{
393	struct plat_sci_reg *reg = sci_getreg(p, offset);
394
395	if (reg->size == 8)
396		iowrite8(value, p->membase + (reg->offset << p->regshift));
397	else if (reg->size == 16)
398		iowrite16(value, p->membase + (reg->offset << p->regshift));
399	else
400		WARN(1, "Invalid register access\n");
401}
402
403static int sci_probe_regmap(struct plat_sci_port *cfg)
404{
405	switch (cfg->type) {
406	case PORT_SCI:
407		cfg->regtype = SCIx_SCI_REGTYPE;
408		break;
409	case PORT_IRDA:
410		cfg->regtype = SCIx_IRDA_REGTYPE;
411		break;
412	case PORT_SCIFA:
413		cfg->regtype = SCIx_SCIFA_REGTYPE;
414		break;
415	case PORT_SCIFB:
416		cfg->regtype = SCIx_SCIFB_REGTYPE;
417		break;
418	case PORT_SCIF:
419		/*
420		 * The SH-4 is a bit of a misnomer here, although that's
421		 * where this particular port layout originated. This
422		 * configuration (or some slight variation thereof)
423		 * remains the dominant model for all SCIFs.
424		 */
425		cfg->regtype = SCIx_SH4_SCIF_REGTYPE;
426		break;
427	case PORT_HSCIF:
428		cfg->regtype = SCIx_HSCIF_REGTYPE;
429		break;
430	default:
431		pr_err("Can't probe register map for given port\n");
432		return -EINVAL;
433	}
434
435	return 0;
436}
437
438static void sci_port_enable(struct sci_port *sci_port)
439{
440	if (!sci_port->port.dev)
441		return;
442
443	pm_runtime_get_sync(sci_port->port.dev);
444
445	clk_prepare_enable(sci_port->iclk);
446	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
447	clk_prepare_enable(sci_port->fclk);
448}
449
450static void sci_port_disable(struct sci_port *sci_port)
451{
452	if (!sci_port->port.dev)
453		return;
454
455	/* Cancel the break timer to ensure that the timer handler will not try
456	 * to access the hardware with clocks and power disabled. Reset the
457	 * break flag to make the break debouncing state machine ready for the
458	 * next break.
459	 */
460	del_timer_sync(&sci_port->break_timer);
461	sci_port->break_flag = 0;
462
463	clk_disable_unprepare(sci_port->fclk);
464	clk_disable_unprepare(sci_port->iclk);
465
466	pm_runtime_put_sync(sci_port->port.dev);
467}
468
469#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
470
471#ifdef CONFIG_CONSOLE_POLL
472static int sci_poll_get_char(struct uart_port *port)
473{
474	unsigned short status;
475	int c;
476
477	do {
478		status = serial_port_in(port, SCxSR);
479		if (status & SCxSR_ERRORS(port)) {
480			serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
481			continue;
482		}
483		break;
484	} while (1);
485
486	if (!(status & SCxSR_RDxF(port)))
487		return NO_POLL_CHAR;
488
489	c = serial_port_in(port, SCxRDR);
490
491	/* Dummy read */
492	serial_port_in(port, SCxSR);
493	serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
494
495	return c;
496}
497#endif
498
499static void sci_poll_put_char(struct uart_port *port, unsigned char c)
500{
501	unsigned short status;
502
503	do {
504		status = serial_port_in(port, SCxSR);
505	} while (!(status & SCxSR_TDxE(port)));
506
507	serial_port_out(port, SCxTDR, c);
508	serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
509}
510#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
511
512static void sci_init_pins(struct uart_port *port, unsigned int cflag)
513{
514	struct sci_port *s = to_sci_port(port);
515	struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
516
517	/*
518	 * Use port-specific handler if provided.
519	 */
520	if (s->cfg->ops && s->cfg->ops->init_pins) {
521		s->cfg->ops->init_pins(port, cflag);
522		return;
523	}
524
525	/*
526	 * For the generic path SCSPTR is necessary. Bail out if that's
527	 * unavailable, too.
528	 */
529	if (!reg->size)
530		return;
531
532	if ((s->cfg->capabilities & SCIx_HAVE_RTSCTS) &&
533	    ((!(cflag & CRTSCTS)))) {
534		unsigned short status;
535
536		status = serial_port_in(port, SCSPTR);
537		status &= ~SCSPTR_CTSIO;
538		status |= SCSPTR_RTSIO;
539		serial_port_out(port, SCSPTR, status); /* Set RTS = 1 */
540	}
541}
542
543static int sci_txfill(struct uart_port *port)
544{
545	struct plat_sci_reg *reg;
546
547	reg = sci_getreg(port, SCTFDR);
548	if (reg->size)
549		return serial_port_in(port, SCTFDR) & ((port->fifosize << 1) - 1);
550
551	reg = sci_getreg(port, SCFDR);
552	if (reg->size)
553		return serial_port_in(port, SCFDR) >> 8;
554
555	return !(serial_port_in(port, SCxSR) & SCI_TDRE);
556}
557
558static int sci_txroom(struct uart_port *port)
559{
560	return port->fifosize - sci_txfill(port);
561}
562
563static int sci_rxfill(struct uart_port *port)
564{
565	struct plat_sci_reg *reg;
566
567	reg = sci_getreg(port, SCRFDR);
568	if (reg->size)
569		return serial_port_in(port, SCRFDR) & ((port->fifosize << 1) - 1);
570
571	reg = sci_getreg(port, SCFDR);
572	if (reg->size)
573		return serial_port_in(port, SCFDR) & ((port->fifosize << 1) - 1);
574
575	return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
576}
577
578/*
579 * SCI helper for checking the state of the muxed port/RXD pins.
580 */
581static inline int sci_rxd_in(struct uart_port *port)
582{
583	struct sci_port *s = to_sci_port(port);
584
585	if (s->cfg->port_reg <= 0)
586		return 1;
587
588	/* Cast for ARM damage */
589	return !!__raw_readb((void __iomem *)(uintptr_t)s->cfg->port_reg);
590}
591
592/* ********************************************************************** *
593 *                   the interrupt related routines                       *
594 * ********************************************************************** */
595
596static void sci_transmit_chars(struct uart_port *port)
597{
598	struct circ_buf *xmit = &port->state->xmit;
599	unsigned int stopped = uart_tx_stopped(port);
600	unsigned short status;
601	unsigned short ctrl;
602	int count;
603
604	status = serial_port_in(port, SCxSR);
605	if (!(status & SCxSR_TDxE(port))) {
606		ctrl = serial_port_in(port, SCSCR);
607		if (uart_circ_empty(xmit))
608			ctrl &= ~SCSCR_TIE;
609		else
610			ctrl |= SCSCR_TIE;
611		serial_port_out(port, SCSCR, ctrl);
612		return;
613	}
614
615	count = sci_txroom(port);
616
617	do {
618		unsigned char c;
619
620		if (port->x_char) {
621			c = port->x_char;
622			port->x_char = 0;
623		} else if (!uart_circ_empty(xmit) && !stopped) {
624			c = xmit->buf[xmit->tail];
625			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
626		} else {
627			break;
628		}
629
630		serial_port_out(port, SCxTDR, c);
631
632		port->icount.tx++;
633	} while (--count > 0);
634
635	serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
636
637	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
638		uart_write_wakeup(port);
639	if (uart_circ_empty(xmit)) {
640		sci_stop_tx(port);
641	} else {
642		ctrl = serial_port_in(port, SCSCR);
643
644		if (port->type != PORT_SCI) {
645			serial_port_in(port, SCxSR); /* Dummy read */
646			serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
647		}
648
649		ctrl |= SCSCR_TIE;
650		serial_port_out(port, SCSCR, ctrl);
651	}
652}
653
654/* On SH3, SCIF may read end-of-break as a space->mark char */
655#define STEPFN(c)  ({int __c = (c); (((__c-1)|(__c)) == -1); })
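/*
 * Interpretation (assumed): after sign extension, the test above is true
 * only for 0x00 or a contiguous run of 1s down from bit 7 (0x80, 0xc0,
 * ..., 0xff), the bit patterns a receiver sees when the line returns
 * from break (space) to mark partway through a character frame.
 */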
656
657static void sci_receive_chars(struct uart_port *port)
658{
659	struct sci_port *sci_port = to_sci_port(port);
660	struct tty_port *tport = &port->state->port;
661	int i, count, copied = 0;
662	unsigned short status;
663	unsigned char flag;
664
665	status = serial_port_in(port, SCxSR);
666	if (!(status & SCxSR_RDxF(port)))
667		return;
668
669	while (1) {
670		/* Don't copy more bytes than there is room for in the buffer */
671		count = tty_buffer_request_room(tport, sci_rxfill(port));
672
673		/* If for any reason we can't copy more data, we're done! */
674		if (count == 0)
675			break;
676
677		if (port->type == PORT_SCI) {
678			char c = serial_port_in(port, SCxRDR);
679			if (uart_handle_sysrq_char(port, c) ||
680			    sci_port->break_flag)
681				count = 0;
682			else
683				tty_insert_flip_char(tport, c, TTY_NORMAL);
684		} else {
685			for (i = 0; i < count; i++) {
686				char c = serial_port_in(port, SCxRDR);
687
688				status = serial_port_in(port, SCxSR);
689#if defined(CONFIG_CPU_SH3)
690				/* Skip "chars" during break */
691				if (sci_port->break_flag) {
692					if ((c == 0) &&
693					    (status & SCxSR_FER(port))) {
694						count--; i--;
695						continue;
696					}
697
698					/* Nonzero => end-of-break */
699					dev_dbg(port->dev, "debounce<%02x>\n", c);
700					sci_port->break_flag = 0;
701
702					if (STEPFN(c)) {
703						count--; i--;
704						continue;
705					}
706				}
707#endif /* CONFIG_CPU_SH3 */
708				if (uart_handle_sysrq_char(port, c)) {
709					count--; i--;
710					continue;
711				}
712
713				/* Store data and status */
714				if (status & SCxSR_FER(port)) {
715					flag = TTY_FRAME;
716					port->icount.frame++;
717					dev_notice(port->dev, "frame error\n");
718				} else if (status & SCxSR_PER(port)) {
719					flag = TTY_PARITY;
720					port->icount.parity++;
721					dev_notice(port->dev, "parity error\n");
722				} else
723					flag = TTY_NORMAL;
724
725				tty_insert_flip_char(tport, c, flag);
726			}
727		}
728
729		serial_port_in(port, SCxSR); /* dummy read */
730		serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
731
732		copied += count;
733		port->icount.rx += count;
734	}
735
736	if (copied) {
737		/* Tell the rest of the system the news. New characters! */
738		tty_flip_buffer_push(tport);
739	} else {
740		serial_port_in(port, SCxSR); /* dummy read */
741		serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
742	}
743}
744
745#define SCI_BREAK_JIFFIES (HZ/20)
746
/*
 * The SCI generates interrupts during a break (roughly one per
 * millisecond at 9600 baud), so don't bother disabling them. We only
 * want to report a single break event, though, so use a kernel timer
 * to periodically poll the RX line until the break is finished.
 */
755static inline void sci_schedule_break_timer(struct sci_port *port)
756{
757	mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
758}
759
760/* Ensure that two consecutive samples find the break over. */
761static void sci_break_timer(unsigned long data)
762{
763	struct sci_port *port = (struct sci_port *)data;
764
765	if (sci_rxd_in(&port->port) == 0) {
766		port->break_flag = 1;
767		sci_schedule_break_timer(port);
768	} else if (port->break_flag == 1) {
769		/* break is over. */
770		port->break_flag = 2;
771		sci_schedule_break_timer(port);
772	} else
773		port->break_flag = 0;
774}
775
776static int sci_handle_errors(struct uart_port *port)
777{
778	int copied = 0;
779	unsigned short status = serial_port_in(port, SCxSR);
780	struct tty_port *tport = &port->state->port;
781	struct sci_port *s = to_sci_port(port);
782
783	/* Handle overruns */
784	if (status & (1 << s->overrun_bit)) {
785		port->icount.overrun++;
786
787		/* overrun error */
788		if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
789			copied++;
790
791		dev_notice(port->dev, "overrun error\n");
792	}
793
794	if (status & SCxSR_FER(port)) {
795		if (sci_rxd_in(port) == 0) {
796			/* Notify of BREAK */
797			struct sci_port *sci_port = to_sci_port(port);
798
799			if (!sci_port->break_flag) {
800				port->icount.brk++;
801
802				sci_port->break_flag = 1;
803				sci_schedule_break_timer(sci_port);
804
805				/* Do sysrq handling. */
806				if (uart_handle_break(port))
807					return 0;
808
809				dev_dbg(port->dev, "BREAK detected\n");
810
811				if (tty_insert_flip_char(tport, 0, TTY_BREAK))
812					copied++;
813			}
814
815		} else {
816			/* frame error */
817			port->icount.frame++;
818
819			if (tty_insert_flip_char(tport, 0, TTY_FRAME))
820				copied++;
821
822			dev_notice(port->dev, "frame error\n");
823		}
824	}
825
826	if (status & SCxSR_PER(port)) {
827		/* parity error */
828		port->icount.parity++;
829
830		if (tty_insert_flip_char(tport, 0, TTY_PARITY))
831			copied++;
832
833		dev_notice(port->dev, "parity error\n");
834	}
835
836	if (copied)
837		tty_flip_buffer_push(tport);
838
839	return copied;
840}
841
842static int sci_handle_fifo_overrun(struct uart_port *port)
843{
844	struct tty_port *tport = &port->state->port;
845	struct sci_port *s = to_sci_port(port);
846	struct plat_sci_reg *reg;
847	int copied = 0;
848
849	reg = sci_getreg(port, SCLSR);
850	if (!reg->size)
851		return 0;
852
853	if ((serial_port_in(port, SCLSR) & (1 << s->overrun_bit))) {
854		serial_port_out(port, SCLSR, 0);
855
856		port->icount.overrun++;
857
858		tty_insert_flip_char(tport, 0, TTY_OVERRUN);
859		tty_flip_buffer_push(tport);
860
861		dev_notice(port->dev, "overrun error\n");
862		copied++;
863	}
864
865	return copied;
866}
867
868static int sci_handle_breaks(struct uart_port *port)
869{
870	int copied = 0;
871	unsigned short status = serial_port_in(port, SCxSR);
872	struct tty_port *tport = &port->state->port;
873	struct sci_port *s = to_sci_port(port);
874
875	if (uart_handle_break(port))
876		return 0;
877
878	if (!s->break_flag && status & SCxSR_BRK(port)) {
879#if defined(CONFIG_CPU_SH3)
880		/* Debounce break */
881		s->break_flag = 1;
882#endif
883
884		port->icount.brk++;
885
886		/* Notify of BREAK */
887		if (tty_insert_flip_char(tport, 0, TTY_BREAK))
888			copied++;
889
890		dev_dbg(port->dev, "BREAK detected\n");
891	}
892
893	if (copied)
894		tty_flip_buffer_push(tport);
895
896	copied += sci_handle_fifo_overrun(port);
897
898	return copied;
899}
900
901static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
902{
903#ifdef CONFIG_SERIAL_SH_SCI_DMA
904	struct uart_port *port = ptr;
905	struct sci_port *s = to_sci_port(port);
906
907	if (s->chan_rx) {
908		u16 scr = serial_port_in(port, SCSCR);
909		u16 ssr = serial_port_in(port, SCxSR);
910
911		/* Disable future Rx interrupts */
912		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
913			disable_irq_nosync(irq);
914			scr |= SCSCR_RDRQE;
915		} else {
916			scr &= ~SCSCR_RIE;
917		}
918		serial_port_out(port, SCSCR, scr);
919		/* Clear current interrupt */
920		serial_port_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
921		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
922			jiffies, s->rx_timeout);
923		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
924
925		return IRQ_HANDLED;
926	}
927#endif
928
	/*
	 * sci_receive_chars() has to be called irrespective of whether
	 * I_IXOFF is set; otherwise the receive interrupt would never be
	 * cleared and would keep firing.
	 */
933	sci_receive_chars(ptr);
934
935	return IRQ_HANDLED;
936}
937
938static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
939{
940	struct uart_port *port = ptr;
941	unsigned long flags;
942
943	spin_lock_irqsave(&port->lock, flags);
944	sci_transmit_chars(port);
945	spin_unlock_irqrestore(&port->lock, flags);
946
947	return IRQ_HANDLED;
948}
949
950static irqreturn_t sci_er_interrupt(int irq, void *ptr)
951{
952	struct uart_port *port = ptr;
953
954	/* Handle errors */
955	if (port->type == PORT_SCI) {
956		if (sci_handle_errors(port)) {
957			/* discard character in rx buffer */
958			serial_port_in(port, SCxSR);
959			serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
960		}
961	} else {
962		sci_handle_fifo_overrun(port);
963		sci_rx_interrupt(irq, ptr);
964	}
965
966	serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
967
968	/* Kick the transmission */
969	sci_tx_interrupt(irq, ptr);
970
971	return IRQ_HANDLED;
972}
973
974static irqreturn_t sci_br_interrupt(int irq, void *ptr)
975{
976	struct uart_port *port = ptr;
977
978	/* Handle BREAKs */
979	sci_handle_breaks(port);
980	serial_port_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));
981
982	return IRQ_HANDLED;
983}
984
985static inline unsigned long port_rx_irq_mask(struct uart_port *port)
986{
987	/*
988	 * Not all ports (such as SCIFA) will support REIE. Rather than
989	 * special-casing the port type, we check the port initialization
990	 * IRQ enable mask to see whether the IRQ is desired at all. If
991	 * it's unset, it's logically inferred that there's no point in
992	 * testing for it.
993	 */
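	/*
	 * Example (illustrative): a SCIF whose platform scscr value
	 * includes SCSCR_REIE yields SCSCR_RIE | SCSCR_REIE here, while a
	 * SCIFA port without REIE support collapses to just SCSCR_RIE.
	 */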
994	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
995}
996
997static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
998{
999	unsigned short ssr_status, scr_status, err_enabled;
1000	struct uart_port *port = ptr;
1001	struct sci_port *s = to_sci_port(port);
1002	irqreturn_t ret = IRQ_NONE;
1003
1004	ssr_status = serial_port_in(port, SCxSR);
1005	scr_status = serial_port_in(port, SCSCR);
1006	err_enabled = scr_status & port_rx_irq_mask(port);
1007
1008	/* Tx Interrupt */
1009	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
1010	    !s->chan_tx)
1011		ret = sci_tx_interrupt(irq, ptr);
1012
1013	/*
1014	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
1015	 * DR flags
1016	 */
1017	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
1018	    (scr_status & SCSCR_RIE))
1019		ret = sci_rx_interrupt(irq, ptr);
1020
1021	/* Error Interrupt */
1022	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
1023		ret = sci_er_interrupt(irq, ptr);
1024
1025	/* Break Interrupt */
1026	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
1027		ret = sci_br_interrupt(irq, ptr);
1028
1029	return ret;
1030}
1031
1032/*
1033 * Here we define a transition notifier so that we can update all of our
1034 * ports' baud rate when the peripheral clock changes.
1035 */
1036static int sci_notifier(struct notifier_block *self,
1037			unsigned long phase, void *p)
1038{
1039	struct sci_port *sci_port;
1040	unsigned long flags;
1041
1042	sci_port = container_of(self, struct sci_port, freq_transition);
1043
1044	if (phase == CPUFREQ_POSTCHANGE) {
1045		struct uart_port *port = &sci_port->port;
1046
1047		spin_lock_irqsave(&port->lock, flags);
1048		port->uartclk = clk_get_rate(sci_port->iclk);
1049		spin_unlock_irqrestore(&port->lock, flags);
1050	}
1051
1052	return NOTIFY_OK;
1053}
1054
1055static struct sci_irq_desc {
1056	const char	*desc;
1057	irq_handler_t	handler;
1058} sci_irq_desc[] = {
1059	/*
1060	 * Split out handlers, the default case.
1061	 */
1062	[SCIx_ERI_IRQ] = {
1063		.desc = "rx err",
1064		.handler = sci_er_interrupt,
1065	},
1066
1067	[SCIx_RXI_IRQ] = {
1068		.desc = "rx full",
1069		.handler = sci_rx_interrupt,
1070	},
1071
1072	[SCIx_TXI_IRQ] = {
1073		.desc = "tx empty",
1074		.handler = sci_tx_interrupt,
1075	},
1076
1077	[SCIx_BRI_IRQ] = {
1078		.desc = "break",
1079		.handler = sci_br_interrupt,
1080	},
1081
1082	/*
1083	 * Special muxed handler.
1084	 */
1085	[SCIx_MUX_IRQ] = {
1086		.desc = "mux",
1087		.handler = sci_mpxed_interrupt,
1088	},
1089};
1090
1091static int sci_request_irq(struct sci_port *port)
1092{
1093	struct uart_port *up = &port->port;
1094	int i, j, ret = 0;
1095
1096	for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
1097		struct sci_irq_desc *desc;
1098		int irq;
1099
1100		if (SCIx_IRQ_IS_MUXED(port)) {
1101			i = SCIx_MUX_IRQ;
1102			irq = up->irq;
1103		} else {
1104			irq = port->irqs[i];
1105
1106			/*
1107			 * Certain port types won't support all of the
1108			 * available interrupt sources.
1109			 */
1110			if (unlikely(irq < 0))
1111				continue;
1112		}
1113
1114		desc = sci_irq_desc + i;
1115		port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
1116					    dev_name(up->dev), desc->desc);
1117		if (!port->irqstr[j]) {
1118			dev_err(up->dev, "Failed to allocate %s IRQ string\n",
1119				desc->desc);
1120			goto out_nomem;
1121		}
1122
1123		ret = request_irq(irq, desc->handler, up->irqflags,
1124				  port->irqstr[j], port);
1125		if (unlikely(ret)) {
1126			dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
1127			goto out_noirq;
1128		}
1129	}
1130
1131	return 0;
1132
1133out_noirq:
1134	while (--i >= 0)
1135		free_irq(port->irqs[i], port);
1136
1137out_nomem:
1138	while (--j >= 0)
1139		kfree(port->irqstr[j]);
1140
1141	return ret;
1142}
1143
1144static void sci_free_irq(struct sci_port *port)
1145{
1146	int i;
1147
	/*
	 * Free each requested IRQ. For muxed ports all irqs[] entries
	 * refer to the same interrupt, so it is freed only once and we
	 * return as soon as the muxed case is detected below.
	 */
1152	for (i = 0; i < SCIx_NR_IRQS; i++) {
1153		int irq = port->irqs[i];
1154
1155		/*
1156		 * Certain port types won't support all of the available
1157		 * interrupt sources.
1158		 */
1159		if (unlikely(irq < 0))
1160			continue;
1161
1162		free_irq(port->irqs[i], port);
1163		kfree(port->irqstr[i]);
1164
1165		if (SCIx_IRQ_IS_MUXED(port)) {
1166			/* If there's only one IRQ, we're done. */
1167			return;
1168		}
1169	}
1170}
1171
1172static unsigned int sci_tx_empty(struct uart_port *port)
1173{
1174	unsigned short status = serial_port_in(port, SCxSR);
1175	unsigned short in_tx_fifo = sci_txfill(port);
1176
1177	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
1178}
1179
1180/*
1181 * Modem control is a bit of a mixed bag for SCI(F) ports. Generally
1182 * CTS/RTS is supported in hardware by at least one port and controlled
1183 * via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently
1184 * handled via the ->init_pins() op, which is a bit of a one-way street,
1185 * lacking any ability to defer pin control -- this will later be
1186 * converted over to the GPIO framework).
1187 *
1188 * Other modes (such as loopback) are supported generically on certain
1189 * port types, but not others. For these it's sufficient to test for the
1190 * existence of the support register and simply ignore the port type.
1191 */
1192static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
1193{
1194	if (mctrl & TIOCM_LOOP) {
1195		struct plat_sci_reg *reg;
1196
1197		/*
1198		 * Standard loopback mode for SCFCR ports.
1199		 */
1200		reg = sci_getreg(port, SCFCR);
1201		if (reg->size)
1202			serial_port_out(port, SCFCR,
1203					serial_port_in(port, SCFCR) |
1204					SCFCR_LOOP);
1205	}
1206}
1207
1208static unsigned int sci_get_mctrl(struct uart_port *port)
1209{
1210	/*
1211	 * CTS/RTS is handled in hardware when supported, while nothing
1212	 * else is wired up. Keep it simple and simply assert DSR/CAR.
1213	 */
1214	return TIOCM_DSR | TIOCM_CAR;
1215}
1216
1217#ifdef CONFIG_SERIAL_SH_SCI_DMA
1218static void sci_dma_tx_complete(void *arg)
1219{
1220	struct sci_port *s = arg;
1221	struct uart_port *port = &s->port;
1222	struct circ_buf *xmit = &port->state->xmit;
1223	unsigned long flags;
1224
1225	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1226
1227	spin_lock_irqsave(&port->lock, flags);
1228
1229	xmit->tail += sg_dma_len(&s->sg_tx);
1230	xmit->tail &= UART_XMIT_SIZE - 1;
1231
1232	port->icount.tx += sg_dma_len(&s->sg_tx);
1233
1234	async_tx_ack(s->desc_tx);
1235	s->desc_tx = NULL;
1236
1237	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1238		uart_write_wakeup(port);
1239
1240	if (!uart_circ_empty(xmit)) {
1241		s->cookie_tx = 0;
1242		schedule_work(&s->work_tx);
1243	} else {
1244		s->cookie_tx = -EINVAL;
1245		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1246			u16 ctrl = serial_port_in(port, SCSCR);
1247			serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
1248		}
1249	}
1250
1251	spin_unlock_irqrestore(&port->lock, flags);
1252}
1253
1254/* Locking: called with port lock held */
1255static int sci_dma_rx_push(struct sci_port *s, size_t count)
1256{
1257	struct uart_port *port = &s->port;
1258	struct tty_port *tport = &port->state->port;
1259	int i, active, room;
1260
1261	room = tty_buffer_request_room(tport, count);
1262
1263	if (s->active_rx == s->cookie_rx[0]) {
1264		active = 0;
1265	} else if (s->active_rx == s->cookie_rx[1]) {
1266		active = 1;
1267	} else {
1268		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
1269		return 0;
1270	}
1271
1272	if (room < count)
1273		dev_warn(port->dev, "Rx overrun: dropping %zu bytes\n",
1274			 count - room);
1275	if (!room)
1276		return room;
1277
1278	for (i = 0; i < room; i++)
1279		tty_insert_flip_char(tport, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
1280				     TTY_NORMAL);
1281
1282	port->icount.rx += room;
1283
1284	return room;
1285}
1286
1287static void sci_dma_rx_complete(void *arg)
1288{
1289	struct sci_port *s = arg;
1290	struct uart_port *port = &s->port;
1291	unsigned long flags;
1292	int count;
1293
1294	dev_dbg(port->dev, "%s(%d) active #%d\n",
1295		__func__, port->line, s->active_rx);
1296
1297	spin_lock_irqsave(&port->lock, flags);
1298
1299	count = sci_dma_rx_push(s, s->buf_len_rx);
1300
1301	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
1302
1303	spin_unlock_irqrestore(&port->lock, flags);
1304
1305	if (count)
1306		tty_flip_buffer_push(&port->state->port);
1307
1308	schedule_work(&s->work_rx);
1309}
1310
1311static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
1312{
1313	struct dma_chan *chan = s->chan_rx;
1314	struct uart_port *port = &s->port;
1315
1316	s->chan_rx = NULL;
1317	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
1318	dma_release_channel(chan);
1319	if (sg_dma_address(&s->sg_rx[0]))
1320		dma_free_coherent(port->dev, s->buf_len_rx * 2,
1321				  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
1322	if (enable_pio)
1323		sci_start_rx(port);
1324}
1325
1326static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
1327{
1328	struct dma_chan *chan = s->chan_tx;
1329	struct uart_port *port = &s->port;
1330
1331	s->chan_tx = NULL;
1332	s->cookie_tx = -EINVAL;
1333	dma_release_channel(chan);
1334	if (enable_pio)
1335		sci_start_tx(port);
1336}
1337
1338static void sci_submit_rx(struct sci_port *s)
1339{
1340	struct dma_chan *chan = s->chan_rx;
1341	int i;
1342
1343	for (i = 0; i < 2; i++) {
1344		struct scatterlist *sg = &s->sg_rx[i];
1345		struct dma_async_tx_descriptor *desc;
1346
1347		desc = dmaengine_prep_slave_sg(chan,
1348			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
1349
1350		if (desc) {
1351			s->desc_rx[i] = desc;
1352			desc->callback = sci_dma_rx_complete;
1353			desc->callback_param = s;
1354			s->cookie_rx[i] = desc->tx_submit(desc);
1355		}
1356
1357		if (!desc || s->cookie_rx[i] < 0) {
1358			if (i) {
1359				async_tx_ack(s->desc_rx[0]);
1360				s->cookie_rx[0] = -EINVAL;
1361			}
1362			if (desc) {
1363				async_tx_ack(desc);
1364				s->cookie_rx[i] = -EINVAL;
1365			}
1366			dev_warn(s->port.dev,
1367				 "failed to re-start DMA, using PIO\n");
1368			sci_rx_dma_release(s, true);
1369			return;
1370		}
1371		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n",
1372			__func__, s->cookie_rx[i], i);
1373	}
1374
1375	s->active_rx = s->cookie_rx[0];
1376
1377	dma_async_issue_pending(chan);
1378}
1379
1380static void work_fn_rx(struct work_struct *work)
1381{
1382	struct sci_port *s = container_of(work, struct sci_port, work_rx);
1383	struct uart_port *port = &s->port;
1384	struct dma_async_tx_descriptor *desc;
1385	int new;
1386
1387	if (s->active_rx == s->cookie_rx[0]) {
1388		new = 0;
1389	} else if (s->active_rx == s->cookie_rx[1]) {
1390		new = 1;
1391	} else {
1392		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
1393		return;
1394	}
1395	desc = s->desc_rx[new];
1396
1397	if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
1398	    DMA_COMPLETE) {
1399		/* Handle incomplete DMA receive */
1400		struct dma_chan *chan = s->chan_rx;
1401		struct shdma_desc *sh_desc = container_of(desc,
1402					struct shdma_desc, async_tx);
1403		unsigned long flags;
1404		int count;
1405
1406		dmaengine_terminate_all(chan);
1407		dev_dbg(port->dev, "Read %zu bytes with cookie %d\n",
1408			sh_desc->partial, sh_desc->cookie);
1409
1410		spin_lock_irqsave(&port->lock, flags);
1411		count = sci_dma_rx_push(s, sh_desc->partial);
1412		spin_unlock_irqrestore(&port->lock, flags);
1413
1414		if (count)
1415			tty_flip_buffer_push(&port->state->port);
1416
1417		sci_submit_rx(s);
1418
1419		return;
1420	}
1421
1422	s->cookie_rx[new] = desc->tx_submit(desc);
1423	if (s->cookie_rx[new] < 0) {
1424		dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
1425		sci_rx_dma_release(s, true);
1426		return;
1427	}
1428
1429	s->active_rx = s->cookie_rx[!new];
1430
1431	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n",
1432		__func__, s->cookie_rx[new], new, s->active_rx);
1433}
1434
1435static void work_fn_tx(struct work_struct *work)
1436{
1437	struct sci_port *s = container_of(work, struct sci_port, work_tx);
1438	struct dma_async_tx_descriptor *desc;
1439	struct dma_chan *chan = s->chan_tx;
1440	struct uart_port *port = &s->port;
1441	struct circ_buf *xmit = &port->state->xmit;
1442	struct scatterlist *sg = &s->sg_tx;
1443
1444	/*
1445	 * DMA is idle now.
1446	 * Port xmit buffer is already mapped, and it is one page... Just adjust
1447	 * offsets and lengths. Since it is a circular buffer, we have to
1448	 * transmit till the end, and then the rest. Take the port lock to get a
1449	 * consistent xmit buffer state.
1450	 */
1451	spin_lock_irq(&port->lock);
1452	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
1453	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
1454		sg->offset;
1455	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
1456		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
1457	spin_unlock_irq(&port->lock);
1458
1459	BUG_ON(!sg_dma_len(sg));
1460
1461	desc = dmaengine_prep_slave_sg(chan,
1462			sg, s->sg_len_tx, DMA_MEM_TO_DEV,
1463			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1464	if (!desc) {
1465		/* switch to PIO */
1466		sci_tx_dma_release(s, true);
1467		return;
1468	}
1469
1470	dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
1471
1472	spin_lock_irq(&port->lock);
1473	s->desc_tx = desc;
1474	desc->callback = sci_dma_tx_complete;
1475	desc->callback_param = s;
1476	spin_unlock_irq(&port->lock);
1477	s->cookie_tx = desc->tx_submit(desc);
1478	if (s->cookie_tx < 0) {
1479		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
1480		/* switch to PIO */
1481		sci_tx_dma_release(s, true);
1482		return;
1483	}
1484
1485	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
1486		__func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
1487
1488	dma_async_issue_pending(chan);
1489}
1490#endif
1491
1492static void sci_start_tx(struct uart_port *port)
1493{
1494	struct sci_port *s = to_sci_port(port);
1495	unsigned short ctrl;
1496
1497#ifdef CONFIG_SERIAL_SH_SCI_DMA
1498	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1499		u16 new, scr = serial_port_in(port, SCSCR);
1500		if (s->chan_tx)
1501			new = scr | SCSCR_TDRQE;
1502		else
1503			new = scr & ~SCSCR_TDRQE;
1504		if (new != scr)
1505			serial_port_out(port, SCSCR, new);
1506	}
1507
1508	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
1509	    s->cookie_tx < 0) {
1510		s->cookie_tx = 0;
1511		schedule_work(&s->work_tx);
1512	}
1513#endif
1514
1515	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1516		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
1517		ctrl = serial_port_in(port, SCSCR);
1518		serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
1519	}
1520}
1521
1522static void sci_stop_tx(struct uart_port *port)
1523{
1524	unsigned short ctrl;
1525
1526	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1527	ctrl = serial_port_in(port, SCSCR);
1528
1529	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1530		ctrl &= ~SCSCR_TDRQE;
1531
1532	ctrl &= ~SCSCR_TIE;
1533
1534	serial_port_out(port, SCSCR, ctrl);
1535}
1536
1537static void sci_start_rx(struct uart_port *port)
1538{
1539	unsigned short ctrl;
1540
1541	ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
1542
1543	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1544		ctrl &= ~SCSCR_RDRQE;
1545
1546	serial_port_out(port, SCSCR, ctrl);
1547}
1548
1549static void sci_stop_rx(struct uart_port *port)
1550{
1551	unsigned short ctrl;
1552
1553	ctrl = serial_port_in(port, SCSCR);
1554
1555	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1556		ctrl &= ~SCSCR_RDRQE;
1557
1558	ctrl &= ~port_rx_irq_mask(port);
1559
1560	serial_port_out(port, SCSCR, ctrl);
1561}
1562
1563static void sci_break_ctl(struct uart_port *port, int break_state)
1564{
1565	struct sci_port *s = to_sci_port(port);
1566	struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
1567	unsigned short scscr, scsptr;
1568
	/* check whether the port has SCSPTR */
1570	if (!reg->size) {
1571		/*
1572		 * Not supported by hardware. Most parts couple break and rx
1573		 * interrupts together, with break detection always enabled.
1574		 */
1575		return;
1576	}
1577
1578	scsptr = serial_port_in(port, SCSPTR);
1579	scscr = serial_port_in(port, SCSCR);
1580
1581	if (break_state == -1) {
1582		scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT;
1583		scscr &= ~SCSCR_TE;
1584	} else {
1585		scsptr = (scsptr | SCSPTR_SPB2DT) & ~SCSPTR_SPB2IO;
1586		scscr |= SCSCR_TE;
1587	}
1588
1589	serial_port_out(port, SCSPTR, scsptr);
1590	serial_port_out(port, SCSCR, scscr);
1591}
1592
1593#ifdef CONFIG_SERIAL_SH_SCI_DMA
1594static bool filter(struct dma_chan *chan, void *slave)
1595{
1596	struct sh_dmae_slave *param = slave;
1597
1598	dev_dbg(chan->device->dev, "%s: slave ID %d\n",
1599		__func__, param->shdma_slave.slave_id);
1600
1601	chan->private = &param->shdma_slave;
1602	return true;
1603}
1604
1605static void rx_timer_fn(unsigned long arg)
1606{
1607	struct sci_port *s = (struct sci_port *)arg;
1608	struct uart_port *port = &s->port;
1609	u16 scr = serial_port_in(port, SCSCR);
1610
1611	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1612		scr &= ~SCSCR_RDRQE;
1613		enable_irq(s->irqs[SCIx_RXI_IRQ]);
1614	}
1615	serial_port_out(port, SCSCR, scr | SCSCR_RIE);
1616	dev_dbg(port->dev, "DMA Rx timed out\n");
1617	schedule_work(&s->work_rx);
1618}
1619
1620static void sci_request_dma(struct uart_port *port)
1621{
1622	struct sci_port *s = to_sci_port(port);
1623	struct sh_dmae_slave *param;
1624	struct dma_chan *chan;
1625	dma_cap_mask_t mask;
1626	int nent;
1627
1628	dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);
1629
1630	if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0)
1631		return;
1632
1633	dma_cap_zero(mask);
1634	dma_cap_set(DMA_SLAVE, mask);
1635
1636	param = &s->param_tx;
1637
1638	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
1639	param->shdma_slave.slave_id = s->cfg->dma_slave_tx;
1640
1641	s->cookie_tx = -EINVAL;
1642	chan = dma_request_channel(mask, filter, param);
1643	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
1644	if (chan) {
1645		s->chan_tx = chan;
1646		sg_init_table(&s->sg_tx, 1);
1647		/* UART circular tx buffer is an aligned page. */
1648		BUG_ON((uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
1649		sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
1650			    UART_XMIT_SIZE,
1651			    (uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
1652		nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
1653		if (!nent)
1654			sci_tx_dma_release(s, false);
1655		else
1656			dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n",
1657				__func__,
1658				sg_dma_len(&s->sg_tx), port->state->xmit.buf,
1659				&sg_dma_address(&s->sg_tx));
1660
1661		s->sg_len_tx = nent;
1662
1663		INIT_WORK(&s->work_tx, work_fn_tx);
1664	}
1665
1666	param = &s->param_rx;
1667
1668	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
1669	param->shdma_slave.slave_id = s->cfg->dma_slave_rx;
1670
1671	chan = dma_request_channel(mask, filter, param);
1672	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
1673	if (chan) {
1674		dma_addr_t dma[2];
1675		void *buf[2];
1676		int i;
1677
1678		s->chan_rx = chan;
1679
1680		s->buf_len_rx = 2 * max(16, (int)port->fifosize);
1681		buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
1682					    &dma[0], GFP_KERNEL);
1683
1684		if (!buf[0]) {
1685			dev_warn(port->dev,
1686				 "failed to allocate dma buffer, using PIO\n");
1687			sci_rx_dma_release(s, true);
1688			return;
1689		}
1690
1691		buf[1] = buf[0] + s->buf_len_rx;
1692		dma[1] = dma[0] + s->buf_len_rx;
1693
1694		for (i = 0; i < 2; i++) {
1695			struct scatterlist *sg = &s->sg_rx[i];
1696
1697			sg_init_table(sg, 1);
1698			sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
1699				    (uintptr_t)buf[i] & ~PAGE_MASK);
1700			sg_dma_address(sg) = dma[i];
1701		}
1702
1703		INIT_WORK(&s->work_rx, work_fn_rx);
1704		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
1705
1706		sci_submit_rx(s);
1707	}
1708}
1709
1710static void sci_free_dma(struct uart_port *port)
1711{
1712	struct sci_port *s = to_sci_port(port);
1713
1714	if (s->chan_tx)
1715		sci_tx_dma_release(s, false);
1716	if (s->chan_rx)
1717		sci_rx_dma_release(s, false);
1718}
1719#else
1720static inline void sci_request_dma(struct uart_port *port)
1721{
1722}
1723
1724static inline void sci_free_dma(struct uart_port *port)
1725{
1726}
1727#endif
1728
1729static int sci_startup(struct uart_port *port)
1730{
1731	struct sci_port *s = to_sci_port(port);
1732	unsigned long flags;
1733	int ret;
1734
1735	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1736
1737	ret = sci_request_irq(s);
1738	if (unlikely(ret < 0))
1739		return ret;
1740
1741	sci_request_dma(port);
1742
1743	spin_lock_irqsave(&port->lock, flags);
1744	sci_start_tx(port);
1745	sci_start_rx(port);
1746	spin_unlock_irqrestore(&port->lock, flags);
1747
1748	return 0;
1749}
1750
1751static void sci_shutdown(struct uart_port *port)
1752{
1753	struct sci_port *s = to_sci_port(port);
1754	unsigned long flags;
1755
1756	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1757
1758	spin_lock_irqsave(&port->lock, flags);
1759	sci_stop_rx(port);
1760	sci_stop_tx(port);
1761	spin_unlock_irqrestore(&port->lock, flags);
1762
1763	sci_free_dma(port);
1764	sci_free_irq(s);
1765}
1766
1767static unsigned int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
1768				   unsigned long freq)
1769{
1770	if (s->sampling_rate)
1771		return DIV_ROUND_CLOSEST(freq, s->sampling_rate * bps) - 1;
1772
1773	/* Warn, but use a safe default */
1774	WARN_ON(1);
1775
1776	return ((freq + 16 * bps) / (32 * bps) - 1);
1777}
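/*
 * Worked example with illustrative numbers: for a 48 MHz input clock,
 * a sampling rate of 16 and a 115200 bps target,
 * DIV_ROUND_CLOSEST(48000000, 16 * 115200) - 1 = 25, i.e. SCBRR = 25.
 */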
1778
1779/* calculate frame length from SMR */
1780static int sci_baud_calc_frame_len(unsigned int smr_val)
1781{
1782	int len = 10;
1783
1784	if (smr_val & SCSMR_CHR)
1785		len--;
1786	if (smr_val & SCSMR_PE)
1787		len++;
1788	if (smr_val & SCSMR_STOP)
1789		len++;
1790
1791	return len;
1792}
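/* For example, 8N1 (CHR = 0, PE = 0, STOP = 0) gives the default 10-bit frame. */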
1793
1794
1795/* calculate sample rate, BRR, and clock select for HSCIF */
1796static void sci_baud_calc_hscif(unsigned int bps, unsigned long freq,
1797				int *brr, unsigned int *srr,
1798				unsigned int *cks, int frame_len)
1799{
1800	int sr, c, br, err, recv_margin;
1801	int min_err = 1000; /* 100% */
1802	int recv_max_margin = 0;
1803
1804	/* Find the combination of sample rate and clock select with the
1805	   smallest deviation from the desired baud rate. */
1806	for (sr = 8; sr <= 32; sr++) {
1807		for (c = 0; c <= 3; c++) {
1808			/* integerized formulas from HSCIF documentation */
1809			br = DIV_ROUND_CLOSEST(freq, (sr *
1810					      (1 << (2 * c + 1)) * bps)) - 1;
1811			br = clamp(br, 0, 255);
1812			err = DIV_ROUND_CLOSEST(freq, ((br + 1) * bps * sr *
1813					       (1 << (2 * c + 1)) / 1000)) -
1814					       1000;
1815			if (err < 0)
1816				continue;
1817
1818			/* Calc recv margin
1819			 * M: Receive margin (%)
1820			 * N: Ratio of bit rate to clock (N = sampling rate)
1821			 * D: Clock duty (D = 0 to 1.0)
1822			 * L: Frame length (L = 9 to 12)
1823			 * F: Absolute value of clock frequency deviation
1824			 *
			 *  M = |(0.5 - 1 / (2 * N)) - ((L - 0.5) * F) -
			 *      (|D - 0.5| / N * (1 + F))|
			 *  NOTE: This calculation assumes D = 0.5 and F = 0.
1829			 */
1830			recv_margin = abs((500 -
1831					DIV_ROUND_CLOSEST(1000, sr << 1)) / 10);
1832			if (min_err > err) {
1833				min_err = err;
1834				recv_max_margin = recv_margin;
1835			} else if ((min_err == err) &&
1836				   (recv_margin > recv_max_margin))
1837				recv_max_margin = recv_margin;
1838			else
1839				continue;
1840
1841			*brr = br;
1842			*srr = sr - 1;
1843			*cks = c;
1844		}
1845	}
1846
1847	if (min_err == 1000) {
1848		WARN_ON(1);
1849		/* use defaults */
1850		*brr = 255;
1851		*srr = 15;
1852		*cks = 0;
1853	}
1854}
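/*
 * Illustrative example (assuming a 14.7456 MHz HSCIF input clock): for a
 * 115200 bps target several sr/cks combinations hit the rate exactly;
 * the receive-margin tie-break then prefers the largest sample rate,
 * ending up with sr = 32, cks = 0 and BRR = 1 (zero deviation).
 */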
1855
1856static void sci_reset(struct uart_port *port)
1857{
1858	struct plat_sci_reg *reg;
1859	unsigned int status;
1860
1861	do {
1862		status = serial_port_in(port, SCxSR);
1863	} while (!(status & SCxSR_TEND(port)));
1864
1865	serial_port_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
1866
1867	reg = sci_getreg(port, SCFCR);
1868	if (reg->size)
1869		serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
1870}
1871
1872static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1873			    struct ktermios *old)
1874{
1875	struct sci_port *s = to_sci_port(port);
1876	struct plat_sci_reg *reg;
1877	unsigned int baud, smr_val = 0, max_baud, cks = 0;
1878	int t = -1;
1879	unsigned int srr = 15;
1880
1881	if ((termios->c_cflag & CSIZE) == CS7)
1882		smr_val |= SCSMR_CHR;
1883	if (termios->c_cflag & PARENB)
1884		smr_val |= SCSMR_PE;
1885	if (termios->c_cflag & PARODD)
1886		smr_val |= SCSMR_PE | SCSMR_ODD;
1887	if (termios->c_cflag & CSTOPB)
1888		smr_val |= SCSMR_STOP;
1889
	/*
	 * earlyprintk comes here early on with port->uartclk set to zero.
	 * The clock framework is not up and running at this point, so here
	 * we assume that 115200 is the maximum baud rate. Note that the
	 * baud rate is not programmed during earlyprintk - it is assumed
	 * that the boot loader has already enabled the required clocks and
	 * set up the baud rate generator hardware for us.
	 */
1898	max_baud = port->uartclk ? port->uartclk / 16 : 115200;
1899
1900	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
1901	if (likely(baud && port->uartclk)) {
1902		if (s->cfg->type == PORT_HSCIF) {
1903			int frame_len = sci_baud_calc_frame_len(smr_val);
1904			sci_baud_calc_hscif(baud, port->uartclk, &t, &srr,
1905					    &cks, frame_len);
1906		} else {
1907			t = sci_scbrr_calc(s, baud, port->uartclk);
1908			for (cks = 0; t >= 256 && cks <= 3; cks++)
1909				t >>= 2;
1910		}
1911	}
1912
1913	sci_port_enable(s);
1914
1915	sci_reset(port);
1916
1917	smr_val |= serial_port_in(port, SCSMR) & 3;
1918
1919	uart_update_timeout(port, termios->c_cflag, baud);
1920
1921	dev_dbg(port->dev, "%s: SMR %x, cks %x, t %x, SCSCR %x\n",
1922		__func__, smr_val, cks, t, s->cfg->scscr);
1923
1924	if (t >= 0) {
1925		serial_port_out(port, SCSMR, (smr_val & ~SCSMR_CKS) | cks);
1926		serial_port_out(port, SCBRR, t);
1927		reg = sci_getreg(port, HSSRR);
1928		if (reg->size)
1929			serial_port_out(port, HSSRR, srr | HSCIF_SRE);
1930		udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
1931	} else
1932		serial_port_out(port, SCSMR, smr_val);
1933
1934	sci_init_pins(port, termios->c_cflag);
1935
1936	reg = sci_getreg(port, SCFCR);
1937	if (reg->size) {
1938		unsigned short ctrl = serial_port_in(port, SCFCR);
1939
1940		if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) {
1941			if (termios->c_cflag & CRTSCTS)
1942				ctrl |= SCFCR_MCE;
1943			else
1944				ctrl &= ~SCFCR_MCE;
1945		}
1946
1947		/*
1948		 * As we've done a sci_reset() above, ensure we don't
1949		 * interfere with the FIFOs while toggling MCE. As the
1950		 * reset values could still be set, simply mask them out.
1951		 */
1952		ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);
1953
1954		serial_port_out(port, SCFCR, ctrl);
1955	}
1956
1957	serial_port_out(port, SCSCR, s->cfg->scscr);
1958
1959#ifdef CONFIG_SERIAL_SH_SCI_DMA
	/*
	 * Calculate delay for 1.5 DMA buffers: see
	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
	 * (CS8), 250Hz, 115200 baud and a 64-byte FIFO, the above function
	 * calculates 1 jiffy for the data plus 5 jiffies for the "slop(e)".
	 * Below we calculate 3 jiffies (12 ms) for 1.5 DMA buffers (3 FIFO
	 * sizes), but it has been found experimentally that this is not
	 * enough: the driver then needlessly runs on DMA timeouts too
	 * often. A minimum of 20 ms seems to work perfectly.
	 */
1970	if (s->chan_rx) {
1971		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
1972			port->fifosize / 2;
1973		dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n",
1974			s->rx_timeout * 1000 / HZ, port->timeout);
1975		if (s->rx_timeout < msecs_to_jiffies(20))
1976			s->rx_timeout = msecs_to_jiffies(20);
1977	}
1978#endif
1979
1980	if ((termios->c_cflag & CREAD) != 0)
1981		sci_start_rx(port);
1982
1983	sci_port_disable(s);
1984}
1985
1986static void sci_pm(struct uart_port *port, unsigned int state,
1987		   unsigned int oldstate)
1988{
1989	struct sci_port *sci_port = to_sci_port(port);
1990
1991	switch (state) {
1992	case UART_PM_STATE_OFF:
1993		sci_port_disable(sci_port);
1994		break;
1995	default:
1996		sci_port_enable(sci_port);
1997		break;
1998	}
1999}
2000
2001static const char *sci_type(struct uart_port *port)
2002{
2003	switch (port->type) {
2004	case PORT_IRDA:
2005		return "irda";
2006	case PORT_SCI:
2007		return "sci";
2008	case PORT_SCIF:
2009		return "scif";
2010	case PORT_SCIFA:
2011		return "scifa";
2012	case PORT_SCIFB:
2013		return "scifb";
2014	case PORT_HSCIF:
2015		return "hscif";
2016	}
2017
2018	return NULL;
2019}
2020
2021static inline unsigned long sci_port_size(struct uart_port *port)
2022{
2023	/*
2024	 * Pick an arbitrary size that encapsulates all of the base
2025	 * registers by default. This can be optimized later, or derived
2026	 * from platform resource data at such a time that ports begin to
2027	 * behave more erratically.
2028	 */
2029	if (port->type == PORT_HSCIF)
2030		return 96;
2031	else
2032		return 64;
2033}

static int sci_remap_port(struct uart_port *port)
{
	unsigned long size = sci_port_size(port);

	/*
	 * Nothing to do if there's already an established membase.
	 */
	if (port->membase)
		return 0;

	if (port->flags & UPF_IOREMAP) {
		port->membase = ioremap_nocache(port->mapbase, size);
		if (unlikely(!port->membase)) {
			dev_err(port->dev, "can't remap port#%d\n", port->line);
			return -ENXIO;
		}
	} else {
		/*
		 * For the simple (and majority of) cases where we don't
		 * need to do any remapping, just cast the cookie
		 * directly.
		 */
		port->membase = (void __iomem *)(uintptr_t)port->mapbase;
	}

	return 0;
}

static void sci_release_port(struct uart_port *port)
{
	if (port->flags & UPF_IOREMAP) {
		iounmap(port->membase);
		port->membase = NULL;
	}

	release_mem_region(port->mapbase, sci_port_size(port));
}

static int sci_request_port(struct uart_port *port)
{
	unsigned long size = sci_port_size(port);
	struct resource *res;
	int ret;

	res = request_mem_region(port->mapbase, size, dev_name(port->dev));
	if (unlikely(res == NULL))
		return -EBUSY;

	ret = sci_remap_port(port);
	if (unlikely(ret != 0)) {
		release_resource(res);
		return ret;
	}

	return 0;
}

static void sci_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		struct sci_port *sport = to_sci_port(port);

		port->type = sport->cfg->type;
		sci_request_port(port);
	}
}

static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	if (ser->baud_base < 2400)
		/* No paper tape reader for Mitch.. */
		return -EINVAL;

	return 0;
}

static struct uart_ops sci_uart_ops = {
	.tx_empty	= sci_tx_empty,
	.set_mctrl	= sci_set_mctrl,
	.get_mctrl	= sci_get_mctrl,
	.start_tx	= sci_start_tx,
	.stop_tx	= sci_stop_tx,
	.stop_rx	= sci_stop_rx,
	.break_ctl	= sci_break_ctl,
	.startup	= sci_startup,
	.shutdown	= sci_shutdown,
	.set_termios	= sci_set_termios,
	.pm		= sci_pm,
	.type		= sci_type,
	.release_port	= sci_release_port,
	.request_port	= sci_request_port,
	.config_port	= sci_config_port,
	.verify_port	= sci_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= sci_poll_get_char,
	.poll_put_char	= sci_poll_put_char,
#endif
};
static int sci_init_single(struct platform_device *dev,
			   struct sci_port *sci_port, unsigned int index,
			   struct plat_sci_port *p, bool early)
{
	struct uart_port *port = &sci_port->port;
	const struct resource *res;
	unsigned int sampling_rate;
	unsigned int i;
	int ret;

	sci_port->cfg	= p;

	port->ops	= &sci_uart_ops;
	port->iotype	= UPIO_MEM;
	port->line	= index;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -ENOMEM;

	port->mapbase = res->start;

	for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
		sci_port->irqs[i] = platform_get_irq(dev, i);

	/* The SCI generates several interrupts. They can be muxed together or
	 * connected to different interrupt lines. In the muxed case only one
	 * interrupt resource is specified. In the non-muxed case three or four
	 * interrupt resources are specified, as the BRI interrupt is optional.
	 */
	if (sci_port->irqs[0] < 0)
		return -ENXIO;

	if (sci_port->irqs[1] < 0) {
		sci_port->irqs[1] = sci_port->irqs[0];
		sci_port->irqs[2] = sci_port->irqs[0];
		sci_port->irqs[3] = sci_port->irqs[0];
	}

	if (p->regtype == SCIx_PROBE_REGTYPE) {
		ret = sci_probe_regmap(p);
		if (unlikely(ret))
			return ret;
	}

	switch (p->type) {
	case PORT_SCIFB:
		port->fifosize = 256;
		sci_port->overrun_bit = 9;
		sampling_rate = 16;
		break;
	case PORT_HSCIF:
		port->fifosize = 128;
		sampling_rate = 0;
		sci_port->overrun_bit = 0;
		break;
	case PORT_SCIFA:
		port->fifosize = 64;
		sci_port->overrun_bit = 9;
		sampling_rate = 16;
		break;
	case PORT_SCIF:
		port->fifosize = 16;
		if (p->regtype == SCIx_SH7705_SCIF_REGTYPE) {
			sci_port->overrun_bit = 9;
			sampling_rate = 16;
		} else {
			sci_port->overrun_bit = 0;
			sampling_rate = 32;
		}
		break;
	default:
		port->fifosize = 1;
		sci_port->overrun_bit = 5;
		sampling_rate = 32;
		break;
	}

	/* The SCIFA ports on sh7723 and sh7724 need a custom sampling rate
	 * that doesn't match the SoC datasheet; this should be investigated.
	 * Let platform data override the sampling rate for now.
	 */
	sci_port->sampling_rate = p->sampling_rate ? p->sampling_rate
				: sampling_rate;

	if (!early) {
		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
		if (IS_ERR(sci_port->iclk)) {
			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
			if (IS_ERR(sci_port->iclk)) {
				dev_err(&dev->dev, "can't get iclk\n");
				return PTR_ERR(sci_port->iclk);
			}
		}

		/*
		 * The function clock is optional; ignore it if we can't
		 * find it.
		 */
		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
		if (IS_ERR(sci_port->fclk))
			sci_port->fclk = NULL;

		port->dev = &dev->dev;

		pm_runtime_enable(&dev->dev);
	}

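	/*
	 * Prepare the break timer. The receive/error paths elsewhere in the
	 * driver arm it while a break condition appears to be in progress,
	 * holding break_flag so the resulting framing errors are not
	 * reported as data errors.
	 */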
	sci_port->break_timer.data = (unsigned long)sci_port;
	sci_port->break_timer.function = sci_break_timer;
	init_timer(&sci_port->break_timer);

	/*
	 * Establish some sensible defaults for the error detection.
	 */
	sci_port->error_mask = (p->type == PORT_SCI) ?
			SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;

	/*
	 * Establish sensible defaults for overrun detection by making the
	 * error mask inclusive of the overrun bit.
	 */
	sci_port->error_mask |= 1 << sci_port->overrun_bit;

	port->type		= p->type;
	port->flags		= UPF_FIXED_PORT | p->flags;
	port->regshift		= p->regshift;

	/*
	 * The UART port needs an IRQ value, so we peg this to the RX IRQ
	 * for the multi-IRQ ports, which is where we are primarily
	 * concerned with the shutdown path synchronization.
	 *
	 * For the muxed case there's nothing more to do.
	 */
	port->irq		= sci_port->irqs[SCIx_RXI_IRQ];
	port->irqflags		= 0;

	port->serial_in		= sci_serial_in;
	port->serial_out	= sci_serial_out;

	if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0)
		dev_dbg(port->dev, "DMA tx %d, rx %d\n",
			p->dma_slave_tx, p->dma_slave_rx);

	return 0;
}

static void sci_cleanup_single(struct sci_port *port)
{
	clk_put(port->iclk);
	clk_put(port->fclk);

	pm_runtime_disable(port->port.dev);
}

#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
static void serial_console_putchar(struct uart_port *port, int ch)
{
	sci_poll_put_char(port, ch);
}

/*
 *	Print a string to the serial port trying not to disturb
 *	any possible real use of the port...
 */
static void serial_console_write(struct console *co, const char *s,
				 unsigned count)
{
	struct sci_port *sci_port = &sci_ports[co->index];
	struct uart_port *port = &sci_port->port;
	unsigned short bits, ctrl;
	unsigned long flags;
	int locked = 1;

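	/*
	 * Console output may run concurrently with, or be called from,
	 * contexts that already hold the port lock: don't lock at all for
	 * sysrq output and only trylock while an oops is in progress, so a
	 * dying system can still get its final messages out.
	 */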
	local_irq_save(flags);
	if (port->sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&port->lock);
	else
		spin_lock(&port->lock);

	/* first save the SCSCR then disable the interrupts */
	ctrl = serial_port_in(port, SCSCR);
	serial_port_out(port, SCSCR, sci_port->cfg->scscr);

	uart_console_write(port, s, count, serial_console_putchar);

	/* wait until fifo is empty and last bit has been transmitted */
	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
	while ((serial_port_in(port, SCxSR) & bits) != bits)
		cpu_relax();

	/* restore the SCSCR */
	serial_port_out(port, SCSCR, ctrl);

	if (locked)
		spin_unlock(&port->lock);
	local_irq_restore(flags);
}

static int serial_console_setup(struct console *co, char *options)
{
	struct sci_port *sci_port;
	struct uart_port *port;
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Refuse to handle any bogus ports.
	 */
	if (co->index < 0 || co->index >= SCI_NPORTS)
		return -ENODEV;

	sci_port = &sci_ports[co->index];
	port = &sci_port->port;

	/*
	 * Refuse to handle uninitialized ports.
	 */
	if (!port->ops)
		return -ENODEV;

	ret = sci_remap_port(port);
	if (unlikely(ret != 0))
		return ret;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(port, co, baud, parity, bits, flow);
}

static struct console serial_console = {
	.name		= "ttySC",
	.device		= uart_console_device,
	.write		= serial_console_write,
	.setup		= serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &sci_uart_driver,
};
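
/*
 * The console is selected with the usual kernel command line syntax,
 * e.g. "console=ttySC0,115200". When no options are given,
 * serial_console_setup() falls back to 115200n8.
 */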

static struct console early_serial_console = {
	.name           = "early_ttySC",
	.write          = serial_console_write,
	.flags          = CON_PRINTBUFFER,
	.index		= -1,
};

static char early_serial_buf[32];

static int sci_probe_earlyprintk(struct platform_device *pdev)
{
	struct plat_sci_port *cfg = dev_get_platdata(&pdev->dev);

	if (early_serial_console.data)
		return -EEXIST;

	early_serial_console.index = pdev->id;

	sci_init_single(pdev, &sci_ports[pdev->id], pdev->id, cfg, true);

	serial_console_setup(&early_serial_console, early_serial_buf);

	if (!strstr(early_serial_buf, "keep"))
		early_serial_console.flags |= CON_BOOT;

	register_console(&early_serial_console);
	return 0;
}

#define SCI_CONSOLE	(&serial_console)

#else
static inline int sci_probe_earlyprintk(struct platform_device *pdev)
{
	return -EINVAL;
}

#define SCI_CONSOLE	NULL

#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */

static const char banner[] __initconst = "SuperH (H)SCI(F) driver initialized";

static struct uart_driver sci_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "sci",
	.dev_name	= "ttySC",
	.major		= SCI_MAJOR,
	.minor		= SCI_MINOR_START,
	.nr		= SCI_NPORTS,
	.cons		= SCI_CONSOLE,
};

static int sci_remove(struct platform_device *dev)
{
	struct sci_port *port = platform_get_drvdata(dev);

	cpufreq_unregister_notifier(&port->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);

	uart_remove_one_port(&sci_uart_driver, &port->port);

	sci_cleanup_single(port);

	return 0;
}

struct sci_port_info {
	unsigned int type;
	unsigned int regtype;
};

static const struct of_device_id of_sci_match[] = {
	{
		.compatible = "renesas,scif",
		.data = &(const struct sci_port_info) {
			.type = PORT_SCIF,
			.regtype = SCIx_SH4_SCIF_REGTYPE,
		},
	}, {
		.compatible = "renesas,scifa",
		.data = &(const struct sci_port_info) {
			.type = PORT_SCIFA,
			.regtype = SCIx_SCIFA_REGTYPE,
		},
	}, {
		.compatible = "renesas,scifb",
		.data = &(const struct sci_port_info) {
			.type = PORT_SCIFB,
			.regtype = SCIx_SCIFB_REGTYPE,
		},
	}, {
		.compatible = "renesas,hscif",
		.data = &(const struct sci_port_info) {
			.type = PORT_HSCIF,
			.regtype = SCIx_HSCIF_REGTYPE,
		},
	}, {
		/* Terminator */
	},
};
MODULE_DEVICE_TABLE(of, of_sci_match);

static struct plat_sci_port *
sci_parse_dt(struct platform_device *pdev, unsigned int *dev_id)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *match;
	const struct sci_port_info *info;
	struct plat_sci_port *p;
	int id;

	if (!IS_ENABLED(CONFIG_OF) || !np)
		return NULL;

	match = of_match_node(of_sci_match, pdev->dev.of_node);
	if (!match)
		return NULL;

	info = match->data;

	p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
	if (!p) {
		dev_err(&pdev->dev, "failed to allocate DT config data\n");
		return NULL;
	}

	/* Get the line number from the aliases node. */
	id = of_alias_get_id(np, "serial");
	if (id < 0) {
		dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
		return NULL;
	}

	*dev_id = id;

	p->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
	p->type = info->type;
	p->regtype = info->regtype;
	p->scscr = SCSCR_RE | SCSCR_TE;

	return p;
}
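
/*
 * An illustrative device tree fragment that the table above would match
 * (a sketch only; the unit address, register size and interrupt specifier
 * are placeholders, and clock properties are omitted):
 *
 *	aliases {
 *		serial0 = &scif0;
 *	};
 *
 *	scif0: serial@e6e60000 {
 *		compatible = "renesas,scif";
 *		reg = <0xe6e60000 64>;
 *		interrupts = <0 152 4>;
 *	};
 */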

static int sci_probe_single(struct platform_device *dev,
				      unsigned int index,
				      struct plat_sci_port *p,
				      struct sci_port *sciport)
{
	int ret;

	/* Sanity check */
	if (unlikely(index >= SCI_NPORTS)) {
		dev_notice(&dev->dev, "Attempting to register port %d when only %d are available\n",
			   index+1, SCI_NPORTS);
		dev_notice(&dev->dev, "Consider bumping CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
		return -EINVAL;
	}

	ret = sci_init_single(dev, sciport, index, p, false);
	if (ret)
		return ret;

	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
	if (ret) {
		sci_cleanup_single(sciport);
		return ret;
	}

	return 0;
}

static int sci_probe(struct platform_device *dev)
{
	struct plat_sci_port *p;
	struct sci_port *sp;
	unsigned int dev_id;
	int ret;

	/*
	 * If we've come here via earlyprintk initialization, head off to
	 * the special early probe. We don't have sufficient device state
	 * to make it beyond this yet.
	 */
	if (is_early_platform_device(dev))
		return sci_probe_earlyprintk(dev);

	if (dev->dev.of_node) {
		p = sci_parse_dt(dev, &dev_id);
		if (p == NULL)
			return -EINVAL;
	} else {
		p = dev->dev.platform_data;
		if (p == NULL) {
			dev_err(&dev->dev, "no platform data supplied\n");
			return -EINVAL;
		}

		dev_id = dev->id;
	}

	sp = &sci_ports[dev_id];
	platform_set_drvdata(dev, sp);

	ret = sci_probe_single(dev, dev_id, p, sp);
	if (ret)
		return ret;

	sp->freq_transition.notifier_call = sci_notifier;

	ret = cpufreq_register_notifier(&sp->freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (unlikely(ret < 0)) {
		uart_remove_one_port(&sci_uart_driver, &sp->port);
		sci_cleanup_single(sp);
		return ret;
	}

#ifdef CONFIG_SH_STANDARD_BIOS
	sh_bios_gdb_detach();
#endif

	return 0;
}
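
/*
 * In the non-DT case a board file registers the "sh-sci" platform device
 * itself. A minimal illustrative sketch (the base address and IRQ number
 * below are placeholders, not taken from a real SoC):
 *
 *	static struct plat_sci_port scif0_platform_data = {
 *		.flags		= UPF_BOOT_AUTOCONF,
 *		.scscr		= SCSCR_RE | SCSCR_TE,
 *		.type		= PORT_SCIF,
 *		.regtype	= SCIx_SH4_SCIF_REGTYPE,
 *	};
 *
 *	static struct resource scif0_resources[] = {
 *		DEFINE_RES_MEM(0xffe00000, 0x100),
 *		DEFINE_RES_IRQ(80),
 *	};
 *
 *	static struct platform_device scif0_device = {
 *		.name		= "sh-sci",
 *		.id		= 0,
 *		.resource	= scif0_resources,
 *		.num_resources	= ARRAY_SIZE(scif0_resources),
 *		.dev		= {
 *			.platform_data	= &scif0_platform_data,
 *		},
 *	};
 */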

static int sci_suspend(struct device *dev)
{
	struct sci_port *sport = dev_get_drvdata(dev);

	if (sport)
		uart_suspend_port(&sci_uart_driver, &sport->port);

	return 0;
}

static int sci_resume(struct device *dev)
{
	struct sci_port *sport = dev_get_drvdata(dev);

	if (sport)
		uart_resume_port(&sci_uart_driver, &sport->port);

	return 0;
}

static const struct dev_pm_ops sci_dev_pm_ops = {
	.suspend	= sci_suspend,
	.resume		= sci_resume,
};

static struct platform_driver sci_driver = {
	.probe		= sci_probe,
	.remove		= sci_remove,
	.driver		= {
		.name	= "sh-sci",
		.owner	= THIS_MODULE,
		.pm	= &sci_dev_pm_ops,
		.of_match_table = of_match_ptr(of_sci_match),
	},
};

static int __init sci_init(void)
{
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&sci_uart_driver);
	if (likely(ret == 0)) {
		ret = platform_driver_register(&sci_driver);
		if (unlikely(ret))
			uart_unregister_driver(&sci_uart_driver);
	}

	return ret;
}

static void __exit sci_exit(void)
{
	platform_driver_unregister(&sci_driver);
	uart_unregister_driver(&sci_uart_driver);
}

#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
early_platform_init_buffer("earlyprintk", &sci_driver,
			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
module_init(sci_init);
module_exit(sci_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-sci");
MODULE_AUTHOR("Paul Mundt");
MODULE_DESCRIPTION("SuperH (H)SCI(F) serial driver");