xref: /linux/drivers/tty/serial/sh-sci.c (revision 3e4cd0737d2e9c3dd52153a23aef1753e3a99fc4)
1 /*
2  * drivers/serial/sh-sci.c
3  *
4  * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
5  *
6  *  Copyright (C) 2002 - 2011  Paul Mundt
7  *  Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
8  *
9  * based off of the old drivers/char/sh-sci.c by:
10  *
11  *   Copyright (C) 1999, 2000  Niibe Yutaka
12  *   Copyright (C) 2000  Sugioka Toshinobu
13  *   Modified to support multiple serial ports. Stuart Menefy (May 2000).
14  *   Modified to support SecureEdge. David McCullough (2002)
15  *   Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
16  *   Removed SH7300 support (Jul 2007).
17  *
18  * This file is subject to the terms and conditions of the GNU General Public
19  * License.  See the file "COPYING" in the main directory of this archive
20  * for more details.
21  */
22 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
23 #define SUPPORT_SYSRQ
24 #endif
25 
26 #undef DEBUG
27 
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/timer.h>
31 #include <linux/interrupt.h>
32 #include <linux/tty.h>
33 #include <linux/tty_flip.h>
34 #include <linux/serial.h>
35 #include <linux/major.h>
36 #include <linux/string.h>
37 #include <linux/sysrq.h>
38 #include <linux/ioport.h>
39 #include <linux/mm.h>
40 #include <linux/init.h>
41 #include <linux/delay.h>
42 #include <linux/console.h>
43 #include <linux/platform_device.h>
44 #include <linux/serial_sci.h>
45 #include <linux/notifier.h>
46 #include <linux/pm_runtime.h>
47 #include <linux/cpufreq.h>
48 #include <linux/clk.h>
49 #include <linux/ctype.h>
50 #include <linux/err.h>
51 #include <linux/dmaengine.h>
52 #include <linux/scatterlist.h>
53 #include <linux/slab.h>
54 
55 #ifdef CONFIG_SUPERH
56 #include <asm/sh_bios.h>
57 #endif
58 
59 #ifdef CONFIG_H8300
60 #include <asm/gpio.h>
61 #endif
62 
63 #include "sh-sci.h"
64 
struct sci_port {
	struct uart_port	port;

	/* Platform configuration */
	struct plat_sci_port	*cfg;

	/* Port enable callback */
	void			(*enable)(struct uart_port *port);

	/* Port disable callback */
	void			(*disable)(struct uart_port *port);

	/*
	 * Break timer: periodically samples the Rx line to debounce a
	 * break condition (see sci_break_timer()); break_flag holds the
	 * debounce state machine's current state.
	 */
	struct timer_list	break_timer;
	int			break_flag;

	/* Interface clock */
	struct clk		*iclk;
	/* Function clock */
	struct clk		*fclk;

	/* DMA channels; NULL when the port runs in PIO mode */
	struct dma_chan			*chan_tx;
	struct dma_chan			*chan_rx;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx[2];	/* double-buffered Rx */
	dma_cookie_t			cookie_tx;
	dma_cookie_t			cookie_rx[2];
	dma_cookie_t			active_rx;	/* cookie of in-flight Rx buffer */
	struct scatterlist		sg_tx;
	unsigned int			sg_len_tx;
	struct scatterlist		sg_rx[2];
	size_t				buf_len_rx;	/* size of one Rx DMA buffer */
	struct sh_dmae_slave		param_tx;
	struct sh_dmae_slave		param_rx;
	struct work_struct		work_tx;
	struct work_struct		work_rx;
	struct timer_list		rx_timer;	/* Rx DMA timeout timer */
	unsigned int			rx_timeout;	/* in jiffies, used with rx_timer */
#endif

	/* cpufreq transition notifier: tracks peripheral clock changes */
	struct notifier_block		freq_transition;
};
109 
110 /* Function prototypes */
111 static void sci_start_tx(struct uart_port *port);
112 static void sci_stop_tx(struct uart_port *port);
113 static void sci_start_rx(struct uart_port *port);
114 
115 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
116 
117 static struct sci_port sci_ports[SCI_NPORTS];
118 static struct uart_driver sci_uart_driver;
119 
/* Convert a generic uart_port back to its enclosing sci_port. */
static inline struct sci_port *
to_sci_port(struct uart_port *uart)
{
	return container_of(uart, struct sci_port, port);
}
125 
126 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
127 
#ifdef CONFIG_CONSOLE_POLL
/*
 * Polled-mode (kgdb-style) receive: spin on the status register,
 * clearing and retrying on any receive error, until a clean sample is
 * taken.  Returns the received character, or NO_POLL_CHAR when no data
 * is pending.
 */
static int sci_poll_get_char(struct uart_port *port)
{
	unsigned short status;
	int c;

	do {
		status = sci_in(port, SCxSR);
		if (status & SCxSR_ERRORS(port)) {
			/* Ack the error condition and sample again */
			sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
			continue;
		}
		break;
	} while (1);

	if (!(status & SCxSR_RDxF(port)))
		return NO_POLL_CHAR;

	c = sci_in(port, SCxRDR);

	/* Dummy read */
	sci_in(port, SCxSR);
	sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

	return c;
}
#endif
155 
/*
 * Polled-mode transmit: busy-wait until the transmit data register is
 * empty, write the character, then ack TDxE while leaving the TEND bit
 * untouched (the &~SCxSR_TEND masks it out of the write-back value).
 */
static void sci_poll_put_char(struct uart_port *port, unsigned char c)
{
	unsigned short status;

	do {
		status = sci_in(port, SCxSR);
	} while (!(status & SCxSR_TDxE(port)));

	sci_out(port, SCxTDR, c);
	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
167 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
168 
/*
 * sci_init_pins(): per-SoC pin-mux setup for the SCI/SCIF lines.
 * Selected at compile time; the chosen variant configures Tx/Rx (and,
 * where supported, RTS/CTS depending on CRTSCTS in @cflag).
 */
#if defined(__H8300H__) || defined(__H8300S__)
/* H8/300: route Rx as GPIO input, Tx as GPIO output via the DDR regs. */
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	int ch = (port->mapbase - SMR0) >> 3;

	/* set DDR regs */
	H8300_GPIO_DDR(h8300_sci_pins[ch].port,
		       h8300_sci_pins[ch].rx,
		       H8300_GPIO_INPUT);
	H8300_GPIO_DDR(h8300_sci_pins[ch].port,
		       h8300_sci_pins[ch].tx,
		       H8300_GPIO_OUTPUT);

	/* tx mark output*/
	H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
/* SH7710/SH7712: select SCIF function in the port A/B control registers. */
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	if (port->mapbase == 0xA4400000) {
		__raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
		__raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
	} else if (port->mapbase == 0xA4410000)
		__raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721)
/* SH7720/SH7721: pin-mux per port base, with or without RTS/CTS lines. */
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	if (cflag & CRTSCTS) {
		/* enable RTS/CTS */
		if (port->mapbase == 0xa4430000) { /* SCIF0 */
			/* Clear PTCR bit 9-2; enable all scif pins but sck */
			data = __raw_readw(PORT_PTCR);
			__raw_writew((data & 0xfc03), PORT_PTCR);
		} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
			/* Clear PVCR bit 9-2 */
			data = __raw_readw(PORT_PVCR);
			__raw_writew((data & 0xfc03), PORT_PVCR);
		}
	} else {
		if (port->mapbase == 0xa4430000) { /* SCIF0 */
			/* Clear PTCR bit 5-2; enable only tx and rx  */
			data = __raw_readw(PORT_PTCR);
			__raw_writew((data & 0xffc3), PORT_PTCR);
		} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
			/* Clear PVCR bit 5-2 */
			data = __raw_readw(PORT_PVCR);
			__raw_writew((data & 0xffc3), PORT_PVCR);
		}
	}
}
#elif defined(CONFIG_CPU_SH3)
/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	/* We need to set SCPCR to enable RTS/CTS */
	data = __raw_readw(SCPCR);
	/* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
	__raw_writew(data & 0x0fcf, SCPCR);

	if (!(cflag & CRTSCTS)) {
		/* We need to set SCPCR to enable RTS/CTS */
		data = __raw_readw(SCPCR);
		/* Clear out SCP7MD1,0, SCP4MD1,0,
		   Set SCP6MD1,0 = {01} (output)  */
		__raw_writew((data & 0x0fcf) | 0x1000, SCPCR);

		data = __raw_readb(SCPDR);
		/* Set /RTS2 (bit6) = 0 */
		__raw_writeb(data & 0xbf, SCPDR);
	}
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
/* SH7722: only the port at 0xffe00000 has configurable pins here. */
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	if (port->mapbase == 0xffe00000) {
		data = __raw_readw(PSCR);
		data &= ~0x03cf;
		if (!(cflag & CRTSCTS))
			data |= 0x0340;

		__raw_writew(data, PSCR);
	}
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
      defined(CONFIG_CPU_SUBTYPE_SH7763) || \
      defined(CONFIG_CPU_SUBTYPE_SH7780) || \
      defined(CONFIG_CPU_SUBTYPE_SH7785) || \
      defined(CONFIG_CPU_SUBTYPE_SH7786) || \
      defined(CONFIG_CPU_SUBTYPE_SHX3)
/* SH4A parts: without hardware flow control, drive RTS high manually. */
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	if (!(cflag & CRTSCTS))
		__raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */
}
#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A)
/* Plain SH4: same idea, different serial port register. */
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	if (!(cflag & CRTSCTS))
		__raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */
}
#else
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	/* Nothing to do */
}
#endif
282 
/*
 * SCIF FIFO accounting helpers, selected per SoC:
 *   scif_txfill() - characters currently sitting in the Tx FIFO
 *   scif_txroom() - free slots remaining in the Tx FIFO
 *   scif_rxfill() - characters pending in the Rx FIFO
 * The FIFO-count register and its layout differ between parts, hence
 * the compile-time variants below.
 */
#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
    defined(CONFIG_CPU_SUBTYPE_SH7780) || \
    defined(CONFIG_CPU_SUBTYPE_SH7785) || \
    defined(CONFIG_CPU_SUBTYPE_SH7786)
/* Parts with dedicated SCTFDR/SCRFDR FIFO data-count registers. */
static int scif_txfill(struct uart_port *port)
{
	return sci_in(port, SCTFDR) & 0xff;
}

static int scif_txroom(struct uart_port *port)
{
	return SCIF_TXROOM_MAX - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	return sci_in(port, SCRFDR) & 0xff;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
/* SH7763: SCIF0/1 use SCTFDR/SCRFDR, SCIF2 packs counts into SCFDR. */
static int scif_txfill(struct uart_port *port)
{
	if (port->mapbase == 0xffe00000 ||
	    port->mapbase == 0xffe08000)
		/* SCIF0/1*/
		return sci_in(port, SCTFDR) & 0xff;
	else
		/* SCIF2 */
		return sci_in(port, SCFDR) >> 8;
}

static int scif_txroom(struct uart_port *port)
{
	if (port->mapbase == 0xffe00000 ||
	    port->mapbase == 0xffe08000)
		/* SCIF0/1*/
		return SCIF_TXROOM_MAX - scif_txfill(port);
	else
		/* SCIF2 */
		return SCIF2_TXROOM_MAX - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	if ((port->mapbase == 0xffe00000) ||
	    (port->mapbase == 0xffe08000)) {
		/* SCIF0/1*/
		return sci_in(port, SCRFDR) & 0xff;
	} else {
		/* SCIF2 */
		return sci_in(port, SCFDR) & SCIF2_RFDC_MASK;
	}
}
#elif defined(CONFIG_ARCH_SH7372)
/* SH7372: SCIFA packs both counts into SCFDR, SCIFB has wide FDRs. */
static int scif_txfill(struct uart_port *port)
{
	if (port->type == PORT_SCIFA)
		return sci_in(port, SCFDR) >> 8;
	else
		return sci_in(port, SCTFDR);
}

static int scif_txroom(struct uart_port *port)
{
	return port->fifosize - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	if (port->type == PORT_SCIFA)
		return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
	else
		return sci_in(port, SCRFDR);
}
#else
/* Generic SCIF: Tx count in the high byte of SCFDR, Rx in the low. */
static int scif_txfill(struct uart_port *port)
{
	return sci_in(port, SCFDR) >> 8;
}

static int scif_txroom(struct uart_port *port)
{
	return SCIF_TXROOM_MAX - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
}
#endif
372 
373 static int sci_txfill(struct uart_port *port)
374 {
375 	return !(sci_in(port, SCxSR) & SCI_TDRE);
376 }
377 
/* Room for exactly one character once the Tx data register drains. */
static int sci_txroom(struct uart_port *port)
{
	return sci_txfill(port) == 0;
}
382 
383 static int sci_rxfill(struct uart_port *port)
384 {
385 	return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
386 }
387 
388 /* ********************************************************************** *
389  *                   the interrupt related routines                       *
390  * ********************************************************************** */
391 
/*
 * Drain the Tx circular buffer into the transmitter.
 *
 * Called with the port lock held (see sci_tx_interrupt()).  If the Tx
 * data register/FIFO is not yet empty we only adjust the TIE interrupt
 * enable and bail out; otherwise up to txroom characters are written,
 * with the flow-control x_char taking priority over buffered data.
 */
static void sci_transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int stopped = uart_tx_stopped(port);
	unsigned short status;
	unsigned short ctrl;
	int count;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_TDxE(port))) {
		/* Transmitter busy: keep TIE armed only while data remains */
		ctrl = sci_in(port, SCSCR);
		if (uart_circ_empty(xmit))
			ctrl &= ~SCSCR_TIE;
		else
			ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
		return;
	}

	if (port->type == PORT_SCI)
		count = sci_txroom(port);
	else
		count = scif_txroom(port);

	do {
		unsigned char c;

		if (port->x_char) {
			/* High-priority XON/XOFF character */
			c = port->x_char;
			port->x_char = 0;
		} else if (!uart_circ_empty(xmit) && !stopped) {
			c = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		} else {
			break;
		}

		sci_out(port, SCxTDR, c);

		port->icount.tx++;
	} while (--count > 0);

	/* Ack the Tx-empty condition we just serviced */
	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (uart_circ_empty(xmit)) {
		sci_stop_tx(port);
	} else {
		ctrl = sci_in(port, SCSCR);

		if (port->type != PORT_SCI) {
			sci_in(port, SCxSR); /* Dummy read */
			sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
		}

		/* More data pending: re-arm the Tx-empty interrupt */
		ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
	}
}
452 
/* On SH3, SCIF may read end-of-break as a space->mark char */
/*
 * STEPFN(c) is true when ((c-1)|c) has all bits set, i.e. for c == 0
 * and for values of the form -(2^k) — the bit patterns produced when
 * the end of a break is mis-sampled as a character.
 */
#define STEPFN(c)  ({int __c = (c); (((__c-1)|(__c)) == -1); })

/*
 * PIO receive path: pull pending characters out of the Rx data
 * register/FIFO into the tty flip buffer, mapping frame/parity errors
 * to TTY_FRAME/TTY_PARITY flags and filtering sysrq and break-debounce
 * characters.  Loops until the FIFO or the tty buffer is exhausted.
 */
static void sci_receive_chars(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);
	struct tty_struct *tty = port->state->port.tty;
	int i, count, copied = 0;
	unsigned short status;
	unsigned char flag;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_RDxF(port)))
		return;

	while (1) {
		if (port->type == PORT_SCI)
			count = sci_rxfill(port);
		else
			count = scif_rxfill(port);

		/* Don't copy more bytes than there is room for in the buffer */
		count = tty_buffer_request_room(tty, count);

		/* If for any reason we can't copy more data, we're done! */
		if (count == 0)
			break;

		if (port->type == PORT_SCI) {
			char c = sci_in(port, SCxRDR);
			if (uart_handle_sysrq_char(port, c) ||
			    sci_port->break_flag)
				count = 0;
			else
				tty_insert_flip_char(tty, c, TTY_NORMAL);
		} else {
			for (i = 0; i < count; i++) {
				char c = sci_in(port, SCxRDR);
				status = sci_in(port, SCxSR);
#if defined(CONFIG_CPU_SH3)
				/* Skip "chars" during break */
				if (sci_port->break_flag) {
					if ((c == 0) &&
					    (status & SCxSR_FER(port))) {
						count--; i--;
						continue;
					}

					/* Nonzero => end-of-break */
					dev_dbg(port->dev, "debounce<%02x>\n", c);
					sci_port->break_flag = 0;

					if (STEPFN(c)) {
						count--; i--;
						continue;
					}
				}
#endif /* CONFIG_CPU_SH3 */
				if (uart_handle_sysrq_char(port, c)) {
					count--; i--;
					continue;
				}

				/* Store data and status */
				if (status & SCxSR_FER(port)) {
					flag = TTY_FRAME;
					dev_notice(port->dev, "frame error\n");
				} else if (status & SCxSR_PER(port)) {
					flag = TTY_PARITY;
					dev_notice(port->dev, "parity error\n");
				} else
					flag = TTY_NORMAL;

				tty_insert_flip_char(tty, c, flag);
			}
		}

		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

		copied += count;
		port->icount.rx += count;
	}

	if (copied) {
		/* Tell the rest of the system the news. New characters! */
		tty_flip_buffer_push(tty);
	} else {
		/* Nothing copied: still ack RDxF so the interrupt clears */
		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
	}
}
545 
/* Break-debounce poll interval: 50ms at HZ=1000, 1 tick minimum. */
#define SCI_BREAK_JIFFIES (HZ/20)

/*
 * The sci generates interrupts during the break,
 * 1 per millisecond or so during the break period, for 9600 baud.
 * So dont bother disabling interrupts.
 * But dont want more than 1 break event.
 * Use a kernel timer to periodically poll the rx line until
 * the break is finished.
 */
static inline void sci_schedule_break_timer(struct sci_port *port)
{
	mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
}
560 
/* Ensure that two consecutive samples find the break over. */
/*
 * Timer callback for break debouncing.  break_flag progresses
 * 1 (break in progress) -> 2 (one clean sample seen) -> 0 (done);
 * the timer is re-armed until two consecutive samples show the Rx
 * line high again.
 */
static void sci_break_timer(unsigned long data)
{
	struct sci_port *port = (struct sci_port *)data;

	/* Power the port up around the Rx line sample */
	if (port->enable)
		port->enable(&port->port);

	if (sci_rxd_in(&port->port) == 0) {
		/* Line still low: break continues */
		port->break_flag = 1;
		sci_schedule_break_timer(port);
	} else if (port->break_flag == 1) {
		/* break is over. */
		port->break_flag = 2;
		sci_schedule_break_timer(port);
	} else
		port->break_flag = 0;

	if (port->disable)
		port->disable(&port->port);
}
582 
583 static int sci_handle_errors(struct uart_port *port)
584 {
585 	int copied = 0;
586 	unsigned short status = sci_in(port, SCxSR);
587 	struct tty_struct *tty = port->state->port.tty;
588 
589 	if (status & SCxSR_ORER(port)) {
590 		/* overrun error */
591 		if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
592 			copied++;
593 
594 		dev_notice(port->dev, "overrun error");
595 	}
596 
597 	if (status & SCxSR_FER(port)) {
598 		if (sci_rxd_in(port) == 0) {
599 			/* Notify of BREAK */
600 			struct sci_port *sci_port = to_sci_port(port);
601 
602 			if (!sci_port->break_flag) {
603 				sci_port->break_flag = 1;
604 				sci_schedule_break_timer(sci_port);
605 
606 				/* Do sysrq handling. */
607 				if (uart_handle_break(port))
608 					return 0;
609 
610 				dev_dbg(port->dev, "BREAK detected\n");
611 
612 				if (tty_insert_flip_char(tty, 0, TTY_BREAK))
613 					copied++;
614 			}
615 
616 		} else {
617 			/* frame error */
618 			if (tty_insert_flip_char(tty, 0, TTY_FRAME))
619 				copied++;
620 
621 			dev_notice(port->dev, "frame error\n");
622 		}
623 	}
624 
625 	if (status & SCxSR_PER(port)) {
626 		/* parity error */
627 		if (tty_insert_flip_char(tty, 0, TTY_PARITY))
628 			copied++;
629 
630 		dev_notice(port->dev, "parity error");
631 	}
632 
633 	if (copied)
634 		tty_flip_buffer_push(tty);
635 
636 	return copied;
637 }
638 
/*
 * Check for and clear a SCIF FIFO overrun (ORER in SCLSR), queueing a
 * TTY_OVERRUN marker to the tty layer when one occurred.  Only plain
 * SCIF ports are handled here; other port types have no SCLSR access
 * in this path.  Returns 1 if an overrun was reported, 0 otherwise.
 */
static int sci_handle_fifo_overrun(struct uart_port *port)
{
	struct tty_struct *tty = port->state->port.tty;
	int copied = 0;

	if (port->type != PORT_SCIF)
		return 0;

	if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) {
		/* Writing 0 clears the sticky overrun flag */
		sci_out(port, SCLSR, 0);

		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		tty_flip_buffer_push(tty);

		dev_notice(port->dev, "overrun error\n");
		copied++;
	}

	return copied;
}
659 
/*
 * Service a break interrupt: run sysrq break handling, queue a
 * TTY_BREAK marker for a newly detected break (debounced on SH3 via
 * break_flag), and fold in any FIFO overrun.  Returns the number of
 * characters pushed to the tty layer.
 */
static int sci_handle_breaks(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = sci_in(port, SCxSR);
	struct tty_struct *tty = port->state->port.tty;
	struct sci_port *s = to_sci_port(port);

	if (uart_handle_break(port))
		return 0;

	if (!s->break_flag && status & SCxSR_BRK(port)) {
#if defined(CONFIG_CPU_SH3)
		/* Debounce break */
		s->break_flag = 1;
#endif
		/* Notify of BREAK */
		if (tty_insert_flip_char(tty, 0, TTY_BREAK))
			copied++;

		dev_dbg(port->dev, "BREAK detected\n");
	}

	if (copied)
		tty_flip_buffer_push(tty);

	copied += sci_handle_fifo_overrun(port);

	return copied;
}
689 
/*
 * Rx interrupt handler.  With DMA active, the interrupt only signals
 * that data has started arriving: Rx interrupts are masked again and
 * the DMA timeout timer is (re)armed; the actual data is moved by the
 * DMA completion path.  Without DMA, characters are drained in PIO
 * mode via sci_receive_chars().
 */
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

	if (s->chan_rx) {
		u16 scr = sci_in(port, SCSCR);
		u16 ssr = sci_in(port, SCxSR);

		/* Disable future Rx interrupts */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			disable_irq_nosync(irq);
			/* NOTE(review): bit 14 of SCSCR on SCIFA/SCIFB is
			 * assumed to gate the Rx DMA request — confirm
			 * against the datasheet. */
			scr |= 0x4000;
		} else {
			scr &= ~SCSCR_RIE;
		}
		sci_out(port, SCSCR, scr);
		/* Clear current interrupt (RDxF plus status bit 0) */
		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
			jiffies, s->rx_timeout);
		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

		return IRQ_HANDLED;
	}
#endif

	/* I think sci_receive_chars has to be called irrespective
	 * of whether the I_IXOFF is set, otherwise, how is the interrupt
	 * to be disabled?
	 */
	sci_receive_chars(ptr);

	return IRQ_HANDLED;
}
726 
/*
 * Tx interrupt handler: simply runs the PIO transmit path under the
 * port lock (sci_transmit_chars expects the lock to be held).
 */
static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	sci_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
738 
/*
 * Error interrupt handler.  On plain SCI, decode the error bits and
 * discard the bad character; on FIFO-equipped ports, account for a
 * FIFO overrun and fall through to the normal Rx path.  The error
 * flags are then acked and the transmitter is kicked in case it was
 * stalled by the error condition.
 */
static irqreturn_t sci_er_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle errors */
	if (port->type == PORT_SCI) {
		if (sci_handle_errors(port)) {
			/* discard character in rx buffer */
			sci_in(port, SCxSR);
			sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
		}
	} else {
		sci_handle_fifo_overrun(port);
		sci_rx_interrupt(irq, ptr);
	}

	sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));

	/* Kick the transmission */
	sci_tx_interrupt(irq, ptr);

	return IRQ_HANDLED;
}
762 
/* Break interrupt handler: process the break, then ack the BRK flag. */
static irqreturn_t sci_br_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle BREAKs */
	sci_handle_breaks(port);
	sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));

	return IRQ_HANDLED;
}
773 
/*
 * Build the SCSCR mask of Rx-related interrupt enables for this port:
 * always RIE, plus REIE when the platform data asked for it.
 */
static inline unsigned long port_rx_irq_mask(struct uart_port *port)
{
	/*
	 * Not all ports (such as SCIFA) will support REIE. Rather than
	 * special-casing the port type, we check the port initialization
	 * IRQ enable mask to see whether the IRQ is desired at all. If
	 * it's unset, it's logically inferred that there's no point in
	 * testing for it.
	 */
	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
}
785 
/*
 * Multiplexed interrupt handler for platforms that wire all four SCI
 * events to a single IRQ line: decode SCxSR/SCSCR and dispatch to the
 * Tx, Rx, error and break handlers as appropriate.
 */
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
	unsigned short ssr_status, scr_status, err_enabled;
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);
	irqreturn_t ret = IRQ_NONE;

	ssr_status = sci_in(port, SCxSR);
	scr_status = sci_in(port, SCSCR);
	err_enabled = scr_status & port_rx_irq_mask(port);

	/* Tx Interrupt (skipped when DMA owns the Tx side) */
	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
	    !s->chan_tx)
		ret = sci_tx_interrupt(irq, ptr);

	/*
	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
	 * DR flags
	 */
	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
	    (scr_status & SCSCR_RIE))
		ret = sci_rx_interrupt(irq, ptr);

	/* Error Interrupt */
	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
		ret = sci_er_interrupt(irq, ptr);

	/* Break Interrupt */
	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
		ret = sci_br_interrupt(irq, ptr);

	return ret;
}
820 
821 /*
822  * Here we define a transition notifier so that we can update all of our
823  * ports' baud rate when the peripheral clock changes.
824  */
825 static int sci_notifier(struct notifier_block *self,
826 			unsigned long phase, void *p)
827 {
828 	struct sci_port *sci_port;
829 	unsigned long flags;
830 
831 	sci_port = container_of(self, struct sci_port, freq_transition);
832 
833 	if ((phase == CPUFREQ_POSTCHANGE) ||
834 	    (phase == CPUFREQ_RESUMECHANGE)) {
835 		struct uart_port *port = &sci_port->port;
836 
837 		spin_lock_irqsave(&port->lock, flags);
838 		port->uartclk = clk_get_rate(sci_port->iclk);
839 		spin_unlock_irqrestore(&port->lock, flags);
840 	}
841 
842 	return NOTIFY_OK;
843 }
844 
/*
 * Power the port up: take a runtime-PM reference, enable the interface
 * and function clocks, and refresh uartclk from the interface clock.
 */
static void sci_clk_enable(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);

	pm_runtime_get_sync(port->dev);

	clk_enable(sci_port->iclk);
	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
	clk_enable(sci_port->fclk);
}
855 
/*
 * Power the port down: disable clocks in the reverse order of
 * sci_clk_enable() and drop the runtime-PM reference.
 */
static void sci_clk_disable(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);

	clk_disable(sci_port->fclk);
	clk_disable(sci_port->iclk);

	pm_runtime_put_sync(port->dev);
}
865 
866 static int sci_request_irq(struct sci_port *port)
867 {
868 	int i;
869 	irqreturn_t (*handlers[4])(int irq, void *ptr) = {
870 		sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt,
871 		sci_br_interrupt,
872 	};
873 	const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
874 			       "SCI Transmit Data Empty", "SCI Break" };
875 
876 	if (port->cfg->irqs[0] == port->cfg->irqs[1]) {
877 		if (unlikely(!port->cfg->irqs[0]))
878 			return -ENODEV;
879 
880 		if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt,
881 				IRQF_DISABLED, "sci", port)) {
882 			dev_err(port->port.dev, "Can't allocate IRQ\n");
883 			return -ENODEV;
884 		}
885 	} else {
886 		for (i = 0; i < ARRAY_SIZE(handlers); i++) {
887 			if (unlikely(!port->cfg->irqs[i]))
888 				continue;
889 
890 			if (request_irq(port->cfg->irqs[i], handlers[i],
891 					IRQF_DISABLED, desc[i], port)) {
892 				dev_err(port->port.dev, "Can't allocate IRQ\n");
893 				return -ENODEV;
894 			}
895 		}
896 	}
897 
898 	return 0;
899 }
900 
/*
 * Release the port's IRQs — the single muxed line, or each individual
 * vector, mirroring how sci_request_irq() acquired them.
 */
static void sci_free_irq(struct sci_port *port)
{
	int i;

	if (port->cfg->irqs[0] == port->cfg->irqs[1])
		free_irq(port->cfg->irqs[0], port);
	else {
		for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) {
			if (!port->cfg->irqs[i])
				continue;

			free_irq(port->cfg->irqs[i], port);
		}
	}
}
916 
/*
 * uart_ops .tx_empty: report TIOCSER_TEMT when transmission has ended
 * (TEND set) and the Tx FIFO is drained.
 * NOTE(review): scif_txfill() is called even for PORT_SCI ports —
 * verify this is valid for FIFO-less SCI register layouts.
 */
static unsigned int sci_tx_empty(struct uart_port *port)
{
	unsigned short status = sci_in(port, SCxSR);
	unsigned short in_tx_fifo = scif_txfill(port);

	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
924 
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* This routine is used for setting signals of: DTR, DCD, CTS/RTS */
	/* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
	/* If you have signals for DTR and DCD, please implement here. */
}
931 
static unsigned int sci_get_mctrl(struct uart_port *port)
{
	/* This routine is used for getting signals of: DTR, DCD, DSR, RI,
	   and CTS/RTS */

	/* No modem-status inputs are wired up: report them all asserted. */
	return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
}
939 
940 #ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
 * Tx DMA completion callback: advance the circular buffer tail past the
 * transferred region, retire the descriptor, and either queue the next
 * chunk via work_tx or (on SCIFA/SCIFB) drop TIE once the buffer is
 * empty.
 */
static void sci_dma_tx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	spin_lock_irqsave(&port->lock, flags);

	/* Consume exactly the bytes the scatterlist described */
	xmit->tail += sg_dma_len(&s->sg_tx);
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += sg_dma_len(&s->sg_tx);

	async_tx_ack(s->desc_tx);
	s->cookie_tx = -EINVAL;
	s->desc_tx = NULL;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit)) {
		/* More data buffered: schedule the next DMA submission */
		schedule_work(&s->work_tx);
	} else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 ctrl = sci_in(port, SCSCR);
		sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}
973 
974 /* Locking: called with port lock held */
975 static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
976 			   size_t count)
977 {
978 	struct uart_port *port = &s->port;
979 	int i, active, room;
980 
981 	room = tty_buffer_request_room(tty, count);
982 
983 	if (s->active_rx == s->cookie_rx[0]) {
984 		active = 0;
985 	} else if (s->active_rx == s->cookie_rx[1]) {
986 		active = 1;
987 	} else {
988 		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
989 		return 0;
990 	}
991 
992 	if (room < count)
993 		dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
994 			 count - room);
995 	if (!room)
996 		return room;
997 
998 	for (i = 0; i < room; i++)
999 		tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
1000 				     TTY_NORMAL);
1001 
1002 	port->icount.rx += room;
1003 
1004 	return room;
1005 }
1006 
/*
 * Rx DMA completion callback: push the full buffer to the tty layer,
 * re-arm the DMA timeout timer, and schedule work_rx to resubmit the
 * just-completed buffer.
 */
static void sci_dma_rx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct tty_struct *tty = port->state->port.tty;
	unsigned long flags;
	int count;

	dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);

	spin_lock_irqsave(&port->lock, flags);

	count = sci_dma_rx_push(s, tty, s->buf_len_rx);

	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

	spin_unlock_irqrestore(&port->lock, flags);

	/* Flip-buffer push must happen outside the port lock */
	if (count)
		tty_flip_buffer_push(tty);

	schedule_work(&s->work_rx);
}
1030 
/*
 * Tear down the Rx DMA channel and its coherent buffer; when
 * @enable_pio is set, fall back to interrupt-driven (PIO) receive.
 */
static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;

	/* Invalidate channel and cookies before releasing the channel */
	s->chan_rx = NULL;
	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
	dma_release_channel(chan);
	if (sg_dma_address(&s->sg_rx[0]))
		dma_free_coherent(port->dev, s->buf_len_rx * 2,
				  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
	if (enable_pio)
		sci_start_rx(port);
}
1045 
/*
 * Tear down the Tx DMA channel; when @enable_pio is set, resume
 * interrupt-driven (PIO) transmission.
 */
static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;

	s->chan_tx = NULL;
	s->cookie_tx = -EINVAL;
	dma_release_channel(chan);
	if (enable_pio)
		sci_start_tx(port);
}
1057 
/*
 * Prepare and submit both Rx DMA descriptors (double buffering).  On
 * any preparation/submission failure, previously submitted work is
 * acked and the port falls back to PIO via sci_rx_dma_release().
 */
static void sci_submit_rx(struct sci_port *s)
{
	struct dma_chan *chan = s->chan_rx;
	int i;

	for (i = 0; i < 2; i++) {
		struct scatterlist *sg = &s->sg_rx[i];
		struct dma_async_tx_descriptor *desc;

		desc = chan->device->device_prep_slave_sg(chan,
			sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);

		if (desc) {
			s->desc_rx[i] = desc;
			desc->callback = sci_dma_rx_complete;
			desc->callback_param = s;
			s->cookie_rx[i] = desc->tx_submit(desc);
		}

		if (!desc || s->cookie_rx[i] < 0) {
			/* Roll back anything already submitted */
			if (i) {
				async_tx_ack(s->desc_rx[0]);
				s->cookie_rx[0] = -EINVAL;
			}
			if (desc) {
				async_tx_ack(desc);
				s->cookie_rx[i] = -EINVAL;
			}
			dev_warn(s->port.dev,
				 "failed to re-start DMA, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}
		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
			s->cookie_rx[i], i);
	}

	/* Buffer 0 is filled first */
	s->active_rx = s->cookie_rx[0];

	dma_async_issue_pending(chan);
}
1099 
/*
 * Rx workqueue handler: if the active DMA transfer finished, resubmit
 * its descriptor and flip active_rx to the other buffer; if it is
 * still partial (DMA timed out), terminate it, push whatever arrived,
 * and restart DMA from scratch via sci_submit_rx().
 */
static void work_fn_rx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_rx);
	struct uart_port *port = &s->port;
	struct dma_async_tx_descriptor *desc;
	int new;

	/* Map the active cookie back to a buffer index */
	if (s->active_rx == s->cookie_rx[0]) {
		new = 0;
	} else if (s->active_rx == s->cookie_rx[1]) {
		new = 1;
	} else {
		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
		return;
	}
	desc = s->desc_rx[new];

	if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
	    DMA_SUCCESS) {
		/* Handle incomplete DMA receive */
		struct tty_struct *tty = port->state->port.tty;
		struct dma_chan *chan = s->chan_rx;
		struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
						       async_tx);
		unsigned long flags;
		int count;

		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
		dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
			sh_desc->partial, sh_desc->cookie);

		spin_lock_irqsave(&port->lock, flags);
		count = sci_dma_rx_push(s, tty, sh_desc->partial);
		spin_unlock_irqrestore(&port->lock, flags);

		if (count)
			tty_flip_buffer_push(tty);

		/* Restart double-buffered DMA from a clean state */
		sci_submit_rx(s);

		return;
	}

	s->cookie_rx[new] = desc->tx_submit(desc);
	if (s->cookie_rx[new] < 0) {
		dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
		sci_rx_dma_release(s, true);
		return;
	}

	/* The other buffer is now the one being filled */
	s->active_rx = s->cookie_rx[!new];

	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
		s->cookie_rx[new], new, s->active_rx);
}
1155 
/*
 * Tx workqueue handler: carve the next contiguous chunk out of the
 * (page-sized, pre-mapped) circular xmit buffer, prepare a slave DMA
 * descriptor for it and submit it.  Any failure switches the port back
 * to PIO transmission.
 */
static void work_fn_tx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_tx);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	struct scatterlist *sg = &s->sg_tx;

	/*
	 * DMA is idle now.
	 * Port xmit buffer is already mapped, and it is one page... Just adjust
	 * offsets and lengths. Since it is a circular buffer, we have to
	 * transmit till the end, and then the rest. Take the port lock to get a
	 * consistent xmit buffer state.
	 */
	spin_lock_irq(&port->lock);
	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
		sg->offset;
	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
	spin_unlock_irq(&port->lock);

	/* sci_start_tx() only schedules this work with data pending. */
	BUG_ON(!sg_dma_len(sg));

	desc = chan->device->device_prep_slave_sg(chan,
			sg, s->sg_len_tx, DMA_TO_DEVICE,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);

	spin_lock_irq(&port->lock);
	s->desc_tx = desc;
	desc->callback = sci_dma_tx_complete;
	desc->callback_param = s;
	spin_unlock_irq(&port->lock);
	s->cookie_tx = desc->tx_submit(desc);
	if (s->cookie_tx < 0) {
		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
		xmit->buf, xmit->tail, xmit->head, s->cookie_tx);

	dma_async_issue_pending(chan);
}
1211 #endif
1212 
/*
 * uart_ops .start_tx: on SCIFA/SCIFB keep the SCSCR 0x8000 bit in sync
 * with whether a Tx DMA channel is in use, kick the DMA Tx work if data
 * is pending and no transfer is in flight, and enable the Tx interrupt
 * for the PIO path.
 */
static void sci_start_tx(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned short ctrl;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 new, scr = sci_in(port, SCSCR);
		if (s->chan_tx)
			new = scr | 0x8000;
		else
			new = scr & ~0x8000;
		/* Avoid a redundant register write. */
		if (new != scr)
			sci_out(port, SCSCR, new);
	}

	/* cookie_tx < 0 means no DMA transfer currently submitted. */
	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
	    s->cookie_tx < 0)
		schedule_work(&s->work_tx);
#endif

	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
		ctrl = sci_in(port, SCSCR);
		sci_out(port, SCSCR, ctrl | SCSCR_TIE);
	}
}
1240 
1241 static void sci_stop_tx(struct uart_port *port)
1242 {
1243 	unsigned short ctrl;
1244 
1245 	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1246 	ctrl = sci_in(port, SCSCR);
1247 
1248 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1249 		ctrl &= ~0x8000;
1250 
1251 	ctrl &= ~SCSCR_TIE;
1252 
1253 	sci_out(port, SCSCR, ctrl);
1254 }
1255 
1256 static void sci_start_rx(struct uart_port *port)
1257 {
1258 	unsigned short ctrl;
1259 
1260 	ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);
1261 
1262 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1263 		ctrl &= ~0x4000;
1264 
1265 	sci_out(port, SCSCR, ctrl);
1266 }
1267 
1268 static void sci_stop_rx(struct uart_port *port)
1269 {
1270 	unsigned short ctrl;
1271 
1272 	ctrl = sci_in(port, SCSCR);
1273 
1274 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1275 		ctrl &= ~0x4000;
1276 
1277 	ctrl &= ~port_rx_irq_mask(port);
1278 
1279 	sci_out(port, SCSCR, ctrl);
1280 }
1281 
/* uart_ops .enable_ms: modem-status interrupts are not implemented. */
static void sci_enable_ms(struct uart_port *port)
{
	/* Nothing here yet .. */
}
1286 
/* uart_ops .break_ctl: break generation is not implemented. */
static void sci_break_ctl(struct uart_port *port, int break_state)
{
	/* Nothing here yet .. */
}
1291 
1292 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1293 static bool filter(struct dma_chan *chan, void *slave)
1294 {
1295 	struct sh_dmae_slave *param = slave;
1296 
1297 	dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
1298 		param->slave_id);
1299 
1300 	if (param->dma_dev == chan->device->dev) {
1301 		chan->private = param;
1302 		return true;
1303 	} else {
1304 		return false;
1305 	}
1306 }
1307 
/*
 * Rx DMA timeout: the DMA buffer did not fill within rx_timeout.
 * Re-enable the Rx interrupt(s) so remaining characters can be taken by
 * PIO, and schedule work_fn_rx() to recover the partial transfer.
 */
static void rx_timer_fn(unsigned long arg)
{
	struct sci_port *s = (struct sci_port *)arg;
	struct uart_port *port = &s->port;
	u16 scr = sci_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Clear the SCIFA/SCIFB-only 0x4000 bit and unmask irqs[1]. */
		scr &= ~0x4000;
		enable_irq(s->cfg->irqs[1]);
	}
	sci_out(port, SCSCR, scr | SCSCR_RIE);
	dev_dbg(port->dev, "DMA Rx timed out\n");
	schedule_work(&s->work_rx);
}
1322 
/*
 * Try to acquire DMA channels for the port.  Tx and Rx are set up
 * independently; failure of either direction simply leaves it in PIO
 * mode.  A NULL cfg->dma_dev means DMA was not configured for this port.
 */
static void sci_request_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	struct sh_dmae_slave *param;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int nent;

	dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
		port->line, s->cfg->dma_dev);

	if (!s->cfg->dma_dev)
		return;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	param = &s->param_tx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
	param->slave_id = s->cfg->dma_slave_tx;
	param->dma_dev = s->cfg->dma_dev;

	/* Negative cookie = no Tx DMA in flight (see sci_start_tx()). */
	s->cookie_tx = -EINVAL;
	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
	if (chan) {
		s->chan_tx = chan;
		sg_init_table(&s->sg_tx, 1);
		/* UART circular tx buffer is an aligned page. */
		BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
		sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
			    UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
		nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
		if (!nent)
			sci_tx_dma_release(s, false);
		else
			dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
				sg_dma_len(&s->sg_tx),
				port->state->xmit.buf, sg_dma_address(&s->sg_tx));

		s->sg_len_tx = nent;

		INIT_WORK(&s->work_tx, work_fn_tx);
	}

	param = &s->param_rx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
	param->slave_id = s->cfg->dma_slave_rx;
	param->dma_dev = s->cfg->dma_dev;

	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
	if (chan) {
		dma_addr_t dma[2];
		void *buf[2];
		int i;

		s->chan_rx = chan;

		/* One coherent allocation, split into the two ring halves. */
		s->buf_len_rx = 2 * max(16, (int)port->fifosize);
		buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
					    &dma[0], GFP_KERNEL);

		if (!buf[0]) {
			dev_warn(port->dev,
				 "failed to allocate dma buffer, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}

		buf[1] = buf[0] + s->buf_len_rx;
		dma[1] = dma[0] + s->buf_len_rx;

		for (i = 0; i < 2; i++) {
			struct scatterlist *sg = &s->sg_rx[i];

			sg_init_table(sg, 1);
			sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
				    (int)buf[i] & ~PAGE_MASK);
			sg_dma_address(sg) = dma[i];
		}

		INIT_WORK(&s->work_rx, work_fn_rx);
		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);

		sci_submit_rx(s);
	}
}
1413 
/* Release any DMA channels acquired in sci_request_dma(). */
static void sci_free_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	/* No DMA device configured: nothing was ever requested. */
	if (!s->cfg->dma_dev)
		return;

	if (s->chan_tx)
		sci_tx_dma_release(s, false);
	if (s->chan_rx)
		sci_rx_dma_release(s, false);
}
1426 #else
/* DMA support compiled out: no-op. */
static inline void sci_request_dma(struct uart_port *port)
{
}
1430 
/* DMA support compiled out: no-op. */
static inline void sci_free_dma(struct uart_port *port)
{
}
1434 #endif
1435 
/*
 * uart_ops .startup: enable the port clocks (via the enable hook, if
 * set), request IRQs and optional DMA channels, then start both
 * directions.  Returns 0 or the error from sci_request_irq().
 */
static int sci_startup(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	int ret;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	if (s->enable)
		s->enable(port);

	ret = sci_request_irq(s);
	if (unlikely(ret < 0))
		return ret;

	sci_request_dma(port);

	sci_start_tx(port);
	sci_start_rx(port);

	return 0;
}
1457 
/*
 * uart_ops .shutdown: reverse of sci_startup() - stop both directions,
 * free DMA and IRQ resources, then disable the port clocks.
 */
static void sci_shutdown(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	sci_stop_rx(port);
	sci_stop_tx(port);

	sci_free_dma(port);
	sci_free_irq(s);

	if (s->disable)
		s->disable(port);
}
1473 
1474 static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
1475 				   unsigned long freq)
1476 {
1477 	switch (algo_id) {
1478 	case SCBRR_ALGO_1:
1479 		return ((freq + 16 * bps) / (16 * bps) - 1);
1480 	case SCBRR_ALGO_2:
1481 		return ((freq + 16 * bps) / (32 * bps) - 1);
1482 	case SCBRR_ALGO_3:
1483 		return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
1484 	case SCBRR_ALGO_4:
1485 		return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
1486 	case SCBRR_ALGO_5:
1487 		return (((freq * 1000 / 32) / bps) - 1);
1488 	}
1489 
1490 	/* Warn, but use a safe default */
1491 	WARN_ON(1);
1492 
1493 	return ((freq + 16 * bps) / (32 * bps) - 1);
1494 }
1495 
/*
 * uart_ops .set_termios: program SMR (frame format), SCBRR (bit rate)
 * and the FIFO/flow-control registers for the requested line settings.
 * The transmitter is drained and the port briefly disabled while the
 * registers are rewritten.
 */
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
			    struct ktermios *old)
{
	struct sci_port *s = to_sci_port(port);
	unsigned int status, baud, smr_val, max_baud;
	int t = -1;
	u16 scfcr = 0;

	/*
	 * earlyprintk comes here early on with port->uartclk set to zero.
	 * the clock framework is not up and running at this point so here
	 * we assume that 115200 is the maximum baud rate. please note that
	 * the baud rate is not programmed during earlyprintk - it is assumed
	 * that the previous boot loader has enabled required clocks and
	 * setup the baud rate generator hardware for us already.
	 */
	max_baud = port->uartclk ? port->uartclk / 16 : 115200;

	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
	if (likely(baud && port->uartclk))
		t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);

	if (s->enable)
		s->enable(port);

	/* Busy-wait until the transmitter has fully drained. */
	do {
		status = sci_in(port, SCxSR);
	} while (!(status & SCxSR_TEND(port)));

	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */

	if (port->type != PORT_SCI)
		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);

	/* Preserve only the two low (clock-select) bits of SMR. */
	smr_val = sci_in(port, SCSMR) & 3;

	/*
	 * NOTE(review): PARODD sets 0x30 (both parity bits) even when PARENB
	 * is clear - confirm this matches the intended termios semantics.
	 */
	if ((termios->c_cflag & CSIZE) == CS7)
		smr_val |= 0x40;
	if (termios->c_cflag & PARENB)
		smr_val |= 0x20;
	if (termios->c_cflag & PARODD)
		smr_val |= 0x30;
	if (termios->c_cflag & CSTOPB)
		smr_val |= 0x08;

	uart_update_timeout(port, termios->c_cflag, baud);

	sci_out(port, SCSMR, smr_val);

	dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
		s->cfg->scscr);

	if (t > 0) {
		/* Divisor too big for SCBRR: bump SMR[1:0] and scale t by 4. */
		if (t >= 256) {
			sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
			t >>= 2;
		} else
			sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3);

		sci_out(port, SCBRR, t);
		udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
	}

	sci_init_pins(port, termios->c_cflag);
	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));

	sci_out(port, SCSCR, s->cfg->scscr);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	/*
	 * Calculate delay for 1.5 DMA buffers: see
	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
	 * sizes), but it has been found out experimentally, that this is not
	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
	 * as a minimum seem to work perfectly.
	 */
	if (s->chan_rx) {
		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
			port->fifosize / 2;
		dev_dbg(port->dev,
			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
			s->rx_timeout * 1000 / HZ, port->timeout);
		if (s->rx_timeout < msecs_to_jiffies(20))
			s->rx_timeout = msecs_to_jiffies(20);
	}
#endif

	if ((termios->c_cflag & CREAD) != 0)
		sci_start_rx(port);

	if (s->disable)
		s->disable(port);
}
1592 
1593 static const char *sci_type(struct uart_port *port)
1594 {
1595 	switch (port->type) {
1596 	case PORT_IRDA:
1597 		return "irda";
1598 	case PORT_SCI:
1599 		return "sci";
1600 	case PORT_SCIF:
1601 		return "scif";
1602 	case PORT_SCIFA:
1603 		return "scifa";
1604 	case PORT_SCIFB:
1605 		return "scifb";
1606 	}
1607 
1608 	return NULL;
1609 }
1610 
/* Size in bytes of the register window claimed/mapped for a port. */
static inline unsigned long sci_port_size(struct uart_port *port)
{
	/*
	 * Pick an arbitrary size that encapsulates all of the base
	 * registers by default. This can be optimized later, or derived
	 * from platform resource data at such a time that ports begin to
	 * behave more erratically.
	 */
	return 64;
}
1621 
/*
 * Establish port->membase for register access: ioremap when the platform
 * asked for it (UPF_IOREMAP), otherwise reuse the physical cookie
 * directly.  Idempotent - an existing membase is left untouched.
 */
static int sci_remap_port(struct uart_port *port)
{
	unsigned long size = sci_port_size(port);

	/*
	 * Nothing to do if there's already an established membase.
	 */
	if (port->membase)
		return 0;

	if (port->flags & UPF_IOREMAP) {
		port->membase = ioremap_nocache(port->mapbase, size);
		if (unlikely(!port->membase)) {
			dev_err(port->dev, "can't remap port#%d\n", port->line);
			return -ENXIO;
		}
	} else {
		/*
		 * For the simple (and majority of) cases where we don't
		 * need to do any remapping, just cast the cookie
		 * directly.
		 */
		port->membase = (void __iomem *)port->mapbase;
	}

	return 0;
}
1649 
/*
 * uart_ops .release_port: undo sci_request_port() - unmap the registers
 * if we ioremapped them, then release the memory region.
 */
static void sci_release_port(struct uart_port *port)
{
	if (port->flags & UPF_IOREMAP) {
		iounmap(port->membase);
		port->membase = NULL;
	}

	release_mem_region(port->mapbase, sci_port_size(port));
}
1659 
1660 static int sci_request_port(struct uart_port *port)
1661 {
1662 	unsigned long size = sci_port_size(port);
1663 	struct resource *res;
1664 	int ret;
1665 
1666 	res = request_mem_region(port->mapbase, size, dev_name(port->dev));
1667 	if (unlikely(res == NULL))
1668 		return -EBUSY;
1669 
1670 	ret = sci_remap_port(port);
1671 	if (unlikely(ret != 0)) {
1672 		release_resource(res);
1673 		return ret;
1674 	}
1675 
1676 	return 0;
1677 }
1678 
1679 static void sci_config_port(struct uart_port *port, int flags)
1680 {
1681 	if (flags & UART_CONFIG_TYPE) {
1682 		struct sci_port *sport = to_sci_port(port);
1683 
1684 		port->type = sport->cfg->type;
1685 		sci_request_port(port);
1686 	}
1687 }
1688 
/*
 * uart_ops .verify_port: sanity-check user-supplied serial settings.
 * The IRQ must match the TXI entry from platform data and be in range.
 */
static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct sci_port *s = to_sci_port(port);

	if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
		return -EINVAL;
	if (ser->baud_base < 2400)
		/* No paper tape reader for Mitch.. */
		return -EINVAL;

	return 0;
}
1701 
/* uart_ops hooks wiring this driver into the serial core. */
static struct uart_ops sci_uart_ops = {
	.tx_empty	= sci_tx_empty,
	.set_mctrl	= sci_set_mctrl,
	.get_mctrl	= sci_get_mctrl,
	.start_tx	= sci_start_tx,
	.stop_tx	= sci_stop_tx,
	.stop_rx	= sci_stop_rx,
	.enable_ms	= sci_enable_ms,
	.break_ctl	= sci_break_ctl,
	.startup	= sci_startup,
	.shutdown	= sci_shutdown,
	.set_termios	= sci_set_termios,
	.type		= sci_type,
	.release_port	= sci_release_port,
	.request_port	= sci_request_port,
	.config_port	= sci_config_port,
	.verify_port	= sci_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= sci_poll_get_char,
	.poll_put_char	= sci_poll_put_char,
#endif
};
1724 
/*
 * One-time initialization of a single port from its platform data.
 * @dev may be NULL: sci_probe_earlyprintk() calls this before the
 * platform device model is available, in which case clocks, the
 * enable/disable hooks and runtime PM are skipped.
 */
static int __devinit sci_init_single(struct platform_device *dev,
				     struct sci_port *sci_port,
				     unsigned int index,
				     struct plat_sci_port *p)
{
	struct uart_port *port = &sci_port->port;

	port->ops	= &sci_uart_ops;
	port->iotype	= UPIO_MEM;
	port->line	= index;

	/* FIFO depth depends on the hardware variant. */
	switch (p->type) {
	case PORT_SCIFB:
		port->fifosize = 256;
		break;
	case PORT_SCIFA:
		port->fifosize = 64;
		break;
	case PORT_SCIF:
		port->fifosize = 16;
		break;
	default:
		port->fifosize = 1;
		break;
	}

	if (dev) {
		/* Interface clock, with a legacy name as fallback. */
		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
		if (IS_ERR(sci_port->iclk)) {
			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
			if (IS_ERR(sci_port->iclk)) {
				dev_err(&dev->dev, "can't get iclk\n");
				return PTR_ERR(sci_port->iclk);
			}
		}

		/*
		 * The function clock is optional, ignore it if we can't
		 * find it.
		 */
		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
		if (IS_ERR(sci_port->fclk))
			sci_port->fclk = NULL;

		sci_port->enable = sci_clk_enable;
		sci_port->disable = sci_clk_disable;
		port->dev = &dev->dev;

		pm_runtime_enable(&dev->dev);
	}

	sci_port->break_timer.data = (unsigned long)sci_port;
	sci_port->break_timer.function = sci_break_timer;
	init_timer(&sci_port->break_timer);

	sci_port->cfg		= p;

	port->mapbase		= p->mapbase;
	port->type		= p->type;
	port->flags		= p->flags;

	/*
	 * The UART port needs an IRQ value, so we peg this to the RXI entry
	 * of the IRQ table.  NOTE(review): the original comment said "TX
	 * IRQ" while the code uses SCIx_RXI_IRQ - confirm which is intended.
	 *
	 * For the muxed case there's nothing more to do.
	 */
	port->irq		= p->irqs[SCIx_RXI_IRQ];

	if (p->dma_dev)
		dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
			p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);

	return 0;
}
1801 
1802 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
/* uart_console_write() callback: emit one character by polling. */
static void serial_console_putchar(struct uart_port *port, int ch)
{
	sci_poll_put_char(port, ch);
}
1807 
1808 /*
1809  *	Print a string to the serial port trying not to disturb
1810  *	any possible real use of the port...
1811  */
static void serial_console_write(struct console *co, const char *s,
				 unsigned count)
{
	struct sci_port *sci_port = &sci_ports[co->index];
	struct uart_port *port = &sci_port->port;
	unsigned short bits;

	/* Enable the port clocks for the duration of the write. */
	if (sci_port->enable)
		sci_port->enable(port);

	uart_console_write(port, s, count, serial_console_putchar);

	/* wait until fifo is empty and last bit has been transmitted */
	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
	while ((sci_in(port, SCxSR) & bits) != bits)
		cpu_relax();

	if (sci_port->disable)
		sci_port->disable(port);
}
1832 
/*
 * Console ->setup: map the port registers if needed, enable the port
 * clocks and program the line settings.  Defaults to 115200 8N1 unless
 * overridden by the "options" string.
 */
static int __devinit serial_console_setup(struct console *co, char *options)
{
	struct sci_port *sci_port;
	struct uart_port *port;
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Refuse to handle any bogus ports.
	 */
	if (co->index < 0 || co->index >= SCI_NPORTS)
		return -ENODEV;

	sci_port = &sci_ports[co->index];
	port = &sci_port->port;

	/*
	 * Refuse to handle uninitialized ports.
	 */
	if (!port->ops)
		return -ENODEV;

	ret = sci_remap_port(port);
	if (unlikely(ret != 0))
		return ret;

	if (sci_port->enable)
		sci_port->enable(port);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	ret = uart_set_options(port, co, baud, parity, bits, flow);
#if defined(__H8300H__) || defined(__H8300S__)
	/* disable rx interrupt */
	if (ret == 0)
		sci_stop_rx(port);
#endif
	/* TODO: disable clock */
	return ret;
}
1877 
/* The regular (post-boot) SCI console, bound to the ttySC driver. */
static struct console serial_console = {
	.name		= "ttySC",
	.device		= uart_console_device,
	.write		= serial_console_write,
	.setup		= serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &sci_uart_driver,
};
1887 
/* Minimal console used for earlyprintk, before the driver model is up. */
static struct console early_serial_console = {
	.name           = "early_ttySC",
	.write          = serial_console_write,
	.flags          = CON_PRINTBUFFER,
	.index		= -1,
};
1894 
/* Captures the earlyprintk= option string (see early_platform_init_buffer). */
static char early_serial_buf[32];
1896 
/*
 * Early-platform probe for earlyprintk: initialize the port without a
 * real platform device and register the early console.  Unless "keep"
 * was given on the command line, the console is marked CON_BOOT so it is
 * replaced by the regular console later.
 *
 * NOTE(review): pdev->id indexes sci_ports[] without a range check here
 * - presumably guaranteed by the early platform setup; confirm.
 */
static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
{
	struct plat_sci_port *cfg = pdev->dev.platform_data;

	/* Bail out if the early console was already set up. */
	if (early_serial_console.data)
		return -EEXIST;

	early_serial_console.index = pdev->id;

	sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);

	serial_console_setup(&early_serial_console, early_serial_buf);

	if (!strstr(early_serial_buf, "keep"))
		early_serial_console.flags |= CON_BOOT;

	register_console(&early_serial_console);
	return 0;
}
1916 
1917 #define SCI_CONSOLE	(&serial_console)
1918 
1919 #else
/* Early console support compiled out. */
static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
{
	return -EINVAL;
}
1924 
1925 #define SCI_CONSOLE	NULL
1926 
1927 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
1928 
/* Boot-time banner; discarded after init (__initdata). */
static char banner[] __initdata =
	KERN_INFO "SuperH SCI(F) driver initialized\n";
1931 
/* Serial-core driver covering all ttySC ports. */
static struct uart_driver sci_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "sci",
	.dev_name	= "ttySC",
	.major		= SCI_MAJOR,
	.minor		= SCI_MINOR_START,
	.nr		= SCI_NPORTS,
	.cons		= SCI_CONSOLE,
};
1941 
/*
 * Platform driver .remove: unhook the cpufreq notifier, remove the UART
 * from the serial core and drop the clock references taken in
 * sci_init_single().
 */
static int sci_remove(struct platform_device *dev)
{
	struct sci_port *port = platform_get_drvdata(dev);

	cpufreq_unregister_notifier(&port->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);

	uart_remove_one_port(&sci_uart_driver, &port->port);

	clk_put(port->iclk);
	clk_put(port->fclk);

	pm_runtime_disable(&dev->dev);
	return 0;
}
1957 
/*
 * Initialize one port and add it to the serial core.
 *
 * NOTE(review): an out-of-range index is only reported and returns 0,
 * so the platform device "succeeds" without registering a port.
 */
static int __devinit sci_probe_single(struct platform_device *dev,
				      unsigned int index,
				      struct plat_sci_port *p,
				      struct sci_port *sciport)
{
	int ret;

	/* Sanity check */
	if (unlikely(index >= SCI_NPORTS)) {
		dev_notice(&dev->dev, "Attempting to register port "
			   "%d when only %d are available.\n",
			   index+1, SCI_NPORTS);
		dev_notice(&dev->dev, "Consider bumping "
			   "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
		return 0;
	}

	ret = sci_init_single(dev, sciport, index, p);
	if (ret)
		return ret;

	return uart_add_one_port(&sci_uart_driver, &sciport->port);
}
1981 
/*
 * Platform driver .probe: register the port with the serial core and
 * hook the cpufreq transition notifier.
 *
 * NOTE(review): the err_unreg path runs sci_remove() even when
 * sci_probe_single() failed before the port was added - confirm
 * uart_remove_one_port() copes with a never-added port here.
 */
static int __devinit sci_probe(struct platform_device *dev)
{
	struct plat_sci_port *p = dev->dev.platform_data;
	struct sci_port *sp = &sci_ports[dev->id];
	int ret;

	/*
	 * If we've come here via earlyprintk initialization, head off to
	 * the special early probe. We don't have sufficient device state
	 * to make it beyond this yet.
	 */
	if (is_early_platform_device(dev))
		return sci_probe_earlyprintk(dev);

	platform_set_drvdata(dev, sp);

	ret = sci_probe_single(dev, dev->id, p, sp);
	if (ret)
		goto err_unreg;

	sp->freq_transition.notifier_call = sci_notifier;

	ret = cpufreq_register_notifier(&sp->freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (unlikely(ret < 0))
		goto err_unreg;

#ifdef CONFIG_SH_STANDARD_BIOS
	sh_bios_gdb_detach();
#endif

	return 0;

err_unreg:
	sci_remove(dev);
	return ret;
}
2019 
/* PM .suspend: quiesce the port through the serial core, if one exists. */
static int sci_suspend(struct device *dev)
{
	struct sci_port *sport = dev_get_drvdata(dev);

	if (sport)
		uart_suspend_port(&sci_uart_driver, &sport->port);

	return 0;
}
2029 
/* PM .resume: restore the port through the serial core, if one exists. */
static int sci_resume(struct device *dev)
{
	struct sci_port *sport = dev_get_drvdata(dev);

	if (sport)
		uart_resume_port(&sci_uart_driver, &sport->port);

	return 0;
}
2039 
/* System suspend/resume hooks for the platform driver. */
static const struct dev_pm_ops sci_dev_pm_ops = {
	.suspend	= sci_suspend,
	.resume		= sci_resume,
};
2044 
/* Platform driver glue; also reused for early platform (earlyprintk) probing. */
static struct platform_driver sci_driver = {
	.probe		= sci_probe,
	.remove		= sci_remove,
	.driver		= {
		.name	= "sh-sci",
		.owner	= THIS_MODULE,
		.pm	= &sci_dev_pm_ops,
	},
};
2054 
2055 static int __init sci_init(void)
2056 {
2057 	int ret;
2058 
2059 	printk(banner);
2060 
2061 	ret = uart_register_driver(&sci_uart_driver);
2062 	if (likely(ret == 0)) {
2063 		ret = platform_driver_register(&sci_driver);
2064 		if (unlikely(ret))
2065 			uart_unregister_driver(&sci_uart_driver);
2066 	}
2067 
2068 	return ret;
2069 }
2070 
/* Module teardown: unregister in reverse order of sci_init(). */
static void __exit sci_exit(void)
{
	platform_driver_unregister(&sci_driver);
	uart_unregister_driver(&sci_uart_driver);
}
2076 
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
/*
 * Register with the early platform bus so the "earlyprintk" option
 * string is captured into early_serial_buf before the real probe runs.
 */
early_platform_init_buffer("earlyprintk", &sci_driver,
			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
module_init(sci_init);
module_exit(sci_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-sci");
2086