xref: /linux/drivers/tty/serial/atmel_serial.c (revision b1d29ba82cf2bc784f4c963ddd6a2cf29e229b33)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  *  Driver for Atmel AT91 Serial ports
4  *  Copyright (C) 2003 Rick Bronson
5  *
6  *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
7  *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
8  *
9  *  DMA support added by Chip Coldwell.
10  */
11 #include <linux/tty.h>
12 #include <linux/ioport.h>
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/serial.h>
16 #include <linux/clk.h>
17 #include <linux/console.h>
18 #include <linux/sysrq.h>
19 #include <linux/tty_flip.h>
20 #include <linux/platform_device.h>
21 #include <linux/of.h>
22 #include <linux/of_device.h>
23 #include <linux/of_gpio.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/dmaengine.h>
26 #include <linux/atmel_pdc.h>
27 #include <linux/uaccess.h>
28 #include <linux/platform_data/atmel.h>
29 #include <linux/timer.h>
30 #include <linux/gpio.h>
31 #include <linux/gpio/consumer.h>
32 #include <linux/err.h>
33 #include <linux/irq.h>
34 #include <linux/suspend.h>
35 #include <linux/mm.h>
36 
37 #include <asm/io.h>
38 #include <asm/ioctls.h>
39 
40 #define PDC_BUFFER_SIZE		512
41 /* Revisit: We should calculate this based on the actual port settings */
42 #define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */
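/*
 * Note: the receiver time-out counts bit periods, so 3 * 10 presumably
 * corresponds to about three 10-bit (start + 8 data + stop) character
 * times of idle line before the time-out interrupt fires.
 */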
43 
44 /* The minimum number of data the FIFOs should be able to contain */
45 #define ATMEL_MIN_FIFO_SIZE	8
46 /*
47  * These two offsets are subtracted from the RX FIFO size to define the RTS
48  * high and low thresholds
49  */
50 #define ATMEL_RTS_HIGH_OFFSET	16
51 #define ATMEL_RTS_LOW_OFFSET	20
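/*
 * Worked example (assuming the driver derives the thresholds as
 * fifo_size - offset elsewhere): a 32-data RX FIFO would give an RTS
 * "high" threshold of 32 - 16 = 16 and an RTS "low" threshold of
 * 32 - 20 = 12.
 */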
52 
53 #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
54 #define SUPPORT_SYSRQ
55 #endif
56 
57 #include <linux/serial_core.h>
58 
59 #include "serial_mctrl_gpio.h"
60 #include "atmel_serial.h"
61 
62 static void atmel_start_rx(struct uart_port *port);
63 static void atmel_stop_rx(struct uart_port *port);
64 
65 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
66 
67 /* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
68  * should coexist with the 8250 driver, such as if we have an external 16C550
69  * UART. */
70 #define SERIAL_ATMEL_MAJOR	204
71 #define MINOR_START		154
72 #define ATMEL_DEVICENAME	"ttyAT"
73 
74 #else
75 
76 /* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
77  * name, but it is legally reserved for the 8250 driver. */
78 #define SERIAL_ATMEL_MAJOR	TTY_MAJOR
79 #define MINOR_START		64
80 #define ATMEL_DEVICENAME	"ttyS"
81 
82 #endif
83 
84 #define ATMEL_ISR_PASS_LIMIT	256
85 
86 struct atmel_dma_buffer {
87 	unsigned char	*buf;
88 	dma_addr_t	dma_addr;
89 	unsigned int	dma_size;
90 	unsigned int	ofs;
91 };
92 
93 struct atmel_uart_char {
94 	u16		status;
95 	u16		ch;
96 };
97 
98 /*
99  * Be careful, the real size of the ring buffer is
100  * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. This means the ring buffer
101  * can hold up to 1024 characters in PIO mode and up to 4096 characters in
102  * DMA mode.
103  */
104 #define ATMEL_SERIAL_RINGSIZE 1024
105 
106 /*
107  * at91: 6 USARTs and one DBGU port (SAM9260)
108  * samx7: 3 USARTs and 5 UARTs
109  */
110 #define ATMEL_MAX_UART		8
111 
112 /*
113  * We wrap our port structure around the generic uart_port.
114  */
115 struct atmel_uart_port {
116 	struct uart_port	uart;		/* uart */
117 	struct clk		*clk;		/* uart clock */
118 	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
119 	u32			backup_imr;	/* IMR saved during suspend */
120 	int			break_active;	/* break being received */
121 
122 	bool			use_dma_rx;	/* enable DMA receiver */
123 	bool			use_pdc_rx;	/* enable PDC receiver */
124 	short			pdc_rx_idx;	/* current PDC RX buffer */
125 	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */
126 
127 	bool			use_dma_tx;     /* enable DMA transmitter */
128 	bool			use_pdc_tx;	/* enable PDC transmitter */
129 	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */
130 
131 	spinlock_t			lock_tx;	/* port lock */
132 	spinlock_t			lock_rx;	/* port lock */
133 	struct dma_chan			*chan_tx;
134 	struct dma_chan			*chan_rx;
135 	struct dma_async_tx_descriptor	*desc_tx;
136 	struct dma_async_tx_descriptor	*desc_rx;
137 	dma_cookie_t			cookie_tx;
138 	dma_cookie_t			cookie_rx;
139 	struct scatterlist		sg_tx;
140 	struct scatterlist		sg_rx;
141 	struct tasklet_struct	tasklet_rx;
142 	struct tasklet_struct	tasklet_tx;
143 	atomic_t		tasklet_shutdown;
144 	unsigned int		irq_status_prev;
145 	unsigned int		tx_len;
146 
147 	struct circ_buf		rx_ring;
148 
149 	struct mctrl_gpios	*gpios;
150 	unsigned int		tx_done_mask;
151 	u32			fifo_size;
152 	u32			rts_high;
153 	u32			rts_low;
154 	bool			ms_irq_enabled;
155 	u32			rtor;	/* address of receiver timeout register if it exists */
156 	bool			has_frac_baudrate;
157 	bool			has_hw_timer;
158 	struct timer_list	uart_timer;
159 
160 	bool			tx_stopped;
161 	bool			suspended;
162 	unsigned int		pending;
163 	unsigned int		pending_status;
164 	spinlock_t		lock_suspended;
165 
166 #ifdef CONFIG_PM
167 	struct {
168 		u32		cr;
169 		u32		mr;
170 		u32		imr;
171 		u32		brgr;
172 		u32		rtor;
173 		u32		ttgr;
174 		u32		fmr;
175 		u32		fimr;
176 	} cache;
177 #endif
178 
179 	int (*prepare_rx)(struct uart_port *port);
180 	int (*prepare_tx)(struct uart_port *port);
181 	void (*schedule_rx)(struct uart_port *port);
182 	void (*schedule_tx)(struct uart_port *port);
183 	void (*release_rx)(struct uart_port *port);
184 	void (*release_tx)(struct uart_port *port);
185 };
186 
187 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
188 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
189 
190 #ifdef SUPPORT_SYSRQ
191 static struct console atmel_console;
192 #endif
193 
194 #if defined(CONFIG_OF)
195 static const struct of_device_id atmel_serial_dt_ids[] = {
196 	{ .compatible = "atmel,at91rm9200-usart-serial" },
197 	{ /* sentinel */ }
198 };
199 #endif
200 
201 static inline struct atmel_uart_port *
202 to_atmel_uart_port(struct uart_port *uart)
203 {
204 	return container_of(uart, struct atmel_uart_port, uart);
205 }
206 
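/*
 * Raw register accessors: the __raw_* variants perform native-endian
 * accesses without memory barriers; callers are presumably expected to
 * provide any required ordering (port lock, irq context).
 */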
207 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
208 {
209 	return __raw_readl(port->membase + reg);
210 }
211 
212 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
213 {
214 	__raw_writel(value, port->membase + reg);
215 }
216 
217 static inline u8 atmel_uart_read_char(struct uart_port *port)
218 {
219 	return __raw_readb(port->membase + ATMEL_US_RHR);
220 }
221 
222 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
223 {
224 	__raw_writeb(value, port->membase + ATMEL_US_THR);
225 }
226 
227 #ifdef CONFIG_SERIAL_ATMEL_PDC
228 static bool atmel_use_pdc_rx(struct uart_port *port)
229 {
230 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
231 
232 	return atmel_port->use_pdc_rx;
233 }
234 
235 static bool atmel_use_pdc_tx(struct uart_port *port)
236 {
237 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
238 
239 	return atmel_port->use_pdc_tx;
240 }
241 #else
242 static bool atmel_use_pdc_rx(struct uart_port *port)
243 {
244 	return false;
245 }
246 
247 static bool atmel_use_pdc_tx(struct uart_port *port)
248 {
249 	return false;
250 }
251 #endif
252 
253 static bool atmel_use_dma_tx(struct uart_port *port)
254 {
255 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
256 
257 	return atmel_port->use_dma_tx;
258 }
259 
260 static bool atmel_use_dma_rx(struct uart_port *port)
261 {
262 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
263 
264 	return atmel_port->use_dma_rx;
265 }
266 
267 static bool atmel_use_fifo(struct uart_port *port)
268 {
269 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
270 
271 	return atmel_port->fifo_size;
272 }
273 
274 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
275 				   struct tasklet_struct *t)
276 {
277 	if (!atomic_read(&atmel_port->tasklet_shutdown))
278 		tasklet_schedule(t);
279 }
280 
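/*
 * Build a CSR-style status word: read CSR, then override the modem
 * status bits for any line that is routed to a GPIO (mctrl-gpios).
 * The CSR bits are active low, hence the inverted handling below.
 */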
281 static unsigned int atmel_get_lines_status(struct uart_port *port)
282 {
283 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
284 	unsigned int status, ret = 0;
285 
286 	status = atmel_uart_readl(port, ATMEL_US_CSR);
287 
288 	mctrl_gpio_get(atmel_port->gpios, &ret);
289 
290 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
291 						UART_GPIO_CTS))) {
292 		if (ret & TIOCM_CTS)
293 			status &= ~ATMEL_US_CTS;
294 		else
295 			status |= ATMEL_US_CTS;
296 	}
297 
298 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
299 						UART_GPIO_DSR))) {
300 		if (ret & TIOCM_DSR)
301 			status &= ~ATMEL_US_DSR;
302 		else
303 			status |= ATMEL_US_DSR;
304 	}
305 
306 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
307 						UART_GPIO_RI))) {
308 		if (ret & TIOCM_RI)
309 			status &= ~ATMEL_US_RI;
310 		else
311 			status |= ATMEL_US_RI;
312 	}
313 
314 	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
315 						UART_GPIO_DCD))) {
316 		if (ret & TIOCM_CD)
317 			status &= ~ATMEL_US_DCD;
318 		else
319 			status |= ATMEL_US_DCD;
320 	}
321 
322 	return status;
323 }
324 
325 /* Enable or disable the rs485 support */
326 static int atmel_config_rs485(struct uart_port *port,
327 			      struct serial_rs485 *rs485conf)
328 {
329 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
330 	unsigned int mode;
331 
332 	/* Disable interrupts */
333 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
334 
335 	mode = atmel_uart_readl(port, ATMEL_US_MR);
336 
337 	/* Resetting serial mode to RS232 (0x0) */
338 	mode &= ~ATMEL_US_USMODE;
339 
340 	port->rs485 = *rs485conf;
341 
342 	if (rs485conf->flags & SER_RS485_ENABLED) {
343 		dev_dbg(port->dev, "Setting UART to RS485\n");
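		/*
		 * In RS485 mode, treat TX as done only when the shift
		 * register is empty (TXEMPTY), presumably so that RX is
		 * re-enabled and RTS released only after the last bit
		 * has actually left the wire.
		 */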
344 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
345 		atmel_uart_writel(port, ATMEL_US_TTGR,
346 				  rs485conf->delay_rts_after_send);
347 		mode |= ATMEL_US_USMODE_RS485;
348 	} else {
349 		dev_dbg(port->dev, "Setting UART to RS232\n");
350 		if (atmel_use_pdc_tx(port))
351 			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
352 				ATMEL_US_TXBUFE;
353 		else
354 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
355 	}
356 	atmel_uart_writel(port, ATMEL_US_MR, mode);
357 
358 	/* Enable interrupts */
359 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
360 
361 	return 0;
362 }
363 
364 /*
365  * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
366  */
367 static u_int atmel_tx_empty(struct uart_port *port)
368 {
369 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
370 
371 	if (atmel_port->tx_stopped)
372 		return TIOCSER_TEMT;
373 	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
374 		TIOCSER_TEMT :
375 		0;
376 }
377 
378 /*
379  * Set state of the modem control output lines
380  */
381 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
382 {
383 	unsigned int control = 0;
384 	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
385 	unsigned int rts_paused, rts_ready;
386 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
387 
388 	/* override mode to RS485 if needed, otherwise keep the current mode */
389 	if (port->rs485.flags & SER_RS485_ENABLED) {
390 		atmel_uart_writel(port, ATMEL_US_TTGR,
391 				  port->rs485.delay_rts_after_send);
392 		mode &= ~ATMEL_US_USMODE;
393 		mode |= ATMEL_US_USMODE_RS485;
394 	}
395 
396 	/* set the RTS line state according to the mode */
397 	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
398 		/* force RTS line to high level */
399 		rts_paused = ATMEL_US_RTSEN;
400 
401 		/* give the control of the RTS line back to the hardware */
402 		rts_ready = ATMEL_US_RTSDIS;
403 	} else {
404 		/* force RTS line to high level */
405 		rts_paused = ATMEL_US_RTSDIS;
406 
407 		/* force RTS line to low level */
408 		rts_ready = ATMEL_US_RTSEN;
409 	}
410 
411 	if (mctrl & TIOCM_RTS)
412 		control |= rts_ready;
413 	else
414 		control |= rts_paused;
415 
416 	if (mctrl & TIOCM_DTR)
417 		control |= ATMEL_US_DTREN;
418 	else
419 		control |= ATMEL_US_DTRDIS;
420 
421 	atmel_uart_writel(port, ATMEL_US_CR, control);
422 
423 	mctrl_gpio_set(atmel_port->gpios, mctrl);
424 
425 	/* Local loopback mode? */
426 	mode &= ~ATMEL_US_CHMODE;
427 	if (mctrl & TIOCM_LOOP)
428 		mode |= ATMEL_US_CHMODE_LOC_LOOP;
429 	else
430 		mode |= ATMEL_US_CHMODE_NORMAL;
431 
432 	atmel_uart_writel(port, ATMEL_US_MR, mode);
433 }
434 
435 /*
436  * Get state of the modem control input lines
437  */
438 static u_int atmel_get_mctrl(struct uart_port *port)
439 {
440 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
441 	unsigned int ret = 0, status;
442 
443 	status = atmel_uart_readl(port, ATMEL_US_CSR);
444 
445 	/*
446 	 * The control signals are active low.
447 	 */
448 	if (!(status & ATMEL_US_DCD))
449 		ret |= TIOCM_CD;
450 	if (!(status & ATMEL_US_CTS))
451 		ret |= TIOCM_CTS;
452 	if (!(status & ATMEL_US_DSR))
453 		ret |= TIOCM_DSR;
454 	if (!(status & ATMEL_US_RI))
455 		ret |= TIOCM_RI;
456 
457 	return mctrl_gpio_get(atmel_port->gpios, &ret);
458 }
459 
460 /*
461  * Stop transmitting.
462  */
463 static void atmel_stop_tx(struct uart_port *port)
464 {
465 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
466 
467 	if (atmel_use_pdc_tx(port)) {
468 		/* disable PDC transmit */
469 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
470 	}
471 
472 	/*
473 	 * Disable the transmitter.
474 	 * This is mandatory when DMA is used, otherwise the DMA buffer
475 	 * would still be transmitted to completion.
476 	 */
477 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
478 	atmel_port->tx_stopped = true;
479 
480 	/* Disable interrupts */
481 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
482 
483 	if ((port->rs485.flags & SER_RS485_ENABLED) &&
484 	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
485 		atmel_start_rx(port);
486 }
487 
488 /*
489  * Start transmitting.
490  */
491 static void atmel_start_tx(struct uart_port *port)
492 {
493 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
494 
495 	if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
496 				       & ATMEL_PDC_TXTEN))
497 		/* The transmitter is already running.  Yes, we
498 		   really need this check. */
499 		return;
500 
501 	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
502 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
503 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
504 			atmel_stop_rx(port);
505 
506 	if (atmel_use_pdc_tx(port))
507 		/* re-enable PDC transmit */
508 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
509 
510 	/* Enable interrupts */
511 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
512 
513 	/* re-enable the transmitter */
514 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
515 	atmel_port->tx_stopped = false;
516 }
517 
518 /*
519  * start receiving - port is in process of being opened.
520  */
521 static void atmel_start_rx(struct uart_port *port)
522 {
523 	/* reset status and receiver */
524 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
525 
526 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
527 
528 	if (atmel_use_pdc_rx(port)) {
529 		/* enable PDC controller */
530 		atmel_uart_writel(port, ATMEL_US_IER,
531 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
532 				  port->read_status_mask);
533 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
534 	} else {
535 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
536 	}
537 }
538 
539 /*
540  * Stop receiving - port is in process of being closed.
541  */
542 static void atmel_stop_rx(struct uart_port *port)
543 {
544 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
545 
546 	if (atmel_use_pdc_rx(port)) {
547 		/* disable PDC receive */
548 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
549 		atmel_uart_writel(port, ATMEL_US_IDR,
550 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
551 				  port->read_status_mask);
552 	} else {
553 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
554 	}
555 }
556 
557 /*
558  * Enable modem status interrupts
559  */
560 static void atmel_enable_ms(struct uart_port *port)
561 {
562 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
563 	uint32_t ier = 0;
564 
565 	/*
566 	 * Interrupt should not be enabled twice
567 	 */
568 	if (atmel_port->ms_irq_enabled)
569 		return;
570 
571 	atmel_port->ms_irq_enabled = true;
572 
573 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
574 		ier |= ATMEL_US_CTSIC;
575 
576 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
577 		ier |= ATMEL_US_DSRIC;
578 
579 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
580 		ier |= ATMEL_US_RIIC;
581 
582 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
583 		ier |= ATMEL_US_DCDIC;
584 
585 	atmel_uart_writel(port, ATMEL_US_IER, ier);
586 
587 	mctrl_gpio_enable_ms(atmel_port->gpios);
588 }
589 
590 /*
591  * Disable modem status interrupts
592  */
593 static void atmel_disable_ms(struct uart_port *port)
594 {
595 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
596 	uint32_t idr = 0;
597 
598 	/*
599 	 * Interrupt should not be disabled twice
600 	 */
601 	if (!atmel_port->ms_irq_enabled)
602 		return;
603 
604 	atmel_port->ms_irq_enabled = false;
605 
606 	mctrl_gpio_disable_ms(atmel_port->gpios);
607 
608 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
609 		idr |= ATMEL_US_CTSIC;
610 
611 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
612 		idr |= ATMEL_US_DSRIC;
613 
614 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
615 		idr |= ATMEL_US_RIIC;
616 
617 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
618 		idr |= ATMEL_US_DCDIC;
619 
620 	atmel_uart_writel(port, ATMEL_US_IDR, idr);
621 }
622 
623 /*
624  * Control the transmission of a break signal
625  */
626 static void atmel_break_ctl(struct uart_port *port, int break_state)
627 {
628 	if (break_state != 0)
629 		/* start break */
630 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
631 	else
632 		/* stop break */
633 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
634 }
635 
636 /*
637  * Stores the incoming character in the ring buffer
638  */
639 static void
640 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
641 		     unsigned int ch)
642 {
643 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
644 	struct circ_buf *ring = &atmel_port->rx_ring;
645 	struct atmel_uart_char *c;
646 
647 	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
648 		/* Buffer overflow, ignore char */
649 		return;
650 
651 	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
652 	c->status	= status;
653 	c->ch		= ch;
654 
655 	/* Make sure the character is stored before we update head. */
656 	smp_wmb();
657 
658 	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
659 }
660 
661 /*
662  * Deal with parity, framing and overrun errors.
663  */
664 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
665 {
666 	/* clear error */
667 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
668 
669 	if (status & ATMEL_US_RXBRK) {
670 		/* ignore side-effect */
671 		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
672 		port->icount.brk++;
673 	}
674 	if (status & ATMEL_US_PARE)
675 		port->icount.parity++;
676 	if (status & ATMEL_US_FRAME)
677 		port->icount.frame++;
678 	if (status & ATMEL_US_OVRE)
679 		port->icount.overrun++;
680 }
681 
682 /*
683  * Characters received (called from interrupt handler)
684  */
685 static void atmel_rx_chars(struct uart_port *port)
686 {
687 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
688 	unsigned int status, ch;
689 
690 	status = atmel_uart_readl(port, ATMEL_US_CSR);
691 	while (status & ATMEL_US_RXRDY) {
692 		ch = atmel_uart_read_char(port);
693 
694 		/*
695 		 * note that the error handling code is
696 		 * out of the main execution path
697 		 */
698 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
699 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
700 			     || atmel_port->break_active)) {
701 
702 			/* clear error */
703 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
704 
705 			if (status & ATMEL_US_RXBRK
706 			    && !atmel_port->break_active) {
707 				atmel_port->break_active = 1;
708 				atmel_uart_writel(port, ATMEL_US_IER,
709 						  ATMEL_US_RXBRK);
710 			} else {
711 				/*
712 				 * This is either the end-of-break
713 				 * condition or we've received at
714 				 * least one character without RXBRK
715 				 * being set. In both cases, the next
716 				 * RXBRK will indicate start-of-break.
717 				 */
718 				atmel_uart_writel(port, ATMEL_US_IDR,
719 						  ATMEL_US_RXBRK);
720 				status &= ~ATMEL_US_RXBRK;
721 				atmel_port->break_active = 0;
722 			}
723 		}
724 
725 		atmel_buffer_rx_char(port, status, ch);
726 		status = atmel_uart_readl(port, ATMEL_US_CSR);
727 	}
728 
729 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
730 }
731 
732 /*
733  * Transmit characters (called from tasklet with TXRDY interrupt
734  * disabled)
735  */
736 static void atmel_tx_chars(struct uart_port *port)
737 {
738 	struct circ_buf *xmit = &port->state->xmit;
739 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
740 
741 	if (port->x_char &&
742 	    (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
743 		atmel_uart_write_char(port, port->x_char);
744 		port->icount.tx++;
745 		port->x_char = 0;
746 	}
747 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
748 		return;
749 
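	/*
	 * Keep feeding THR while the tx_done_mask condition holds
	 * (TXRDY normally, TXEMPTY in RS485 mode).
	 */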
750 	while (atmel_uart_readl(port, ATMEL_US_CSR) &
751 	       atmel_port->tx_done_mask) {
752 		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
753 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
754 		port->icount.tx++;
755 		if (uart_circ_empty(xmit))
756 			break;
757 	}
758 
759 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
760 		uart_write_wakeup(port);
761 
762 	if (!uart_circ_empty(xmit))
763 		/* Enable interrupts */
764 		atmel_uart_writel(port, ATMEL_US_IER,
765 				  atmel_port->tx_done_mask);
766 }
767 
768 static void atmel_complete_tx_dma(void *arg)
769 {
770 	struct atmel_uart_port *atmel_port = arg;
771 	struct uart_port *port = &atmel_port->uart;
772 	struct circ_buf *xmit = &port->state->xmit;
773 	struct dma_chan *chan = atmel_port->chan_tx;
774 	unsigned long flags;
775 
776 	spin_lock_irqsave(&port->lock, flags);
777 
778 	if (chan)
779 		dmaengine_terminate_all(chan);
780 	xmit->tail += atmel_port->tx_len;
781 	xmit->tail &= UART_XMIT_SIZE - 1;
782 
783 	port->icount.tx += atmel_port->tx_len;
784 
785 	spin_lock_irq(&atmel_port->lock_tx);
786 	async_tx_ack(atmel_port->desc_tx);
787 	atmel_port->cookie_tx = -EINVAL;
788 	atmel_port->desc_tx = NULL;
789 	spin_unlock_irq(&atmel_port->lock_tx);
790 
791 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
792 		uart_write_wakeup(port);
793 
794 	/*
795 	 * xmit is a circular buffer so, if we have just sent data from
796 	 * xmit->tail to the end of xmit->buf, now we have to transmit the
797 	 * remaining data from the beginning of xmit->buf to xmit->head.
798 	 */
799 	if (!uart_circ_empty(xmit))
800 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
801 	else if ((port->rs485.flags & SER_RS485_ENABLED) &&
802 		 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
803 		/* DMA done, stop TX, start RX for RS485 */
804 		atmel_start_rx(port);
805 	}
806 
807 	spin_unlock_irqrestore(&port->lock, flags);
808 }
809 
810 static void atmel_release_tx_dma(struct uart_port *port)
811 {
812 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
813 	struct dma_chan *chan = atmel_port->chan_tx;
814 
815 	if (chan) {
816 		dmaengine_terminate_all(chan);
817 		dma_release_channel(chan);
818 		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
819 				DMA_TO_DEVICE);
820 	}
821 
822 	atmel_port->desc_tx = NULL;
823 	atmel_port->chan_tx = NULL;
824 	atmel_port->cookie_tx = -EINVAL;
825 }
826 
827 /*
828  * Called from tasklet with the TXRDY interrupt disabled.
829  */
830 static void atmel_tx_dma(struct uart_port *port)
831 {
832 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
833 	struct circ_buf *xmit = &port->state->xmit;
834 	struct dma_chan *chan = atmel_port->chan_tx;
835 	struct dma_async_tx_descriptor *desc;
836 	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
837 	unsigned int tx_len, part1_len, part2_len, sg_len;
838 	dma_addr_t phys_addr;
839 
840 	/* Make sure we have an idle channel */
841 	if (atmel_port->desc_tx != NULL)
842 		return;
843 
844 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
845 		/*
846 		 * DMA is idle now.
847 		 * Port xmit buffer is already mapped,
848 		 * and it is one page... Just adjust
849 		 * offsets and lengths. Since it is a circular buffer,
850 		 * we have to transmit till the end, and then the rest.
851 		 * Take the port lock to get a
852 		 * consistent xmit buffer state.
853 		 */
854 		tx_len = CIRC_CNT_TO_END(xmit->head,
855 					 xmit->tail,
856 					 UART_XMIT_SIZE);
857 
858 		if (atmel_port->fifo_size) {
859 			/* multi data mode */
860 			part1_len = (tx_len & ~0x3); /* DWORD access */
861 			part2_len = (tx_len & 0x3); /* BYTE access */
862 		} else {
863 			/* single data (legacy) mode */
864 			part1_len = 0;
865 			part2_len = tx_len; /* BYTE access only */
866 		}
867 
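		/*
		 * Build up to two sg entries inside the already-mapped
		 * xmit page: the multiple-of-4 bulk (DWORD accesses)
		 * followed by the remaining tail bytes.
		 */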
868 		sg_init_table(sgl, 2);
869 		sg_len = 0;
870 		phys_addr = sg_dma_address(sg_tx) + xmit->tail;
871 		if (part1_len) {
872 			sg = &sgl[sg_len++];
873 			sg_dma_address(sg) = phys_addr;
874 			sg_dma_len(sg) = part1_len;
875 
876 			phys_addr += part1_len;
877 		}
878 
879 		if (part2_len) {
880 			sg = &sgl[sg_len++];
881 			sg_dma_address(sg) = phys_addr;
882 			sg_dma_len(sg) = part2_len;
883 		}
884 
885 		/*
886 		 * save tx_len so atmel_complete_tx_dma() will increase
887 		 * xmit->tail correctly
888 		 */
889 		atmel_port->tx_len = tx_len;
890 
891 		desc = dmaengine_prep_slave_sg(chan,
892 					       sgl,
893 					       sg_len,
894 					       DMA_MEM_TO_DEV,
895 					       DMA_PREP_INTERRUPT |
896 					       DMA_CTRL_ACK);
897 		if (!desc) {
898 			dev_err(port->dev, "Failed to send via dma!\n");
899 			return;
900 		}
901 
902 		dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
903 
904 		atmel_port->desc_tx = desc;
905 		desc->callback = atmel_complete_tx_dma;
906 		desc->callback_param = atmel_port;
907 		atmel_port->cookie_tx = dmaengine_submit(desc);
908 	}
909 
910 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
911 		uart_write_wakeup(port);
912 }
913 
914 static int atmel_prepare_tx_dma(struct uart_port *port)
915 {
916 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
917 	struct device *mfd_dev = port->dev->parent;
918 	dma_cap_mask_t		mask;
919 	struct dma_slave_config config;
920 	int ret, nent;
921 
922 	dma_cap_zero(mask);
923 	dma_cap_set(DMA_SLAVE, mask);
924 
925 	atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
926 	if (atmel_port->chan_tx == NULL)
927 		goto chan_err;
928 	dev_info(port->dev, "using %s for tx DMA transfers\n",
929 		dma_chan_name(atmel_port->chan_tx));
930 
931 	spin_lock_init(&atmel_port->lock_tx);
932 	sg_init_table(&atmel_port->sg_tx, 1);
933 	/* UART circular tx buffer is an aligned page. */
934 	BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
935 	sg_set_page(&atmel_port->sg_tx,
936 			virt_to_page(port->state->xmit.buf),
937 			UART_XMIT_SIZE,
938 			offset_in_page(port->state->xmit.buf));
939 	nent = dma_map_sg(port->dev,
940 				&atmel_port->sg_tx,
941 				1,
942 				DMA_TO_DEVICE);
943 
944 	if (!nent) {
945 		dev_dbg(port->dev, "need to release resource of dma\n");
946 		goto chan_err;
947 	} else {
948 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
949 			sg_dma_len(&atmel_port->sg_tx),
950 			port->state->xmit.buf,
951 			&sg_dma_address(&atmel_port->sg_tx));
952 	}
953 
954 	/* Configure the slave DMA */
955 	memset(&config, 0, sizeof(config));
956 	config.direction = DMA_MEM_TO_DEV;
957 	config.dst_addr_width = (atmel_port->fifo_size) ?
958 				DMA_SLAVE_BUSWIDTH_4_BYTES :
959 				DMA_SLAVE_BUSWIDTH_1_BYTE;
960 	config.dst_addr = port->mapbase + ATMEL_US_THR;
961 	config.dst_maxburst = 1;
962 
963 	ret = dmaengine_slave_config(atmel_port->chan_tx,
964 				     &config);
965 	if (ret) {
966 		dev_err(port->dev, "DMA tx slave configuration failed\n");
967 		goto chan_err;
968 	}
969 
970 	return 0;
971 
972 chan_err:
973 	dev_err(port->dev, "TX channel not available, switch to pio\n");
974 	atmel_port->use_dma_tx = 0;
975 	if (atmel_port->chan_tx)
976 		atmel_release_tx_dma(port);
977 	return -EINVAL;
978 }
979 
980 static void atmel_complete_rx_dma(void *arg)
981 {
982 	struct uart_port *port = arg;
983 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
984 
985 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
986 }
987 
988 static void atmel_release_rx_dma(struct uart_port *port)
989 {
990 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
991 	struct dma_chan *chan = atmel_port->chan_rx;
992 
993 	if (chan) {
994 		dmaengine_terminate_all(chan);
995 		dma_release_channel(chan);
996 		dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
997 				DMA_FROM_DEVICE);
998 	}
999 
1000 	atmel_port->desc_rx = NULL;
1001 	atmel_port->chan_rx = NULL;
1002 	atmel_port->cookie_rx = -EINVAL;
1003 }
1004 
1005 static void atmel_rx_from_dma(struct uart_port *port)
1006 {
1007 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1008 	struct tty_port *tport = &port->state->port;
1009 	struct circ_buf *ring = &atmel_port->rx_ring;
1010 	struct dma_chan *chan = atmel_port->chan_rx;
1011 	struct dma_tx_state state;
1012 	enum dma_status dmastat;
1013 	size_t count;
1014 
1016 	/* Reset the UART timeout early so that we don't miss one */
1017 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1018 	dmastat = dmaengine_tx_status(chan,
1019 				atmel_port->cookie_rx,
1020 				&state);
1021 	/* Restart a new tasklet if DMA status is error */
1022 	if (dmastat == DMA_ERROR) {
1023 		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1024 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1025 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1026 		return;
1027 	}
1028 
1029 	/* CPU claims ownership of RX DMA buffer */
1030 	dma_sync_sg_for_cpu(port->dev,
1031 			    &atmel_port->sg_rx,
1032 			    1,
1033 			    DMA_FROM_DEVICE);
1034 
1035 	/*
1036 	 * ring->head points to the end of data already written by the DMA.
1037 	 * ring->tail points to the beginning of data to be read by the
1038 	 * framework.
1039 	 * The current transfer size should not be larger than the dma buffer
1040 	 * length.
1041 	 */
1042 	ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
1043 	BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
1044 	/*
1045 	 * At this point ring->head may point to the first byte right after the
1046 	 * last byte of the dma buffer:
1047 	 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1048 	 *
1049  * However, ring->tail must always point inside the dma buffer:
1050 	 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1051 	 *
1052 	 * Since we use a ring buffer, we have to handle the case
1053 	 * where head is lower than tail. In such a case, we first read from
1054 	 * tail to the end of the buffer then reset tail.
1055 	 */
1056 	if (ring->head < ring->tail) {
1057 		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
1058 
1059 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1060 		ring->tail = 0;
1061 		port->icount.rx += count;
1062 	}
1063 
1064 	/* Finally we read data from tail to head */
1065 	if (ring->tail < ring->head) {
1066 		count = ring->head - ring->tail;
1067 
1068 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1069 		/* Wrap ring->head if needed */
1070 		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
1071 			ring->head = 0;
1072 		ring->tail = ring->head;
1073 		port->icount.rx += count;
1074 	}
1075 
1076 	/* USART retrieves ownership of RX DMA buffer */
1077 	dma_sync_sg_for_device(port->dev,
1078 			       &atmel_port->sg_rx,
1079 			       1,
1080 			       DMA_FROM_DEVICE);
1081 
1082 	/*
1083 	 * Drop the lock here since it might end up calling
1084 	 * uart_start(), which takes the lock.
1085 	 */
1086 	spin_unlock(&port->lock);
1087 	tty_flip_buffer_push(tport);
1088 	spin_lock(&port->lock);
1089 
1090 	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1091 }
1092 
1093 static int atmel_prepare_rx_dma(struct uart_port *port)
1094 {
1095 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1096 	struct device *mfd_dev = port->dev->parent;
1097 	struct dma_async_tx_descriptor *desc;
1098 	dma_cap_mask_t		mask;
1099 	struct dma_slave_config config;
1100 	struct circ_buf		*ring;
1101 	int ret, nent;
1102 
1103 	ring = &atmel_port->rx_ring;
1104 
1105 	dma_cap_zero(mask);
1106 	dma_cap_set(DMA_CYCLIC, mask);
1107 
1108 	atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
1109 	if (atmel_port->chan_rx == NULL)
1110 		goto chan_err;
1111 	dev_info(port->dev, "using %s for rx DMA transfers\n",
1112 		dma_chan_name(atmel_port->chan_rx));
1113 
1114 	spin_lock_init(&atmel_port->lock_rx);
1115 	sg_init_table(&atmel_port->sg_rx, 1);
1116 	/* UART circular rx buffer is an aligned page. */
1117 	BUG_ON(!PAGE_ALIGNED(ring->buf));
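	/*
	 * In DMA mode the rx_ring storage is used as a plain byte buffer:
	 * the whole sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE
	 * allocation is mapped, hence the larger capacity mentioned at the
	 * ATMEL_SERIAL_RINGSIZE definition.
	 */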
1118 	sg_set_page(&atmel_port->sg_rx,
1119 		    virt_to_page(ring->buf),
1120 		    sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1121 		    offset_in_page(ring->buf));
1122 	nent = dma_map_sg(port->dev,
1123 			  &atmel_port->sg_rx,
1124 			  1,
1125 			  DMA_FROM_DEVICE);
1126 
1127 	if (!nent) {
1128 		dev_dbg(port->dev, "need to release resource of dma\n");
1129 		goto chan_err;
1130 	} else {
1131 		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1132 			sg_dma_len(&atmel_port->sg_rx),
1133 			ring->buf,
1134 			&sg_dma_address(&atmel_port->sg_rx));
1135 	}
1136 
1137 	/* Configure the slave DMA */
1138 	memset(&config, 0, sizeof(config));
1139 	config.direction = DMA_DEV_TO_MEM;
1140 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1141 	config.src_addr = port->mapbase + ATMEL_US_RHR;
1142 	config.src_maxburst = 1;
1143 
1144 	ret = dmaengine_slave_config(atmel_port->chan_rx,
1145 				     &config);
1146 	if (ret) {
1147 		dev_err(port->dev, "DMA rx slave configuration failed\n");
1148 		goto chan_err;
1149 	}
1150 	/*
1151 	 * Prepare a cyclic dma transfer split into two periods,
1152 	 * each one half the ring buffer size
1153 	 */
1154 	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1155 					 sg_dma_address(&atmel_port->sg_rx),
1156 					 sg_dma_len(&atmel_port->sg_rx),
1157 					 sg_dma_len(&atmel_port->sg_rx)/2,
1158 					 DMA_DEV_TO_MEM,
1159 					 DMA_PREP_INTERRUPT);
	/* Bail out if the cyclic descriptor could not be prepared,
	 * avoiding a NULL dereference below. */
	if (!desc) {
		dev_err(port->dev, "Preparing DMA cyclic failed\n");
		goto chan_err;
	}
1160 	desc->callback = atmel_complete_rx_dma;
1161 	desc->callback_param = port;
1162 	atmel_port->desc_rx = desc;
1163 	atmel_port->cookie_rx = dmaengine_submit(desc);
1164 
1165 	return 0;
1166 
1167 chan_err:
1168 	dev_err(port->dev, "RX channel not available, switch to pio\n");
1169 	atmel_port->use_dma_rx = 0;
1170 	if (atmel_port->chan_rx)
1171 		atmel_release_rx_dma(port);
1172 	return -EINVAL;
1173 }
1174 
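/*
 * Poll timer used when the IP has no hardware receiver time-out: it
 * periodically kicks the RX tasklet so that received data is pushed to
 * the tty layer.
 */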
1175 static void atmel_uart_timer_callback(struct timer_list *t)
1176 {
1177 	struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
1178 							uart_timer);
1179 	struct uart_port *port = &atmel_port->uart;
1180 
1181 	if (!atomic_read(&atmel_port->tasklet_shutdown)) {
1182 		tasklet_schedule(&atmel_port->tasklet_rx);
1183 		mod_timer(&atmel_port->uart_timer,
1184 			  jiffies + uart_poll_timeout(port));
1185 	}
1186 }
1187 
1188 /*
1189  * receive interrupt handler.
1190  */
1191 static void
1192 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1193 {
1194 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1195 
1196 	if (atmel_use_pdc_rx(port)) {
1197 		/*
1198 		 * PDC receive. Just schedule the tasklet and let it
1199 		 * figure out the details.
1200 		 *
1201 		 * TODO: We're not handling error flags correctly at
1202 		 * the moment.
1203 		 */
1204 		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1205 			atmel_uart_writel(port, ATMEL_US_IDR,
1206 					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1207 			atmel_tasklet_schedule(atmel_port,
1208 					       &atmel_port->tasklet_rx);
1209 		}
1210 
1211 		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1212 				ATMEL_US_FRAME | ATMEL_US_PARE))
1213 			atmel_pdc_rxerr(port, pending);
1214 	}
1215 
1216 	if (atmel_use_dma_rx(port)) {
1217 		if (pending & ATMEL_US_TIMEOUT) {
1218 			atmel_uart_writel(port, ATMEL_US_IDR,
1219 					  ATMEL_US_TIMEOUT);
1220 			atmel_tasklet_schedule(atmel_port,
1221 					       &atmel_port->tasklet_rx);
1222 		}
1223 	}
1224 
1225 	/* Interrupt receive */
1226 	if (pending & ATMEL_US_RXRDY)
1227 		atmel_rx_chars(port);
1228 	else if (pending & ATMEL_US_RXBRK) {
1229 		/*
1230 		 * End of break detected. If it came along with a
1231 		 * character, atmel_rx_chars will handle it.
1232 		 */
1233 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1234 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1235 		atmel_port->break_active = 0;
1236 	}
1237 }
1238 
1239 /*
1240  * transmit interrupt handler.
1241  */
1242 static void
1243 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1244 {
1245 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1246 
1247 	if (pending & atmel_port->tx_done_mask) {
1248 		/* Either PDC or interrupt transmission */
1249 		atmel_uart_writel(port, ATMEL_US_IDR,
1250 				  atmel_port->tx_done_mask);
1251 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1252 	}
1253 }
1254 
1255 /*
1256  * status flags interrupt handler.
1257  */
1258 static void
1259 atmel_handle_status(struct uart_port *port, unsigned int pending,
1260 		    unsigned int status)
1261 {
1262 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1263 	unsigned int status_change;
1264 
1265 	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1266 				| ATMEL_US_CTSIC)) {
1267 		status_change = status ^ atmel_port->irq_status_prev;
1268 		atmel_port->irq_status_prev = status;
1269 
1270 		if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1271 					| ATMEL_US_DCD | ATMEL_US_CTS)) {
1272 			/* TODO: All reads to CSR will clear these interrupts! */
1273 			if (status_change & ATMEL_US_RI)
1274 				port->icount.rng++;
1275 			if (status_change & ATMEL_US_DSR)
1276 				port->icount.dsr++;
1277 			if (status_change & ATMEL_US_DCD)
1278 				uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1279 			if (status_change & ATMEL_US_CTS)
1280 				uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1281 
1282 			wake_up_interruptible(&port->state->port.delta_msr_wait);
1283 		}
1284 	}
1285 }
1286 
1287 /*
1288  * Interrupt handler
1289  */
1290 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1291 {
1292 	struct uart_port *port = dev_id;
1293 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1294 	unsigned int status, pending, mask, pass_counter = 0;
1295 
1296 	spin_lock(&atmel_port->lock_suspended);
1297 
1298 	do {
1299 		status = atmel_get_lines_status(port);
1300 		mask = atmel_uart_readl(port, ATMEL_US_IMR);
1301 		pending = status & mask;
1302 		if (!pending)
1303 			break;
1304 
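		/*
		 * While suspended, only record the pending events and
		 * status (presumably replayed on resume) and report the
		 * wakeup to the PM core.
		 */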
1305 		if (atmel_port->suspended) {
1306 			atmel_port->pending |= pending;
1307 			atmel_port->pending_status = status;
1308 			atmel_uart_writel(port, ATMEL_US_IDR, mask);
1309 			pm_system_wakeup();
1310 			break;
1311 		}
1312 
1313 		atmel_handle_receive(port, pending);
1314 		atmel_handle_status(port, pending, status);
1315 		atmel_handle_transmit(port, pending);
1316 	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1317 
1318 	spin_unlock(&atmel_port->lock_suspended);
1319 
1320 	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1321 }
1322 
1323 static void atmel_release_tx_pdc(struct uart_port *port)
1324 {
1325 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1326 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1327 
1328 	dma_unmap_single(port->dev,
1329 			 pdc->dma_addr,
1330 			 pdc->dma_size,
1331 			 DMA_TO_DEVICE);
1332 }
1333 
1334 /*
1335  * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1336  */
1337 static void atmel_tx_pdc(struct uart_port *port)
1338 {
1339 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1340 	struct circ_buf *xmit = &port->state->xmit;
1341 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1342 	int count;
1343 
1344 	/* nothing left to transmit? */
1345 	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1346 		return;
1347 
1348 	xmit->tail += pdc->ofs;
1349 	xmit->tail &= UART_XMIT_SIZE - 1;
1350 
1351 	port->icount.tx += pdc->ofs;
1352 	pdc->ofs = 0;
1353 
1354 	/* more to transmit - setup next transfer */
1355 
1356 	/* disable PDC transmit */
1357 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1358 
1359 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1360 		dma_sync_single_for_device(port->dev,
1361 					   pdc->dma_addr,
1362 					   pdc->dma_size,
1363 					   DMA_TO_DEVICE);
1364 
1365 		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1366 		pdc->ofs = count;
1367 
1368 		atmel_uart_writel(port, ATMEL_PDC_TPR,
1369 				  pdc->dma_addr + xmit->tail);
1370 		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1371 		/* re-enable PDC transmit */
1372 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1373 		/* Enable interrupts */
1374 		atmel_uart_writel(port, ATMEL_US_IER,
1375 				  atmel_port->tx_done_mask);
1376 	} else {
1377 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
1378 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
1379 			/* DMA done, stop TX, start RX for RS485 */
1380 			atmel_start_rx(port);
1381 		}
1382 	}
1383 
1384 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1385 		uart_write_wakeup(port);
1386 }
1387 
1388 static int atmel_prepare_tx_pdc(struct uart_port *port)
1389 {
1390 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1391 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1392 	struct circ_buf *xmit = &port->state->xmit;
1393 
1394 	pdc->buf = xmit->buf;
1395 	pdc->dma_addr = dma_map_single(port->dev,
1396 					pdc->buf,
1397 					UART_XMIT_SIZE,
1398 					DMA_TO_DEVICE);
1399 	pdc->dma_size = UART_XMIT_SIZE;
1400 	pdc->ofs = 0;
1401 
1402 	return 0;
1403 }
1404 
1405 static void atmel_rx_from_ring(struct uart_port *port)
1406 {
1407 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1408 	struct circ_buf *ring = &atmel_port->rx_ring;
1409 	unsigned int flg;
1410 	unsigned int status;
1411 
1412 	while (ring->head != ring->tail) {
1413 		struct atmel_uart_char c;
1414 
1415 		/* Make sure c is loaded after head. */
1416 		smp_rmb();
1417 
1418 		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1419 
1420 		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1421 
1422 		port->icount.rx++;
1423 		status = c.status;
1424 		flg = TTY_NORMAL;
1425 
1426 		/*
1427 		 * note that the error handling code is
1428 		 * out of the main execution path
1429 		 */
1430 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1431 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1432 			if (status & ATMEL_US_RXBRK) {
1433 				/* ignore side-effect */
1434 				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1435 
1436 				port->icount.brk++;
1437 				if (uart_handle_break(port))
1438 					continue;
1439 			}
1440 			if (status & ATMEL_US_PARE)
1441 				port->icount.parity++;
1442 			if (status & ATMEL_US_FRAME)
1443 				port->icount.frame++;
1444 			if (status & ATMEL_US_OVRE)
1445 				port->icount.overrun++;
1446 
1447 			status &= port->read_status_mask;
1448 
1449 			if (status & ATMEL_US_RXBRK)
1450 				flg = TTY_BREAK;
1451 			else if (status & ATMEL_US_PARE)
1452 				flg = TTY_PARITY;
1453 			else if (status & ATMEL_US_FRAME)
1454 				flg = TTY_FRAME;
1455 		}
1456 
1457 
1458 		if (uart_handle_sysrq_char(port, c.ch))
1459 			continue;
1460 
1461 		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1462 	}
1463 
1464 	/*
1465 	 * Drop the lock here since it might end up calling
1466 	 * uart_start(), which takes the lock.
1467 	 */
1468 	spin_unlock(&port->lock);
1469 	tty_flip_buffer_push(&port->state->port);
1470 	spin_lock(&port->lock);
1471 }
1472 
1473 static void atmel_release_rx_pdc(struct uart_port *port)
1474 {
1475 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1476 	int i;
1477 
1478 	for (i = 0; i < 2; i++) {
1479 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1480 
1481 		dma_unmap_single(port->dev,
1482 				 pdc->dma_addr,
1483 				 pdc->dma_size,
1484 				 DMA_FROM_DEVICE);
1485 		kfree(pdc->buf);
1486 	}
1487 }
1488 
1489 static void atmel_rx_from_pdc(struct uart_port *port)
1490 {
1491 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1492 	struct tty_port *tport = &port->state->port;
1493 	struct atmel_dma_buffer *pdc;
1494 	int rx_idx = atmel_port->pdc_rx_idx;
1495 	unsigned int head;
1496 	unsigned int tail;
1497 	unsigned int count;
1498 
1499 	do {
1500 		/* Reset the UART timeout early so that we don't miss one */
1501 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1502 
1503 		pdc = &atmel_port->pdc_rx[rx_idx];
1504 		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1505 		tail = pdc->ofs;
1506 
1507 		/* If the PDC has switched buffers, RPR won't contain
1508 		 * any address within the current buffer. Since head
1509 		 * is unsigned, we just need a one-way comparison to
1510 		 * find out.
1511 		 *
1512 		 * In this case, we just need to consume the entire
1513 		 * buffer and resubmit it for DMA. This will clear the
1514 		 * ENDRX bit as well, so that we can safely re-enable
1515 		 * all interrupts below.
1516 		 */
1517 		head = min(head, pdc->dma_size);
1518 
1519 		if (likely(head != tail)) {
1520 			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1521 					pdc->dma_size, DMA_FROM_DEVICE);
1522 
1523 			/*
1524 			 * head will only wrap around when we recycle
1525 			 * the DMA buffer, and when that happens, we
1526 			 * explicitly set tail to 0. So head will
1527 			 * always be greater than tail.
1528 			 */
1529 			count = head - tail;
1530 
1531 			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1532 						count);
1533 
1534 			dma_sync_single_for_device(port->dev, pdc->dma_addr,
1535 					pdc->dma_size, DMA_FROM_DEVICE);
1536 
1537 			port->icount.rx += count;
1538 			pdc->ofs = head;
1539 		}
1540 
1541 		/*
1542 		 * If the current buffer is full, we need to check if
1543 		 * the next one contains any additional data.
1544 		 */
1545 		if (head >= pdc->dma_size) {
1546 			pdc->ofs = 0;
1547 			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1548 			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1549 
1550 			rx_idx = !rx_idx;
1551 			atmel_port->pdc_rx_idx = rx_idx;
1552 		}
1553 	} while (head >= pdc->dma_size);
1554 
1555 	/*
1556 	 * Drop the lock here since it might end up calling
1557 	 * uart_start(), which takes the lock.
1558 	 */
1559 	spin_unlock(&port->lock);
1560 	tty_flip_buffer_push(tport);
1561 	spin_lock(&port->lock);
1562 
1563 	atmel_uart_writel(port, ATMEL_US_IER,
1564 			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1565 }
1566 
1567 static int atmel_prepare_rx_pdc(struct uart_port *port)
1568 {
1569 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1570 	int i;
1571 
1572 	for (i = 0; i < 2; i++) {
1573 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1574 
1575 		pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1576 		if (pdc->buf == NULL) {
1577 			if (i != 0) {
1578 				dma_unmap_single(port->dev,
1579 					atmel_port->pdc_rx[0].dma_addr,
1580 					PDC_BUFFER_SIZE,
1581 					DMA_FROM_DEVICE);
1582 				kfree(atmel_port->pdc_rx[0].buf);
1583 			}
1584 			atmel_port->use_pdc_rx = 0;
1585 			return -ENOMEM;
1586 		}
1587 		pdc->dma_addr = dma_map_single(port->dev,
1588 						pdc->buf,
1589 						PDC_BUFFER_SIZE,
1590 						DMA_FROM_DEVICE);
1591 		pdc->dma_size = PDC_BUFFER_SIZE;
1592 		pdc->ofs = 0;
1593 	}
1594 
1595 	atmel_port->pdc_rx_idx = 0;
1596 
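	/*
	 * Double-buffered PDC receive: program the current buffer in
	 * RPR/RCR and the next one in RNPR/RNCR so reception can continue
	 * while one buffer is being drained.
	 */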
1597 	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1598 	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1599 
1600 	atmel_uart_writel(port, ATMEL_PDC_RNPR,
1601 			  atmel_port->pdc_rx[1].dma_addr);
1602 	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1603 
1604 	return 0;
1605 }
1606 
1607 /*
1608  * tasklet handling tty stuff outside the interrupt handler.
1609  */
1610 static void atmel_tasklet_rx_func(unsigned long data)
1611 {
1612 	struct uart_port *port = (struct uart_port *)data;
1613 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1614 
1615 	/* The interrupt handler does not take the lock */
1616 	spin_lock(&port->lock);
1617 	atmel_port->schedule_rx(port);
1618 	spin_unlock(&port->lock);
1619 }
1620 
1621 static void atmel_tasklet_tx_func(unsigned long data)
1622 {
1623 	struct uart_port *port = (struct uart_port *)data;
1624 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1625 
1626 	/* The interrupt handler does not take the lock */
1627 	spin_lock(&port->lock);
1628 	atmel_port->schedule_tx(port);
1629 	spin_unlock(&port->lock);
1630 }
1631 
1632 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1633 				struct platform_device *pdev)
1634 {
1635 	struct device_node *np = pdev->dev.of_node;
1636 
1637 	/* DMA/PDC usage specification */
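	/*
	 * "atmel,use-dma-rx"/"atmel,use-dma-tx" request DMA; if a "dmas"
	 * property is also present the dmaengine path is chosen, otherwise
	 * the legacy on-chip PDC is used.
	 */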
1638 	if (of_property_read_bool(np, "atmel,use-dma-rx")) {
1639 		if (of_property_read_bool(np, "dmas")) {
1640 			atmel_port->use_dma_rx  = true;
1641 			atmel_port->use_pdc_rx  = false;
1642 		} else {
1643 			atmel_port->use_dma_rx  = false;
1644 			atmel_port->use_pdc_rx  = true;
1645 		}
1646 	} else {
1647 		atmel_port->use_dma_rx  = false;
1648 		atmel_port->use_pdc_rx  = false;
1649 	}
1650 
1651 	if (of_property_read_bool(np, "atmel,use-dma-tx")) {
1652 		if (of_property_read_bool(np, "dmas")) {
1653 			atmel_port->use_dma_tx  = true;
1654 			atmel_port->use_pdc_tx  = false;
1655 		} else {
1656 			atmel_port->use_dma_tx  = false;
1657 			atmel_port->use_pdc_tx  = true;
1658 		}
1659 	} else {
1660 		atmel_port->use_dma_tx  = false;
1661 		atmel_port->use_pdc_tx  = false;
1662 	}
1663 }
1664 
1665 static void atmel_set_ops(struct uart_port *port)
1666 {
1667 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1668 
1669 	if (atmel_use_dma_rx(port)) {
1670 		atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1671 		atmel_port->schedule_rx = &atmel_rx_from_dma;
1672 		atmel_port->release_rx = &atmel_release_rx_dma;
1673 	} else if (atmel_use_pdc_rx(port)) {
1674 		atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1675 		atmel_port->schedule_rx = &atmel_rx_from_pdc;
1676 		atmel_port->release_rx = &atmel_release_rx_pdc;
1677 	} else {
1678 		atmel_port->prepare_rx = NULL;
1679 		atmel_port->schedule_rx = &atmel_rx_from_ring;
1680 		atmel_port->release_rx = NULL;
1681 	}
1682 
1683 	if (atmel_use_dma_tx(port)) {
1684 		atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1685 		atmel_port->schedule_tx = &atmel_tx_dma;
1686 		atmel_port->release_tx = &atmel_release_tx_dma;
1687 	} else if (atmel_use_pdc_tx(port)) {
1688 		atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1689 		atmel_port->schedule_tx = &atmel_tx_pdc;
1690 		atmel_port->release_tx = &atmel_release_tx_pdc;
1691 	} else {
1692 		atmel_port->prepare_tx = NULL;
1693 		atmel_port->schedule_tx = &atmel_tx_chars;
1694 		atmel_port->release_tx = NULL;
1695 	}
1696 }
1697 
1698 /*
1699  * Get the IP name: usart or uart
1700  */
1701 static void atmel_get_ip_name(struct uart_port *port)
1702 {
1703 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1704 	int name = atmel_uart_readl(port, ATMEL_US_NAME);
1705 	u32 version;
1706 	u32 usart, dbgu_uart, new_uart;
1707 	/* ASCII decoding for IP version */
1708 	usart = 0x55534152;	/* USAR(T) */
1709 	dbgu_uart = 0x44424755;	/* DBGU */
1710 	new_uart = 0x55415254;	/* UART */
1711 
1712 	/*
1713 	 * Only USART devices from at91sam9260 SOC implement fractional
1714 	 * baudrate. It is available for all asynchronous modes, with the
1715 	 * following restriction: the sampling clock's duty cycle is not
1716 	 * constant.
1717 	 */
1718 	atmel_port->has_frac_baudrate = false;
1719 	atmel_port->has_hw_timer = false;
1720 
1721 	if (name == new_uart) {
1722 		dev_dbg(port->dev, "Uart with hw timer\n");
1723 		atmel_port->has_hw_timer = true;
1724 		atmel_port->rtor = ATMEL_UA_RTOR;
1725 	} else if (name == usart) {
1726 		dev_dbg(port->dev, "Usart\n");
1727 		atmel_port->has_frac_baudrate = true;
1728 		atmel_port->has_hw_timer = true;
1729 		atmel_port->rtor = ATMEL_US_RTOR;
1730 	} else if (name == dbgu_uart) {
1731 		dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
1732 	} else {
1733 		/* fallback for older SoCs: use version field */
1734 		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1735 		switch (version) {
1736 		case 0x302:
1737 		case 0x10213:
1738 		case 0x10302:
1739 			dev_dbg(port->dev, "This version is usart\n");
1740 			atmel_port->has_frac_baudrate = true;
1741 			atmel_port->has_hw_timer = true;
1742 			atmel_port->rtor = ATMEL_US_RTOR;
1743 			break;
1744 		case 0x203:
1745 		case 0x10202:
1746 			dev_dbg(port->dev, "This version is uart\n");
1747 			break;
1748 		default:
1749 			dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
1750 		}
1751 	}
1752 }
1753 
1754 /*
1755  * Perform initialization and enable port for reception
1756  */
1757 static int atmel_startup(struct uart_port *port)
1758 {
1759 	struct platform_device *pdev = to_platform_device(port->dev);
1760 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1761 	int retval;
1762 
1763 	/*
1764 	 * Ensure that no interrupts are enabled otherwise when
1765 	 * request_irq() is called we could get stuck trying to
1766 	 * handle an unexpected interrupt
1767 	 */
1768 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1769 	atmel_port->ms_irq_enabled = false;
1770 
1771 	/*
1772 	 * Allocate the IRQ
1773 	 */
1774 	retval = request_irq(port->irq, atmel_interrupt,
1775 			     IRQF_SHARED | IRQF_COND_SUSPEND,
1776 			     dev_name(&pdev->dev), port);
1777 	if (retval) {
1778 		dev_err(port->dev, "atmel_startup - Can't get irq\n");
1779 		return retval;
1780 	}
1781 
1782 	atomic_set(&atmel_port->tasklet_shutdown, 0);
1783 	tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func,
1784 			(unsigned long)port);
1785 	tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func,
1786 			(unsigned long)port);
1787 
1788 	/*
1789 	 * Initialize DMA (if necessary)
1790 	 */
1791 	atmel_init_property(atmel_port, pdev);
1792 	atmel_set_ops(port);
1793 
1794 	if (atmel_port->prepare_rx) {
1795 		retval = atmel_port->prepare_rx(port);
1796 		if (retval < 0)
1797 			atmel_set_ops(port);
1798 	}
1799 
1800 	if (atmel_port->prepare_tx) {
1801 		retval = atmel_port->prepare_tx(port);
1802 		if (retval < 0)
1803 			atmel_set_ops(port);
1804 	}
1805 
1806 	/*
1807 	 * Enable FIFO when available
1808 	 */
1809 	if (atmel_port->fifo_size) {
1810 		unsigned int txrdym = ATMEL_US_ONE_DATA;
1811 		unsigned int rxrdym = ATMEL_US_ONE_DATA;
1812 		unsigned int fmr;
1813 
1814 		atmel_uart_writel(port, ATMEL_US_CR,
1815 				  ATMEL_US_FIFOEN |
1816 				  ATMEL_US_RXFCLR |
1817 				  ATMEL_US_TXFLCLR);
1818 
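		/*
		 * With dmaengine TX, request TXRDY only once the FIFO can
		 * accept four data (ATMEL_US_FOUR_DATA), presumably to
		 * match the 4-byte DMA bus width configured in
		 * atmel_prepare_tx_dma().
		 */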
1819 		if (atmel_use_dma_tx(port))
1820 			txrdym = ATMEL_US_FOUR_DATA;
1821 
1822 		fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
1823 		if (atmel_port->rts_high &&
1824 		    atmel_port->rts_low)
1825 			fmr |=	ATMEL_US_FRTSC |
1826 				ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1827 				ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1828 
1829 		atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1830 	}
1831 
1832 	/* Save current CSR for comparison in atmel_handle_status() */
1833 	atmel_port->irq_status_prev = atmel_get_lines_status(port);
1834 
1835 	/*
1836 	 * Finally, enable the serial port
1837 	 */
1838 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1839 	/* enable xmit & rcvr */
1840 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1841 	atmel_port->tx_stopped = false;
1842 
1843 	timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0);
1844 
1845 	if (atmel_use_pdc_rx(port)) {
1846 		/* set UART timeout */
1847 		if (!atmel_port->has_hw_timer) {
1848 			mod_timer(&atmel_port->uart_timer,
1849 					jiffies + uart_poll_timeout(port));
1850 		/* set USART timeout */
1851 		} else {
1852 			atmel_uart_writel(port, atmel_port->rtor,
1853 					  PDC_RX_TIMEOUT);
1854 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1855 
1856 			atmel_uart_writel(port, ATMEL_US_IER,
1857 					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1858 		}
1859 		/* enable PDC controller */
1860 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1861 	} else if (atmel_use_dma_rx(port)) {
1862 		/* set UART timeout */
1863 		if (!atmel_port->has_hw_timer) {
1864 			mod_timer(&atmel_port->uart_timer,
1865 					jiffies + uart_poll_timeout(port));
1866 		/* set USART timeout */
1867 		} else {
1868 			atmel_uart_writel(port, atmel_port->rtor,
1869 					  PDC_RX_TIMEOUT);
1870 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1871 
1872 			atmel_uart_writel(port, ATMEL_US_IER,
1873 					  ATMEL_US_TIMEOUT);
1874 		}
1875 	} else {
1876 		/* enable receive only */
1877 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
1878 	}
1879 
1880 	return 0;
1881 }
1882 
1883 /*
1884  * Flush any TX data submitted for DMA. Called when the TX circular
1885  * buffer is reset.
1886  */
1887 static void atmel_flush_buffer(struct uart_port *port)
1888 {
1889 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1890 
1891 	if (atmel_use_pdc_tx(port)) {
1892 		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1893 		atmel_port->pdc_tx.ofs = 0;
1894 	}
1895 	/*
1896 	 * in uart_flush_buffer(), the xmit circular buffer has just
1897 	 * been cleared, so we have to reset tx_len accordingly.
1898 	 */
1899 	atmel_port->tx_len = 0;
1900 }
1901 
1902 /*
1903  * Disable the port
1904  */
1905 static void atmel_shutdown(struct uart_port *port)
1906 {
1907 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1908 
1909 	/* Disable modem control lines interrupts */
1910 	atmel_disable_ms(port);
1911 
1912 	/* Disable interrupts at device level */
1913 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1914 
1915 	/* Prevent spurious interrupts from scheduling the tasklet */
1916 	atomic_inc(&atmel_port->tasklet_shutdown);
1917 
1918 	/*
1919 	 * Prevent any tasklets being scheduled during
1920 	 * cleanup
1921 	 */
1922 	del_timer_sync(&atmel_port->uart_timer);
1923 
1924 	/* Make sure that no interrupt is on the fly */
1925 	synchronize_irq(port->irq);
1926 
1927 	/*
1928 	 * Clear out any scheduled tasklets before
1929 	 * we destroy the buffers
1930 	 */
1931 	tasklet_kill(&atmel_port->tasklet_rx);
1932 	tasklet_kill(&atmel_port->tasklet_tx);
1933 
1934 	/*
1935 	 * Ensure everything is stopped and
1936 	 * disable port and break condition.
1937 	 */
1938 	atmel_stop_rx(port);
1939 	atmel_stop_tx(port);
1940 
1941 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1942 
1943 	/*
1944 	 * Shut-down the DMA.
1945 	 */
1946 	if (atmel_port->release_rx)
1947 		atmel_port->release_rx(port);
1948 	if (atmel_port->release_tx)
1949 		atmel_port->release_tx(port);
1950 
1951 	/*
1952 	 * Reset ring buffer pointers
1953 	 */
1954 	atmel_port->rx_ring.head = 0;
1955 	atmel_port->rx_ring.tail = 0;
1956 
1957 	/*
1958 	 * Free the interrupts
1959 	 */
1960 	free_irq(port->irq, port);
1961 
1962 	atmel_flush_buffer(port);
1963 }
1964 
1965 /*
1966  * Power / Clock management.
1967  */
1968 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
1969 			    unsigned int oldstate)
1970 {
1971 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1972 
1973 	switch (state) {
1974 	case 0:
1975 		/*
1976 		 * Enable the peripheral clock for this serial port.
1977 		 * This is called on uart_open() or a resume event.
1978 		 */
1979 		clk_prepare_enable(atmel_port->clk);
1980 
1981 		/* re-enable interrupts if we disabled some on suspend */
1982 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
1983 		break;
1984 	case 3:
1985 		/* Back up the interrupt mask and disable all interrupts */
1986 		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
1987 		atmel_uart_writel(port, ATMEL_US_IDR, -1);
1988 
1989 		/*
1990 		 * Disable the peripheral clock for this serial port.
1991 		 * This is called on uart_close() or a suspend event.
1992 		 */
1993 		clk_disable_unprepare(atmel_port->clk);
1994 		break;
1995 	default:
1996 		dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
1997 	}
1998 }
1999 
2000 /*
2001  * Change the port parameters
2002  */
2003 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
2004 			      struct ktermios *old)
2005 {
2006 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2007 	unsigned long flags;
2008 	unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;
2009 
2010 	/* save the current mode register */
2011 	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2012 
2013 	/* reset the mode, clock divisor, parity, stop bits and data size */
2014 	mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
2015 		  ATMEL_US_PAR | ATMEL_US_USMODE);
2016 
2017 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2018 
2019 	/* byte size */
2020 	switch (termios->c_cflag & CSIZE) {
2021 	case CS5:
2022 		mode |= ATMEL_US_CHRL_5;
2023 		break;
2024 	case CS6:
2025 		mode |= ATMEL_US_CHRL_6;
2026 		break;
2027 	case CS7:
2028 		mode |= ATMEL_US_CHRL_7;
2029 		break;
2030 	default:
2031 		mode |= ATMEL_US_CHRL_8;
2032 		break;
2033 	}
2034 
2035 	/* stop bits */
2036 	if (termios->c_cflag & CSTOPB)
2037 		mode |= ATMEL_US_NBSTOP_2;
2038 
2039 	/* parity */
2040 	if (termios->c_cflag & PARENB) {
2041 		/* Mark or Space parity */
2042 		if (termios->c_cflag & CMSPAR) {
2043 			if (termios->c_cflag & PARODD)
2044 				mode |= ATMEL_US_PAR_MARK;
2045 			else
2046 				mode |= ATMEL_US_PAR_SPACE;
2047 		} else if (termios->c_cflag & PARODD)
2048 			mode |= ATMEL_US_PAR_ODD;
2049 		else
2050 			mode |= ATMEL_US_PAR_EVEN;
2051 	} else
2052 		mode |= ATMEL_US_PAR_NONE;
2053 
2054 	spin_lock_irqsave(&port->lock, flags);
2055 
2056 	port->read_status_mask = ATMEL_US_OVRE;
2057 	if (termios->c_iflag & INPCK)
2058 		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2059 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2060 		port->read_status_mask |= ATMEL_US_RXBRK;
2061 
2062 	if (atmel_use_pdc_rx(port))
2063 		/* need to enable error interrupts */
2064 		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2065 
2066 	/*
2067 	 * Characters to ignore
2068 	 */
2069 	port->ignore_status_mask = 0;
2070 	if (termios->c_iflag & IGNPAR)
2071 		port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2072 	if (termios->c_iflag & IGNBRK) {
2073 		port->ignore_status_mask |= ATMEL_US_RXBRK;
2074 		/*
2075 		 * If we're ignoring parity and break indicators,
2076 		 * ignore overruns too (for real raw support).
2077 		 */
2078 		if (termios->c_iflag & IGNPAR)
2079 			port->ignore_status_mask |= ATMEL_US_OVRE;
2080 	}
2081 	/* TODO: Ignore all characters if CREAD is not set. */
2082 
2083 	/* update the per-port timeout */
2084 	uart_update_timeout(port, termios->c_cflag, baud);
2085 
2086 	/*
2087 	 * save/disable interrupts. The tty layer will ensure that the
2088 	 * transmitter is empty if requested by the caller, so there's
2089 	 * no need to wait for it here.
2090 	 */
2091 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2092 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2093 
2094 	/* disable receiver and transmitter */
2095 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2096 	atmel_port->tx_stopped = true;
2097 
2098 	/* mode */
2099 	if (port->rs485.flags & SER_RS485_ENABLED) {
2100 		atmel_uart_writel(port, ATMEL_US_TTGR,
2101 				  port->rs485.delay_rts_after_send);
2102 		mode |= ATMEL_US_USMODE_RS485;
2103 	} else if (termios->c_cflag & CRTSCTS) {
2104 		/* RS232 with hardware handshake (RTS/CTS) */
2105 		if (atmel_use_fifo(port) &&
2106 		    !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
2107 			/*
2108 			 * with ATMEL_US_USMODE_HWHS set, the controller will
2109 			 * be able to drive the RTS pin high/low when the RX
2110 			 * FIFO is above RXFTHRES/below RXFTHRES2.
2111 			 * It will also disable the transmitter when the CTS
2112 			 * pin is high.
2113 			 * This mode is not activated if the CTS pin is a GPIO
2114 			 * because, in that case, the transmitter is always
2115 			 * disabled (there must be an internal pull-up
2116 			 * responsible for this behaviour).
2117 			 * If the RTS pin is a GPIO, the controller won't be
2118 			 * able to drive it according to the FIFO thresholds,
2119 			 * but it will be handled by the driver.
2120 			 */
2121 			mode |= ATMEL_US_USMODE_HWHS;
2122 		} else {
2123 			/*
2124 			 * For platforms without FIFO, the flow control is
2125 			 * handled by the driver.
2126 			 */
2127 			mode |= ATMEL_US_USMODE_NORMAL;
2128 		}
2129 	} else {
2130 		/* RS232 without hardware handshake */
2131 		mode |= ATMEL_US_USMODE_NORMAL;
2132 	}
2133 
2134 	/* set the mode, clock divisor, parity, stop bits and data size */
2135 	atmel_uart_writel(port, ATMEL_US_MR, mode);
2136 
2137 	/*
2138 	 * When switching the mode, set the RTS line state according to the
2139 	 * new mode; otherwise keep the former state.
2140 	 */
2141 	if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2142 		unsigned int rts_state;
2143 
2144 		if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2145 			/* let the hardware control the RTS line */
2146 			rts_state = ATMEL_US_RTSDIS;
2147 		} else {
2148 			/* force RTS line to low level */
2149 			rts_state = ATMEL_US_RTSEN;
2150 		}
2151 
2152 		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2153 	}
2154 
2155 	/*
2156 	 * Set the baud rate:
2157 	 * The fractional baud rate generator allows the output frequency to
2158 	 * be set more accurately. It is only enabled when using normal mode.
2159 	 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
2160 	 * Currently, OVER is always set to 0 so we get
2161 	 * baudrate = selected clock / (16 * (CD + FP / 8))
2162 	 * then
2163 	 * 8 CD + FP = selected clock / (2 * baudrate)
2164 	 */
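	/*
	 * Worked example with hypothetical values: for a 66 MHz peripheral
	 * clock and a requested baud rate of 115200, the fractional path
	 * below yields div = DIV_ROUND_CLOSEST(66000000, 230400) = 286,
	 * hence CD = 286 / 8 = 35 and FP = 286 % 8 = 6, for an actual rate
	 * of 66000000 / (16 * (35 + 6/8)) ~= 115385, about 0.16% off.
	 * The integer-only path would pick CD = 36 and land ~0.5% off.
	 */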
2165 	if (atmel_port->has_frac_baudrate) {
2166 		div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
2167 		cd = div >> 3;
2168 		fp = div & ATMEL_US_FP_MASK;
2169 	} else {
2170 		cd = uart_get_divisor(port, baud);
2171 	}
2172 
2173 	if (cd > 65535) {	/* BRGR is 16-bit, so switch to slower clock */
2174 		cd /= 8;
2175 		mode |= ATMEL_US_USCLKS_MCK_DIV8;
		/* MR was written above, so rewrite it for DIV8 to take effect */
		atmel_uart_writel(port, ATMEL_US_MR, mode);
2176 	}
2177 	quot = cd | fp << ATMEL_US_FP_OFFSET;
2178 
2179 	atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2180 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2181 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2182 	atmel_port->tx_stopped = false;
2183 
2184 	/* restore interrupts */
2185 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2186 
2187 	/* CTS flow-control and modem-status interrupts */
2188 	if (UART_ENABLE_MS(port, termios->c_cflag))
2189 		atmel_enable_ms(port);
2190 	else
2191 		atmel_disable_ms(port);
2192 
2193 	spin_unlock_irqrestore(&port->lock, flags);
2194 }
2195 
2196 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2197 {
2198 	if (termios->c_line == N_PPS) {
2199 		port->flags |= UPF_HARDPPS_CD;
2200 		spin_lock_irq(&port->lock);
2201 		atmel_enable_ms(port);
2202 		spin_unlock_irq(&port->lock);
2203 	} else {
2204 		port->flags &= ~UPF_HARDPPS_CD;
2205 		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2206 			spin_lock_irq(&port->lock);
2207 			atmel_disable_ms(port);
2208 			spin_unlock_irq(&port->lock);
2209 		}
2210 	}
2211 }
2212 
2213 /*
2214  * Return string describing the specified port
2215  */
2216 static const char *atmel_type(struct uart_port *port)
2217 {
2218 	return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2219 }
2220 
2221 /*
2222  * Release the memory region(s) being used by 'port'.
2223  */
2224 static void atmel_release_port(struct uart_port *port)
2225 {
2226 	struct platform_device *mpdev = to_platform_device(port->dev->parent);
2227 	int size = resource_size(mpdev->resource);
2228 
2229 	release_mem_region(port->mapbase, size);
2230 
2231 	if (port->flags & UPF_IOREMAP) {
2232 		iounmap(port->membase);
2233 		port->membase = NULL;
2234 	}
2235 }
2236 
2237 /*
2238  * Request the memory region(s) being used by 'port'.
2239  */
2240 static int atmel_request_port(struct uart_port *port)
2241 {
2242 	struct platform_device *mpdev = to_platform_device(port->dev->parent);
2243 	int size = resource_size(mpdev->resource);
2244 
2245 	if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2246 		return -EBUSY;
2247 
2248 	if (port->flags & UPF_IOREMAP) {
2249 		port->membase = ioremap(port->mapbase, size);
2250 		if (port->membase == NULL) {
2251 			release_mem_region(port->mapbase, size);
2252 			return -ENOMEM;
2253 		}
2254 	}
2255 
2256 	return 0;
2257 }
2258 
2259 /*
2260  * Configure/autoconfigure the port.
2261  */
2262 static void atmel_config_port(struct uart_port *port, int flags)
2263 {
2264 	if (flags & UART_CONFIG_TYPE) {
2265 		port->type = PORT_ATMEL;
2266 		atmel_request_port(port);
2267 	}
2268 }
2269 
2270 /*
2271  * Verify the new serial_struct (for TIOCSSERIAL).
2272  */
2273 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2274 {
2275 	int ret = 0;
2276 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2277 		ret = -EINVAL;
2278 	if (port->irq != ser->irq)
2279 		ret = -EINVAL;
2280 	if (ser->io_type != SERIAL_IO_MEM)
2281 		ret = -EINVAL;
2282 	if (port->uartclk / 16 != ser->baud_base)
2283 		ret = -EINVAL;
2284 	if (port->mapbase != (unsigned long)ser->iomem_base)
2285 		ret = -EINVAL;
2286 	if (port->iobase != ser->port)
2287 		ret = -EINVAL;
2288 	if (ser->hub6 != 0)
2289 		ret = -EINVAL;
2290 	return ret;
2291 }
2292 
2293 #ifdef CONFIG_CONSOLE_POLL
2294 static int atmel_poll_get_char(struct uart_port *port)
2295 {
2296 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2297 		cpu_relax();
2298 
2299 	return atmel_uart_read_char(port);
2300 }
2301 
2302 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2303 {
2304 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2305 		cpu_relax();
2306 
2307 	atmel_uart_write_char(port, ch);
2308 }
2309 #endif
2310 
2311 static const struct uart_ops atmel_pops = {
2312 	.tx_empty	= atmel_tx_empty,
2313 	.set_mctrl	= atmel_set_mctrl,
2314 	.get_mctrl	= atmel_get_mctrl,
2315 	.stop_tx	= atmel_stop_tx,
2316 	.start_tx	= atmel_start_tx,
2317 	.stop_rx	= atmel_stop_rx,
2318 	.enable_ms	= atmel_enable_ms,
2319 	.break_ctl	= atmel_break_ctl,
2320 	.startup	= atmel_startup,
2321 	.shutdown	= atmel_shutdown,
2322 	.flush_buffer	= atmel_flush_buffer,
2323 	.set_termios	= atmel_set_termios,
2324 	.set_ldisc	= atmel_set_ldisc,
2325 	.type		= atmel_type,
2326 	.release_port	= atmel_release_port,
2327 	.request_port	= atmel_request_port,
2328 	.config_port	= atmel_config_port,
2329 	.verify_port	= atmel_verify_port,
2330 	.pm		= atmel_serial_pm,
2331 #ifdef CONFIG_CONSOLE_POLL
2332 	.poll_get_char	= atmel_poll_get_char,
2333 	.poll_put_char	= atmel_poll_put_char,
2334 #endif
2335 };
2336 
2337 /*
2338  * Configure the port from the platform device resource info.
2339  */
2340 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2341 				      struct platform_device *pdev)
2342 {
2343 	int ret;
2344 	struct uart_port *port = &atmel_port->uart;
2345 	struct platform_device *mpdev = to_platform_device(pdev->dev.parent);
2346 
2347 	atmel_init_property(atmel_port, pdev);
2348 	atmel_set_ops(port);
2349 
2350 	uart_get_rs485_mode(&mpdev->dev, &port->rs485);
2351 
2352 	port->iotype		= UPIO_MEM;
2353 	port->flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP;
2354 	port->ops		= &atmel_pops;
2355 	port->fifosize		= 1;
2356 	port->dev		= &pdev->dev;
2357 	port->mapbase		= mpdev->resource[0].start;
2358 	port->irq		= mpdev->resource[1].start;
2359 	port->rs485_config	= atmel_config_rs485;
2360 	port->membase		= NULL;
2361 
2362 	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2363 
2364 	/* for console, the clock could already be configured */
2365 	if (!atmel_port->clk) {
2366 		atmel_port->clk = clk_get(&mpdev->dev, "usart");
2367 		if (IS_ERR(atmel_port->clk)) {
2368 			ret = PTR_ERR(atmel_port->clk);
2369 			atmel_port->clk = NULL;
2370 			return ret;
2371 		}
2372 		ret = clk_prepare_enable(atmel_port->clk);
2373 		if (ret) {
2374 			clk_put(atmel_port->clk);
2375 			atmel_port->clk = NULL;
2376 			return ret;
2377 		}
2378 		port->uartclk = clk_get_rate(atmel_port->clk);
2379 		clk_disable_unprepare(atmel_port->clk);
2380 		/* only enable clock when USART is in use */
2381 	}
2382 
2383 	/* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
2384 	if (port->rs485.flags & SER_RS485_ENABLED)
2385 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2386 	else if (atmel_use_pdc_tx(port)) {
2387 		port->fifosize = PDC_BUFFER_SIZE;
2388 		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2389 	} else {
2390 		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2391 	}
2392 
2393 	return 0;
2394 }
2395 
2396 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2397 static void atmel_console_putchar(struct uart_port *port, int ch)
2398 {
2399 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2400 		cpu_relax();
2401 	atmel_uart_write_char(port, ch);
2402 }
2403 
2404 /*
2405  * Interrupts are disabled on entering
2406  */
2407 static void atmel_console_write(struct console *co, const char *s, u_int count)
2408 {
2409 	struct uart_port *port = &atmel_ports[co->index].uart;
2410 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2411 	unsigned int status, imr;
2412 	unsigned int pdc_tx;
2413 
2414 	/*
2415 	 * First, save IMR and then disable interrupts
2416 	 */
2417 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2418 	atmel_uart_writel(port, ATMEL_US_IDR,
2419 			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2420 
2421 	/* Store PDC transmit status and disable it */
2422 	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2423 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2424 
2425 	/* Make sure that tx path is actually able to send characters */
2426 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2427 	atmel_port->tx_stopped = false;
2428 
2429 	uart_console_write(port, s, count, atmel_console_putchar);
2430 
2431 	/*
2432 	 * Finally, wait for transmitter to become empty
2433 	 * and restore IMR
2434 	 */
2435 	do {
2436 		status = atmel_uart_readl(port, ATMEL_US_CSR);
2437 	} while (!(status & ATMEL_US_TXRDY));
2438 
2439 	/* Restore PDC transmit status */
2440 	if (pdc_tx)
2441 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2442 
2443 	/* set interrupts back the way they were */
2444 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2445 }
2446 
2447 /*
2448  * If the port was already initialised (e.g. by a boot loader),
2449  * try to determine the current setup.
2450  */
2451 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2452 					     int *parity, int *bits)
2453 {
2454 	unsigned int mr, quot;
2455 
2456 	/*
2457 	 * If the baud rate generator isn't running, the port wasn't
2458 	 * initialized by the boot loader.
2459 	 */
2460 	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2461 	if (!quot)
2462 		return;
2463 
2464 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2465 	if (mr == ATMEL_US_CHRL_8)
2466 		*bits = 8;
2467 	else
2468 		*bits = 7;
2469 
2470 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2471 	if (mr == ATMEL_US_PAR_EVEN)
2472 		*parity = 'e';
2473 	else if (mr == ATMEL_US_PAR_ODD)
2474 		*parity = 'o';
2475 
2476 	/*
2477 	 * The serial core only rounds down when matching this to a
2478 	 * supported baud rate. Make sure we don't end up slightly
2479 	 * lower than one of those, as it would make us fall through
2480 	 * to a much lower baud rate than we really want.
2481 	 */
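	/*
	 * For example (hypothetical values): with a 66 MHz clock and
	 * quot = 36, the exact rate is 66000000 / (16 * 36) ~= 114583,
	 * which the core would round down to a much lower supported rate
	 * such as 57600. Using (quot - 1) below reports ~117857 instead,
	 * so the round-down still lands on 115200.
	 */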
2482 	*baud = port->uartclk / (16 * (quot - 1));
2483 }
2484 
2485 static int __init atmel_console_setup(struct console *co, char *options)
2486 {
2487 	int ret;
2488 	struct uart_port *port = &atmel_ports[co->index].uart;
2489 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2490 	int baud = 115200;
2491 	int bits = 8;
2492 	int parity = 'n';
2493 	int flow = 'n';
2494 
2495 	if (port->membase == NULL) {
2496 		/* Port not initialized yet - delay setup */
2497 		return -ENODEV;
2498 	}
2499 
2500 	ret = clk_prepare_enable(atmel_ports[co->index].clk);
2501 	if (ret)
2502 		return ret;
2503 
2504 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2505 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2506 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2507 	atmel_port->tx_stopped = false;
2508 
2509 	if (options)
2510 		uart_parse_options(options, &baud, &parity, &bits, &flow);
2511 	else
2512 		atmel_console_get_options(port, &baud, &parity, &bits);
2513 
2514 	return uart_set_options(port, co, baud, parity, bits, flow);
2515 }
2516 
2517 static struct uart_driver atmel_uart;
2518 
2519 static struct console atmel_console = {
2520 	.name		= ATMEL_DEVICENAME,
2521 	.write		= atmel_console_write,
2522 	.device		= uart_console_device,
2523 	.setup		= atmel_console_setup,
2524 	.flags		= CON_PRINTBUFFER,
2525 	.index		= -1,
2526 	.data		= &atmel_uart,
2527 };
2528 
2529 #define ATMEL_CONSOLE_DEVICE	(&atmel_console)
2530 
2531 static inline bool atmel_is_console_port(struct uart_port *port)
2532 {
2533 	return port->cons && port->cons->index == port->line;
2534 }
2535 
2536 #else
2537 #define ATMEL_CONSOLE_DEVICE	NULL
2538 
2539 static inline bool atmel_is_console_port(struct uart_port *port)
2540 {
2541 	return false;
2542 }
2543 #endif
2544 
2545 static struct uart_driver atmel_uart = {
2546 	.owner		= THIS_MODULE,
2547 	.driver_name	= "atmel_serial",
2548 	.dev_name	= ATMEL_DEVICENAME,
2549 	.major		= SERIAL_ATMEL_MAJOR,
2550 	.minor		= MINOR_START,
2551 	.nr		= ATMEL_MAX_UART,
2552 	.cons		= ATMEL_CONSOLE_DEVICE,
2553 };
2554 
2555 #ifdef CONFIG_PM
2556 static bool atmel_serial_clk_will_stop(void)
2557 {
2558 #ifdef CONFIG_ARCH_AT91
2559 	return at91_suspend_entering_slow_clock();
2560 #else
2561 	return false;
2562 #endif
2563 }
2564 
2565 static int atmel_serial_suspend(struct platform_device *pdev,
2566 				pm_message_t state)
2567 {
2568 	struct uart_port *port = platform_get_drvdata(pdev);
2569 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2570 
2571 	if (atmel_is_console_port(port) && console_suspend_enabled) {
2572 		/* Drain the TX shifter */
2573 		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
2574 			 ATMEL_US_TXEMPTY))
2575 			cpu_relax();
2576 	}
2577 
2578 	if (atmel_is_console_port(port) && !console_suspend_enabled) {
2579 		/* Cache register values as we won't get a full shutdown/startup
2580 		 * cycle
2581 		 */
2582 		atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
2583 		atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
2584 		atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
2585 		atmel_port->cache.rtor = atmel_uart_readl(port,
2586 							  atmel_port->rtor);
2587 		atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
2588 		atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
2589 		atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
2590 	}
2591 
2592 	/* We cannot wake up if we're running on the slow clock */
2593 	atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
2594 	if (atmel_serial_clk_will_stop()) {
2595 		unsigned long flags;
2596 
2597 		spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2598 		atmel_port->suspended = true;
2599 		spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2600 		device_set_wakeup_enable(&pdev->dev, 0);
2601 	}
2602 
2603 	uart_suspend_port(&atmel_uart, port);
2604 
2605 	return 0;
2606 }
2607 
2608 static int atmel_serial_resume(struct platform_device *pdev)
2609 {
2610 	struct uart_port *port = platform_get_drvdata(pdev);
2611 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2612 	unsigned long flags;
2613 
2614 	if (atmel_is_console_port(port) && !console_suspend_enabled) {
2615 		atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
2616 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
2617 		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
2618 		atmel_uart_writel(port, atmel_port->rtor,
2619 				  atmel_port->cache.rtor);
2620 		atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
2621 
2622 		if (atmel_port->fifo_size) {
2623 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
2624 					  ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
2625 			atmel_uart_writel(port, ATMEL_US_FMR,
2626 					  atmel_port->cache.fmr);
2627 			atmel_uart_writel(port, ATMEL_US_FIER,
2628 					  atmel_port->cache.fimr);
2629 		}
2630 		atmel_start_rx(port);
2631 	}
2632 
2633 	spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2634 	if (atmel_port->pending) {
2635 		atmel_handle_receive(port, atmel_port->pending);
2636 		atmel_handle_status(port, atmel_port->pending,
2637 				    atmel_port->pending_status);
2638 		atmel_handle_transmit(port, atmel_port->pending);
2639 		atmel_port->pending = 0;
2640 	}
2641 	atmel_port->suspended = false;
2642 	spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2643 
2644 	uart_resume_port(&atmel_uart, port);
2645 	device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
2646 
2647 	return 0;
2648 }
2649 #else
2650 #define atmel_serial_suspend NULL
2651 #define atmel_serial_resume NULL
2652 #endif
2653 
2654 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
2655 				     struct platform_device *pdev)
2656 {
2657 	atmel_port->fifo_size = 0;
2658 	atmel_port->rts_low = 0;
2659 	atmel_port->rts_high = 0;
2660 
2661 	if (of_property_read_u32(pdev->dev.of_node,
2662 				 "atmel,fifo-size",
2663 				 &atmel_port->fifo_size))
2664 		return;
2665 
2666 	if (!atmel_port->fifo_size)
2667 		return;
2668 
2669 	if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2670 		atmel_port->fifo_size = 0;
2671 		dev_err(&pdev->dev, "Invalid FIFO size\n");
2672 		return;
2673 	}
2674 
2675 	/*
2676 	 * 0 <= rts_low <= rts_high <= fifo_size
2677 	 * Once their CTS line is asserted by the remote peer, some x86 UARTs
2678 	 * tend to flush their internal TX FIFO, commonly up to 16 data, before
2679 	 * they actually stop sending new data. So we try to set the RTS High
2680 	 * Threshold to a reasonably high value that respects this empirical
2681 	 * 16-data rule when possible.
2682 	 */
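	/*
	 * Worked example (hypothetical FIFO size): with a 32-data RX FIFO and
	 * the RTS high/low offsets of 16 and 20 defined at the top of this
	 * file, the lines below give rts_high = max(32 / 2, 32 - 16) = 16 and
	 * rts_low = max(32 / 4, 32 - 20) = 12, i.e. RTS is deasserted once 16
	 * data are pending and asserted again when the FIFO drains back to 12.
	 */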
2683 	atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
2684 			       atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2685 	atmel_port->rts_low  = max_t(int, atmel_port->fifo_size >> 2,
2686 			       atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
2687 
2688 	dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2689 		 atmel_port->fifo_size);
2690 	dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2691 		atmel_port->rts_high);
2692 	dev_dbg(&pdev->dev, "RTS Low Threshold  : %2u data\n",
2693 		atmel_port->rts_low);
2694 }
2695 
2696 static int atmel_serial_probe(struct platform_device *pdev)
2697 {
2698 	struct atmel_uart_port *atmel_port;
2699 	struct device_node *np = pdev->dev.parent->of_node;
2700 	void *data;
2701 	int ret = -ENODEV;
2702 	bool rs485_enabled;
2703 
2704 	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
2705 
2706 	/*
2707 	 * In the device tree there is no node with "atmel,at91rm9200-usart-serial"
2708 	 * as its compatible string. This driver is probed by the at91-usart MFD
2709 	 * driver, which is just a wrapper over the atmel_serial and
2710 	 * spi-at91-usart drivers. All attributes needed by this driver are
2711 	 * found in the of_node of the parent.
2712 	 */
2713 	pdev->dev.of_node = np;
2714 
2715 	ret = of_alias_get_id(np, "serial");
2716 	if (ret < 0)
2717 		/* port id not found in platform data or device-tree aliases:
2718 		 * auto-enumerate it */
2719 		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2720 
2721 	if (ret >= ATMEL_MAX_UART) {
2722 		ret = -ENODEV;
2723 		goto err;
2724 	}
2725 
2726 	if (test_and_set_bit(ret, atmel_ports_in_use)) {
2727 		/* port already in use */
2728 		ret = -EBUSY;
2729 		goto err;
2730 	}
2731 
2732 	atmel_port = &atmel_ports[ret];
2733 	atmel_port->backup_imr = 0;
2734 	atmel_port->uart.line = ret;
2735 	atmel_serial_probe_fifos(atmel_port, pdev);
2736 
2737 	atomic_set(&atmel_port->tasklet_shutdown, 0);
2738 	spin_lock_init(&atmel_port->lock_suspended);
2739 
2740 	ret = atmel_init_port(atmel_port, pdev);
2741 	if (ret)
2742 		goto err_clear_bit;
2743 
2744 	atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
2745 	if (IS_ERR(atmel_port->gpios)) {
2746 		ret = PTR_ERR(atmel_port->gpios);
2747 		goto err_clear_bit;
2748 	}
2749 
2750 	if (!atmel_use_pdc_rx(&atmel_port->uart)) {
2751 		ret = -ENOMEM;
2752 		data = kmalloc_array(ATMEL_SERIAL_RINGSIZE,
2753 				     sizeof(struct atmel_uart_char),
2754 				     GFP_KERNEL);
2755 		if (!data)
2756 			goto err_alloc_ring;
2757 		atmel_port->rx_ring.buf = data;
2758 	}
2759 
2760 	rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
2761 
2762 	ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
2763 	if (ret)
2764 		goto err_add_port;
2765 
2766 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2767 	if (atmel_is_console_port(&atmel_port->uart)
2768 			&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
2769 		/*
2770 		 * The serial core enabled the clock for us, so undo
2771 		 * the clk_prepare_enable() in atmel_console_setup()
2772 		 */
2773 		clk_disable_unprepare(atmel_port->clk);
2774 	}
2775 #endif
2776 
2777 	device_init_wakeup(&pdev->dev, 1);
2778 	platform_set_drvdata(pdev, atmel_port);
2779 
2780 	/*
2781 	 * The peripheral clock has been disabled by atmel_init_port():
2782 	 * enable it before accessing I/O registers
2783 	 */
2784 	clk_prepare_enable(atmel_port->clk);
2785 
2786 	if (rs485_enabled) {
2787 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
2788 				  ATMEL_US_USMODE_NORMAL);
2789 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
2790 				  ATMEL_US_RTSEN);
2791 	}
2792 
2793 	/*
2794 	 * Get port name of usart or uart
2795 	 */
2796 	atmel_get_ip_name(&atmel_port->uart);
2797 
2798 	/*
2799 	 * The peripheral clock can now safely be disabled till the port
2800 	 * is used
2801 	 */
2802 	clk_disable_unprepare(atmel_port->clk);
2803 
2804 	return 0;
2805 
2806 err_add_port:
2807 	kfree(atmel_port->rx_ring.buf);
2808 	atmel_port->rx_ring.buf = NULL;
2809 err_alloc_ring:
2810 	if (!atmel_is_console_port(&atmel_port->uart)) {
2811 		clk_put(atmel_port->clk);
2812 		atmel_port->clk = NULL;
2813 	}
2814 err_clear_bit:
2815 	clear_bit(atmel_port->uart.line, atmel_ports_in_use);
2816 err:
2817 	return ret;
2818 }
2819 
2820 /*
2821  * Even if the driver is not modular, it makes sense to be able to
2822  * unbind a device: there can be many bound devices, and there are
2823  * situations where dynamic binding and unbinding can be useful.
2824  *
2825  * For example, a connected device can require a specific firmware update
2826  * protocol that needs bitbanging on IO lines, but use the regular serial
2827  * port in the normal case.
2828  */
2829 static int atmel_serial_remove(struct platform_device *pdev)
2830 {
2831 	struct uart_port *port = platform_get_drvdata(pdev);
2832 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2833 	int ret = 0;
2834 
2835 	tasklet_kill(&atmel_port->tasklet_rx);
2836 	tasklet_kill(&atmel_port->tasklet_tx);
2837 
2838 	device_init_wakeup(&pdev->dev, 0);
2839 
2840 	ret = uart_remove_one_port(&atmel_uart, port);
2841 
2842 	kfree(atmel_port->rx_ring.buf);
2843 
2844 	/* "port" is allocated statically, so we shouldn't free it */
2845 
2846 	clear_bit(port->line, atmel_ports_in_use);
2847 
2848 	clk_put(atmel_port->clk);
2849 	atmel_port->clk = NULL;
2850 	pdev->dev.of_node = NULL;
2851 
2852 	return ret;
2853 }
2854 
2855 static struct platform_driver atmel_serial_driver = {
2856 	.probe		= atmel_serial_probe,
2857 	.remove		= atmel_serial_remove,
2858 	.suspend	= atmel_serial_suspend,
2859 	.resume		= atmel_serial_resume,
2860 	.driver		= {
2861 		.name			= "atmel_usart_serial",
2862 		.of_match_table		= of_match_ptr(atmel_serial_dt_ids),
2863 	},
2864 };
2865 
2866 static int __init atmel_serial_init(void)
2867 {
2868 	int ret;
2869 
2870 	ret = uart_register_driver(&atmel_uart);
2871 	if (ret)
2872 		return ret;
2873 
2874 	ret = platform_driver_register(&atmel_serial_driver);
2875 	if (ret)
2876 		uart_unregister_driver(&atmel_uart);
2877 
2878 	return ret;
2879 }
2880 device_initcall(atmel_serial_init);
2881