xref: /linux/drivers/tty/serial/atmel_serial.c (revision 16cd1c2657762c62a00ac78eecaa25868f7e601b)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  *  Driver for Atmel AT91 Serial ports
4  *  Copyright (C) 2003 Rick Bronson
5  *
6  *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
7  *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
8  *
9  *  DMA support added by Chip Coldwell.
10  */
11 #include <linux/circ_buf.h>
12 #include <linux/tty.h>
13 #include <linux/ioport.h>
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/serial.h>
17 #include <linux/clk.h>
18 #include <linux/clk-provider.h>
19 #include <linux/console.h>
20 #include <linux/sysrq.h>
21 #include <linux/tty_flip.h>
22 #include <linux/platform_device.h>
23 #include <linux/of.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/dmaengine.h>
26 #include <linux/atmel_pdc.h>
27 #include <linux/uaccess.h>
28 #include <linux/platform_data/atmel.h>
29 #include <linux/timer.h>
30 #include <linux/err.h>
31 #include <linux/irq.h>
32 #include <linux/suspend.h>
33 #include <linux/mm.h>
34 #include <linux/io.h>
35 
36 #include <asm/div64.h>
37 #include <asm/ioctls.h>
38 
39 #define PDC_BUFFER_SIZE		512
40 /* Revisit: We should calculate this based on the actual port settings */
41 #define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */
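/*
 * The receiver time-out counter runs at the bit rate, so assuming roughly
 * 10 bit times per character (start + 8 data + stop) this waits about
 * three idle character times before flushing received data.
 */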
42 
43 /* The minimum number of data the FIFOs should be able to contain */
44 #define ATMEL_MIN_FIFO_SIZE	8
45 /*
46  * These two offsets are subtracted from the RX FIFO size to define the RTS
47  * high and low thresholds
48  */
49 #define ATMEL_RTS_HIGH_OFFSET	16
50 #define ATMEL_RTS_LOW_OFFSET	20
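/*
 * For example, a 32-byte RX FIFO would give an RTS-high threshold of
 * 32 - 16 = 16 bytes and an RTS-low threshold of 32 - 20 = 12 bytes.
 */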
51 
52 #include <linux/serial_core.h>
53 
54 #include "serial_mctrl_gpio.h"
55 #include "atmel_serial.h"
56 
57 static void atmel_start_rx(struct uart_port *port);
58 static void atmel_stop_rx(struct uart_port *port);
59 
60 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
61 
62 /* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
63  * should coexist with the 8250 driver, such as if we have an external 16C550
64  * UART. */
65 #define SERIAL_ATMEL_MAJOR	204
66 #define MINOR_START		154
67 #define ATMEL_DEVICENAME	"ttyAT"
68 
69 #else
70 
71 /* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
72  * name, but it is legally reserved for the 8250 driver. */
73 #define SERIAL_ATMEL_MAJOR	TTY_MAJOR
74 #define MINOR_START		64
75 #define ATMEL_DEVICENAME	"ttyS"
76 
77 #endif
78 
79 #define ATMEL_ISR_PASS_LIMIT	256
80 
81 struct atmel_dma_buffer {
82 	unsigned char	*buf;
83 	dma_addr_t	dma_addr;
84 	unsigned int	dma_size;
85 	unsigned int	ofs;
86 };
87 
88 struct atmel_uart_char {
89 	u16		status;
90 	u16		ch;
91 };
92 
93 /*
94  * Be careful, the real size of the ring buffer is
95  * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that the ring buffer
96  * can contain up to 1024 characters in PIO mode and up to 4096 characters in
97  * DMA mode.
98  */
99 #define ATMEL_SERIAL_RINGSIZE	1024
100 #define ATMEL_SERIAL_RX_SIZE	array_size(sizeof(struct atmel_uart_char), \
101 					   ATMEL_SERIAL_RINGSIZE)
102 
103 /*
104  * at91: 6 USARTs and one DBGU port (SAM9260)
105  * samx7: 3 USARTs and 5 UARTs
106  */
107 #define ATMEL_MAX_UART		8
108 
109 /*
110  * We wrap our port structure around the generic uart_port.
111  */
112 struct atmel_uart_port {
113 	struct uart_port	uart;		/* uart */
114 	struct clk		*clk;		/* uart clock */
115 	struct clk		*gclk;		/* uart generic clock */
116 	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
117 	u32			backup_imr;	/* IMR saved during suspend */
118 	int			break_active;	/* break being received */
119 
120 	bool			use_dma_rx;	/* enable DMA receiver */
121 	bool			use_pdc_rx;	/* enable PDC receiver */
122 	short			pdc_rx_idx;	/* current PDC RX buffer */
123 	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */
124 
125 	bool			use_dma_tx;     /* enable DMA transmitter */
126 	bool			use_pdc_tx;	/* enable PDC transmitter */
127 	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */
128 
129 	spinlock_t			lock_tx;	/* port lock */
130 	spinlock_t			lock_rx;	/* port lock */
131 	struct dma_chan			*chan_tx;
132 	struct dma_chan			*chan_rx;
133 	struct dma_async_tx_descriptor	*desc_tx;
134 	struct dma_async_tx_descriptor	*desc_rx;
135 	dma_cookie_t			cookie_tx;
136 	dma_cookie_t			cookie_rx;
137 	dma_addr_t			tx_phys;
138 	dma_addr_t			rx_phys;
139 	struct tasklet_struct	tasklet_rx;
140 	struct tasklet_struct	tasklet_tx;
141 	atomic_t		tasklet_shutdown;
142 	unsigned int		irq_status_prev;
143 	unsigned int		tx_len;
144 
145 	struct circ_buf		rx_ring;
146 
147 	struct mctrl_gpios	*gpios;
148 	u32			backup_mode;	/* MR saved during iso7816 operations */
149 	u32			backup_brgr;	/* BRGR saved during iso7816 operations */
150 	unsigned int		tx_done_mask;
151 	u32			fifo_size;
152 	u32			rts_high;
153 	u32			rts_low;
154 	bool			ms_irq_enabled;
155 	u32			rtor;	/* address of receiver timeout register if it exists */
156 	bool			is_usart;
157 	bool			has_frac_baudrate;
158 	bool			has_hw_timer;
159 	struct timer_list	uart_timer;
160 
161 	bool			tx_stopped;
162 	bool			suspended;
163 	unsigned int		pending;
164 	unsigned int		pending_status;
165 	spinlock_t		lock_suspended;
166 
167 	bool			hd_start_rx;	/* can start RX during half-duplex operation */
168 
169 	/* ISO7816 */
170 	unsigned int		fidi_min;
171 	unsigned int		fidi_max;
172 
173 	struct {
174 		u32		cr;
175 		u32		mr;
176 		u32		imr;
177 		u32		brgr;
178 		u32		rtor;
179 		u32		ttgr;
180 		u32		fmr;
181 		u32		fimr;
182 	} cache;
183 
184 	int (*prepare_rx)(struct uart_port *port);
185 	int (*prepare_tx)(struct uart_port *port);
186 	void (*schedule_rx)(struct uart_port *port);
187 	void (*schedule_tx)(struct uart_port *port);
188 	void (*release_rx)(struct uart_port *port);
189 	void (*release_tx)(struct uart_port *port);
190 };
191 
192 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
193 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
194 
195 #if defined(CONFIG_OF)
196 static const struct of_device_id atmel_serial_dt_ids[] = {
197 	{ .compatible = "atmel,at91rm9200-usart-serial" },
198 	{ /* sentinel */ }
199 };
200 #endif
201 
202 static inline struct atmel_uart_port *
203 to_atmel_uart_port(struct uart_port *uart)
204 {
205 	return container_of(uart, struct atmel_uart_port, uart);
206 }
207 
208 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
209 {
210 	return __raw_readl(port->membase + reg);
211 }
212 
213 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
214 {
215 	__raw_writel(value, port->membase + reg);
216 }
217 
218 static inline u8 atmel_uart_read_char(struct uart_port *port)
219 {
220 	return __raw_readb(port->membase + ATMEL_US_RHR);
221 }
222 
223 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
224 {
225 	__raw_writeb(value, port->membase + ATMEL_US_THR);
226 }
227 
228 static inline int atmel_uart_is_half_duplex(struct uart_port *port)
229 {
230 	return ((port->rs485.flags & SER_RS485_ENABLED) &&
231 		!(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
232 		(port->iso7816.flags & SER_ISO7816_ENABLED);
233 }
234 
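/*
 * Relative error in percent of the actual value: 0 for an exact match,
 * positive when actual_value is above desired_value, negative when it
 * falls short.
 */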
235 static inline int atmel_error_rate(int desired_value, int actual_value)
236 {
237 	return 100 - (desired_value * 100) / actual_value;
238 }
239 
240 #ifdef CONFIG_SERIAL_ATMEL_PDC
241 static bool atmel_use_pdc_rx(struct uart_port *port)
242 {
243 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
244 
245 	return atmel_port->use_pdc_rx;
246 }
247 
248 static bool atmel_use_pdc_tx(struct uart_port *port)
249 {
250 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
251 
252 	return atmel_port->use_pdc_tx;
253 }
254 #else
255 static bool atmel_use_pdc_rx(struct uart_port *port)
256 {
257 	return false;
258 }
259 
260 static bool atmel_use_pdc_tx(struct uart_port *port)
261 {
262 	return false;
263 }
264 #endif
265 
266 static bool atmel_use_dma_tx(struct uart_port *port)
267 {
268 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
269 
270 	return atmel_port->use_dma_tx;
271 }
272 
273 static bool atmel_use_dma_rx(struct uart_port *port)
274 {
275 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
276 
277 	return atmel_port->use_dma_rx;
278 }
279 
280 static bool atmel_use_fifo(struct uart_port *port)
281 {
282 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
283 
284 	return atmel_port->fifo_size;
285 }
286 
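/*
 * Only schedule a tasklet while tasklet_shutdown is not set, so that no
 * new RX/TX work is queued once the port is being torn down.
 */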
287 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
288 				   struct tasklet_struct *t)
289 {
290 	if (!atomic_read(&atmel_port->tasklet_shutdown))
291 		tasklet_schedule(t);
292 }
293 
294 /* Enable or disable the rs485 support */
295 static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
296 			      struct serial_rs485 *rs485conf)
297 {
298 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
299 	unsigned int mode;
300 
301 	/* Disable interrupts */
302 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
303 
304 	mode = atmel_uart_readl(port, ATMEL_US_MR);
305 
306 	if (rs485conf->flags & SER_RS485_ENABLED) {
307 		dev_dbg(port->dev, "Setting UART to RS485\n");
308 		if (rs485conf->flags & SER_RS485_RX_DURING_TX)
309 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
310 		else
311 			atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
312 
313 		atmel_uart_writel(port, ATMEL_US_TTGR,
314 				  rs485conf->delay_rts_after_send);
315 		mode &= ~ATMEL_US_USMODE;
316 		mode |= ATMEL_US_USMODE_RS485;
317 	} else {
318 		dev_dbg(port->dev, "Setting UART to RS232\n");
319 		if (atmel_use_pdc_tx(port))
320 			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
321 				ATMEL_US_TXBUFE;
322 		else
323 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
324 	}
325 	atmel_uart_writel(port, ATMEL_US_MR, mode);
326 
327 	/* Enable interrupts */
328 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
329 
330 	return 0;
331 }
332 
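/*
 * Compute the BRGR clock divider for ISO7816 mode: the USART input clock
 * rate divided by the requested ISO7816 clock frequency.
 */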
333 static unsigned int atmel_calc_cd(struct uart_port *port,
334 				  struct serial_iso7816 *iso7816conf)
335 {
336 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
337 	unsigned int cd;
338 	u64 mck_rate;
339 
340 	mck_rate = (u64)clk_get_rate(atmel_port->clk);
341 	do_div(mck_rate, iso7816conf->clk);
342 	cd = mck_rate;
343 	return cd;
344 }
345 
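/*
 * Compute the ISO7816 Fi/Di ratio (sc_fi / sc_di) to be written to the
 * FIDI register; returns 0 when either parameter is unset.
 */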
346 static unsigned int atmel_calc_fidi(struct uart_port *port,
347 				    struct serial_iso7816 *iso7816conf)
348 {
349 	u64 fidi = 0;
350 
351 	if (iso7816conf->sc_fi && iso7816conf->sc_di) {
352 		fidi = (u64)iso7816conf->sc_fi;
353 		do_div(fidi, iso7816conf->sc_di);
354 	}
355 	return (u32)fidi;
356 }
357 
358 /* Enable or disable the iso7816 support */
359 /* Called with interrupts disabled */
360 static int atmel_config_iso7816(struct uart_port *port,
361 				struct serial_iso7816 *iso7816conf)
362 {
363 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
364 	unsigned int mode;
365 	unsigned int cd, fidi;
366 	int ret = 0;
367 
368 	/* Disable interrupts */
369 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
370 
371 	mode = atmel_uart_readl(port, ATMEL_US_MR);
372 
373 	if (iso7816conf->flags & SER_ISO7816_ENABLED) {
374 		mode &= ~ATMEL_US_USMODE;
375 
376 		if (iso7816conf->tg > 255) {
377 			dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n");
378 			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
379 			ret = -EINVAL;
380 			goto err_out;
381 		}
382 
383 		if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
384 		    == SER_ISO7816_T(0)) {
385 			mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK;
386 		} else if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
387 			   == SER_ISO7816_T(1)) {
388 			mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK;
389 		} else {
390 			dev_err(port->dev, "ISO7816: Type not supported\n");
391 			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
392 			ret = -EINVAL;
393 			goto err_out;
394 		}
395 
396 		mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR);
397 
398 		/* select mck clock, and enable clock output */
399 		mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
400 		/* set parity for normal/inverse mode + max iterations */
401 		mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3);
402 
403 		cd = atmel_calc_cd(port, iso7816conf);
404 		fidi = atmel_calc_fidi(port, iso7816conf);
405 		if (fidi == 0) {
406 			dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n");
407 		} else if (fidi < atmel_port->fidi_min
408 			   || fidi > atmel_port->fidi_max) {
409 			dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi);
410 			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
411 			ret = -EINVAL;
412 			goto err_out;
413 		}
414 
415 		if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) {
416 			/* port not yet in iso7816 mode: store configuration */
417 			atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR);
418 			atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
419 		}
420 
421 		atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg);
422 		atmel_uart_writel(port, ATMEL_US_BRGR, cd);
423 		atmel_uart_writel(port, ATMEL_US_FIDI, fidi);
424 
425 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN);
426 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION;
427 	} else {
428 		dev_dbg(port->dev, "Setting UART back to RS232\n");
429 		/* back to last RS232 settings */
430 		mode = atmel_port->backup_mode;
431 		memset(iso7816conf, 0, sizeof(struct serial_iso7816));
432 		atmel_uart_writel(port, ATMEL_US_TTGR, 0);
433 		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr);
434 		atmel_uart_writel(port, ATMEL_US_FIDI, 0x174);
435 
436 		if (atmel_use_pdc_tx(port))
437 			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
438 						   ATMEL_US_TXBUFE;
439 		else
440 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
441 	}
442 
443 	port->iso7816 = *iso7816conf;
444 
445 	atmel_uart_writel(port, ATMEL_US_MR, mode);
446 
447 err_out:
448 	/* Enable interrupts */
449 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
450 
451 	return ret;
452 }
453 
454 /*
455  * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
456  */
457 static u_int atmel_tx_empty(struct uart_port *port)
458 {
459 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
460 
461 	if (atmel_port->tx_stopped)
462 		return TIOCSER_TEMT;
463 	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
464 		TIOCSER_TEMT :
465 		0;
466 }
467 
468 /*
469  * Set state of the modem control output lines
470  */
471 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
472 {
473 	unsigned int control = 0;
474 	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
475 	unsigned int rts_paused, rts_ready;
476 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
477 
478 	/* override mode to RS485 if needed, otherwise keep the current mode */
479 	if (port->rs485.flags & SER_RS485_ENABLED) {
480 		atmel_uart_writel(port, ATMEL_US_TTGR,
481 				  port->rs485.delay_rts_after_send);
482 		mode &= ~ATMEL_US_USMODE;
483 		mode |= ATMEL_US_USMODE_RS485;
484 	}
485 
486 	/* set the RTS line state according to the mode */
487 	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
488 		/* force RTS line to high level */
489 		rts_paused = ATMEL_US_RTSEN;
490 
491 		/* give the control of the RTS line back to the hardware */
492 		rts_ready = ATMEL_US_RTSDIS;
493 	} else {
494 		/* force RTS line to high level */
495 		rts_paused = ATMEL_US_RTSDIS;
496 
497 		/* force RTS line to low level */
498 		rts_ready = ATMEL_US_RTSEN;
499 	}
500 
501 	if (mctrl & TIOCM_RTS)
502 		control |= rts_ready;
503 	else
504 		control |= rts_paused;
505 
506 	if (mctrl & TIOCM_DTR)
507 		control |= ATMEL_US_DTREN;
508 	else
509 		control |= ATMEL_US_DTRDIS;
510 
511 	atmel_uart_writel(port, ATMEL_US_CR, control);
512 
513 	mctrl_gpio_set(atmel_port->gpios, mctrl);
514 
515 	/* Local loopback mode? */
516 	mode &= ~ATMEL_US_CHMODE;
517 	if (mctrl & TIOCM_LOOP)
518 		mode |= ATMEL_US_CHMODE_LOC_LOOP;
519 	else
520 		mode |= ATMEL_US_CHMODE_NORMAL;
521 
522 	atmel_uart_writel(port, ATMEL_US_MR, mode);
523 }
524 
525 /*
526  * Get state of the modem control input lines
527  */
528 static u_int atmel_get_mctrl(struct uart_port *port)
529 {
530 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
531 	unsigned int ret = 0, status;
532 
533 	status = atmel_uart_readl(port, ATMEL_US_CSR);
534 
535 	/*
536 	 * The control signals are active low.
537 	 */
538 	if (!(status & ATMEL_US_DCD))
539 		ret |= TIOCM_CD;
540 	if (!(status & ATMEL_US_CTS))
541 		ret |= TIOCM_CTS;
542 	if (!(status & ATMEL_US_DSR))
543 		ret |= TIOCM_DSR;
544 	if (!(status & ATMEL_US_RI))
545 		ret |= TIOCM_RI;
546 
547 	return mctrl_gpio_get(atmel_port->gpios, &ret);
548 }
549 
550 /*
551  * Stop transmitting.
552  */
553 static void atmel_stop_tx(struct uart_port *port)
554 {
555 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
556 	bool is_pdc = atmel_use_pdc_tx(port);
557 	bool is_dma = is_pdc || atmel_use_dma_tx(port);
558 
559 	if (is_pdc) {
560 		/* disable PDC transmit */
561 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
562 	}
563 
564 	if (is_dma) {
565 		/*
566 		 * Disable the transmitter.
567 		 * This is mandatory when DMA is used, otherwise the DMA buffer
568 		 * is fully transmitted.
569 		 */
570 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
571 		atmel_port->tx_stopped = true;
572 	}
573 
574 	/* Disable interrupts */
575 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
576 
577 	if (atmel_uart_is_half_duplex(port))
578 		if (!atomic_read(&atmel_port->tasklet_shutdown))
579 			atmel_start_rx(port);
580 }
581 
582 /*
583  * Start transmitting.
584  */
585 static void atmel_start_tx(struct uart_port *port)
586 {
587 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
588 	bool is_pdc = atmel_use_pdc_tx(port);
589 	bool is_dma = is_pdc || atmel_use_dma_tx(port);
590 
591 	if (is_pdc && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
592 				       & ATMEL_PDC_TXTEN))
593 		/* The transmitter is already running.  Yes, we
594 		   really need this.*/
595 		return;
596 
597 	if (is_dma && atmel_uart_is_half_duplex(port))
598 		atmel_stop_rx(port);
599 
600 	if (is_pdc) {
601 		/* re-enable PDC transmit */
602 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
603 	}
604 
605 	/* Enable interrupts */
606 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
607 
608 	if (is_dma) {
609 		/* re-enable the transmitter */
610 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
611 		atmel_port->tx_stopped = false;
612 	}
613 }
614 
615 /*
616  * start receiving - port is in process of being opened.
617  */
618 static void atmel_start_rx(struct uart_port *port)
619 {
620 	/* reset status and receiver */
621 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
622 
623 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
624 
625 	if (atmel_use_pdc_rx(port)) {
626 		/* enable PDC controller */
627 		atmel_uart_writel(port, ATMEL_US_IER,
628 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
629 				  port->read_status_mask);
630 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
631 	} else {
632 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
633 	}
634 }
635 
636 /*
637  * Stop receiving - port is in process of being closed.
638  */
639 static void atmel_stop_rx(struct uart_port *port)
640 {
641 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
642 
643 	if (atmel_use_pdc_rx(port)) {
644 		/* disable PDC receive */
645 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
646 		atmel_uart_writel(port, ATMEL_US_IDR,
647 				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
648 				  port->read_status_mask);
649 	} else {
650 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
651 	}
652 }
653 
654 /*
655  * Enable modem status interrupts
656  */
657 static void atmel_enable_ms(struct uart_port *port)
658 {
659 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
660 	uint32_t ier = 0;
661 
662 	/*
663 	 * Interrupt should not be enabled twice
664 	 */
665 	if (atmel_port->ms_irq_enabled)
666 		return;
667 
668 	atmel_port->ms_irq_enabled = true;
669 
670 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
671 		ier |= ATMEL_US_CTSIC;
672 
673 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
674 		ier |= ATMEL_US_DSRIC;
675 
676 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
677 		ier |= ATMEL_US_RIIC;
678 
679 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
680 		ier |= ATMEL_US_DCDIC;
681 
682 	atmel_uart_writel(port, ATMEL_US_IER, ier);
683 
684 	mctrl_gpio_enable_ms(atmel_port->gpios);
685 }
686 
687 /*
688  * Disable modem status interrupts
689  */
690 static void atmel_disable_ms(struct uart_port *port)
691 {
692 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
693 	uint32_t idr = 0;
694 
695 	/*
696 	 * Interrupt should not be disabled twice
697 	 */
698 	if (!atmel_port->ms_irq_enabled)
699 		return;
700 
701 	atmel_port->ms_irq_enabled = false;
702 
703 	mctrl_gpio_disable_ms_no_sync(atmel_port->gpios);
704 
705 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
706 		idr |= ATMEL_US_CTSIC;
707 
708 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
709 		idr |= ATMEL_US_DSRIC;
710 
711 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
712 		idr |= ATMEL_US_RIIC;
713 
714 	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
715 		idr |= ATMEL_US_DCDIC;
716 
717 	atmel_uart_writel(port, ATMEL_US_IDR, idr);
718 }
719 
720 /*
721  * Control the transmission of a break signal
722  */
723 static void atmel_break_ctl(struct uart_port *port, int break_state)
724 {
725 	if (break_state != 0)
726 		/* start break */
727 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
728 	else
729 		/* stop break */
730 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
731 }
732 
733 /*
734  * Stores the incoming character in the ring buffer
735  */
736 static void
737 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
738 		     unsigned int ch)
739 {
740 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
741 	struct circ_buf *ring = &atmel_port->rx_ring;
742 	struct atmel_uart_char *c;
743 
744 	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
745 		/* Buffer overflow, ignore char */
746 		return;
747 
748 	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
749 	c->status	= status;
750 	c->ch		= ch;
751 
752 	/* Make sure the character is stored before we update head. */
753 	smp_wmb();
754 
755 	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
756 }
757 
758 /*
759  * Deal with parity, framing and overrun errors.
760  */
761 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
762 {
763 	/* clear error */
764 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
765 
766 	if (status & ATMEL_US_RXBRK) {
767 		/* ignore side-effect */
768 		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
769 		port->icount.brk++;
770 	}
771 	if (status & ATMEL_US_PARE)
772 		port->icount.parity++;
773 	if (status & ATMEL_US_FRAME)
774 		port->icount.frame++;
775 	if (status & ATMEL_US_OVRE)
776 		port->icount.overrun++;
777 }
778 
779 /*
780  * Characters received (called from interrupt handler)
781  */
782 static void atmel_rx_chars(struct uart_port *port)
783 {
784 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
785 	unsigned int status, ch;
786 
787 	status = atmel_uart_readl(port, ATMEL_US_CSR);
788 	while (status & ATMEL_US_RXRDY) {
789 		ch = atmel_uart_read_char(port);
790 
791 		/*
792 		 * note that the error handling code is
793 		 * out of the main execution path
794 		 */
795 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
796 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
797 			     || atmel_port->break_active)) {
798 
799 			/* clear error */
800 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
801 
802 			if (status & ATMEL_US_RXBRK
803 			    && !atmel_port->break_active) {
804 				atmel_port->break_active = 1;
805 				atmel_uart_writel(port, ATMEL_US_IER,
806 						  ATMEL_US_RXBRK);
807 			} else {
808 				/*
809 				 * This is either the end-of-break
810 				 * condition or we've received at
811 				 * least one character without RXBRK
812 				 * being set. In both cases, the next
813 				 * RXBRK will indicate start-of-break.
814 				 */
815 				atmel_uart_writel(port, ATMEL_US_IDR,
816 						  ATMEL_US_RXBRK);
817 				status &= ~ATMEL_US_RXBRK;
818 				atmel_port->break_active = 0;
819 			}
820 		}
821 
822 		atmel_buffer_rx_char(port, status, ch);
823 		status = atmel_uart_readl(port, ATMEL_US_CSR);
824 	}
825 
826 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
827 }
828 
829 /*
830  * Transmit characters (called from tasklet with TXRDY interrupt
831  * disabled)
832  */
833 static void atmel_tx_chars(struct uart_port *port)
834 {
835 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
836 	bool pending;
837 	u8 ch;
838 
839 	pending = uart_port_tx(port, ch,
840 		atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY,
841 		atmel_uart_write_char(port, ch));
842 	if (pending) {
843 		/* we still have characters to transmit, so we should continue
844 		 * transmitting them when TX is ready, regardless of
845 		 * mode or duplexity
846 		 */
847 		atmel_port->tx_done_mask |= ATMEL_US_TXRDY;
848 
849 		/* Enable interrupts */
850 		atmel_uart_writel(port, ATMEL_US_IER,
851 				  atmel_port->tx_done_mask);
852 	} else {
853 		if (atmel_uart_is_half_duplex(port))
854 			atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY;
855 	}
856 }
857 
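/*
 * DMA completion callback for TX: advance the xmit FIFO by the length of
 * the finished transfer, release the descriptor, wake up writers, and
 * either schedule the next chunk or arm the half-duplex TX->RX turnaround.
 */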
858 static void atmel_complete_tx_dma(void *arg)
859 {
860 	struct atmel_uart_port *atmel_port = arg;
861 	struct uart_port *port = &atmel_port->uart;
862 	struct tty_port *tport = &port->state->port;
863 	struct dma_chan *chan = atmel_port->chan_tx;
864 	unsigned long flags;
865 
866 	uart_port_lock_irqsave(port, &flags);
867 
868 	if (chan)
869 		dmaengine_terminate_all(chan);
870 	uart_xmit_advance(port, atmel_port->tx_len);
871 
872 	spin_lock(&atmel_port->lock_tx);
873 	async_tx_ack(atmel_port->desc_tx);
874 	atmel_port->cookie_tx = -EINVAL;
875 	atmel_port->desc_tx = NULL;
876 	spin_unlock(&atmel_port->lock_tx);
877 
878 	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
879 		uart_write_wakeup(port);
880 
881 	/*
882 	 * xmit is a circular buffer so, if we have just sent data from the
883 	 * tail to the end, now we have to transmit the remaining data from the
884 	 * beginning to the head.
885 	 */
886 	if (!kfifo_is_empty(&tport->xmit_fifo))
887 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
888 	else if (atmel_uart_is_half_duplex(port)) {
889 		/*
890 		 * DMA done, re-enable TXEMPTY and signal that we can stop
891 		 * TX and start RX for RS485
892 		 */
893 		atmel_port->hd_start_rx = true;
894 		atmel_uart_writel(port, ATMEL_US_IER,
895 				  atmel_port->tx_done_mask);
896 	}
897 
898 	uart_port_unlock_irqrestore(port, flags);
899 }
900 
901 static void atmel_release_tx_dma(struct uart_port *port)
902 {
903 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
904 	struct dma_chan *chan = atmel_port->chan_tx;
905 
906 	if (chan) {
907 		dmaengine_terminate_all(chan);
908 		dma_release_channel(chan);
909 		dma_unmap_single(port->dev, atmel_port->tx_phys,
910 				 UART_XMIT_SIZE, DMA_TO_DEVICE);
911 	}
912 
913 	atmel_port->desc_tx = NULL;
914 	atmel_port->chan_tx = NULL;
915 	atmel_port->cookie_tx = -EINVAL;
916 }
917 
918 /*
919  * Called from tasklet with the TXRDY interrupt disabled.
920  */
921 static void atmel_tx_dma(struct uart_port *port)
922 {
923 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
924 	struct tty_port *tport = &port->state->port;
925 	struct dma_chan *chan = atmel_port->chan_tx;
926 	struct dma_async_tx_descriptor *desc;
927 	struct scatterlist sgl[2], *sg;
928 	unsigned int tx_len, tail, part1_len, part2_len, sg_len;
929 	dma_addr_t phys_addr;
930 
931 	/* Make sure we have an idle channel */
932 	if (atmel_port->desc_tx != NULL)
933 		return;
934 
935 	if (!kfifo_is_empty(&tport->xmit_fifo) && !uart_tx_stopped(port)) {
936 		/*
937 		 * DMA is idle now.
938 		 * Port xmit buffer is already mapped,
939 		 * and it is one page... Just adjust
940 		 * offsets and lengths. Since it is a circular buffer,
941 		 * we have to transmit till the end, and then the rest.
942 		 * Take the port lock to get a
943 		 * consistent xmit buffer state.
944 		 */
945 		tx_len = kfifo_out_linear(&tport->xmit_fifo, &tail,
946 				UART_XMIT_SIZE);
947 
948 		if (atmel_port->fifo_size) {
949 			/* multi data mode */
950 			part1_len = (tx_len & ~0x3); /* DWORD access */
951 			part2_len = (tx_len & 0x3); /* BYTE access */
952 		} else {
953 			/* single data (legacy) mode */
954 			part1_len = 0;
955 			part2_len = tx_len; /* BYTE access only */
956 		}
957 
958 		sg_init_table(sgl, 2);
959 		sg_len = 0;
960 		phys_addr = atmel_port->tx_phys + tail;
961 		if (part1_len) {
962 			sg = &sgl[sg_len++];
963 			sg_dma_address(sg) = phys_addr;
964 			sg_dma_len(sg) = part1_len;
965 
966 			phys_addr += part1_len;
967 		}
968 
969 		if (part2_len) {
970 			sg = &sgl[sg_len++];
971 			sg_dma_address(sg) = phys_addr;
972 			sg_dma_len(sg) = part2_len;
973 		}
974 
975 		/*
976 		 * save tx_len so atmel_complete_tx_dma() will increase
977 		 * tail correctly
978 		 */
979 		atmel_port->tx_len = tx_len;
980 
981 		desc = dmaengine_prep_slave_sg(chan,
982 					       sgl,
983 					       sg_len,
984 					       DMA_MEM_TO_DEV,
985 					       DMA_PREP_INTERRUPT |
986 					       DMA_CTRL_ACK);
987 		if (!desc) {
988 			dev_err(port->dev, "Failed to send via dma!\n");
989 			return;
990 		}
991 
992 		dma_sync_single_for_device(port->dev, atmel_port->tx_phys,
993 					   UART_XMIT_SIZE, DMA_TO_DEVICE);
994 
995 		atmel_port->desc_tx = desc;
996 		desc->callback = atmel_complete_tx_dma;
997 		desc->callback_param = atmel_port;
998 		atmel_port->cookie_tx = dmaengine_submit(desc);
999 		if (dma_submit_error(atmel_port->cookie_tx)) {
1000 			dev_err(port->dev, "dma_submit_error %d\n",
1001 				atmel_port->cookie_tx);
1002 			return;
1003 		}
1004 
1005 		dma_async_issue_pending(chan);
1006 	}
1007 
1008 	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
1009 		uart_write_wakeup(port);
1010 }
1011 
1012 static int atmel_prepare_tx_dma(struct uart_port *port)
1013 {
1014 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1015 	struct tty_port *tport = &port->state->port;
1016 	struct device *mfd_dev = port->dev->parent;
1017 	dma_cap_mask_t		mask;
1018 	struct dma_slave_config config;
1019 	struct dma_chan *chan;
1020 	int ret;
1021 
1022 	dma_cap_zero(mask);
1023 	dma_cap_set(DMA_SLAVE, mask);
1024 
1025 	chan = dma_request_chan(mfd_dev, "tx");
1026 	if (IS_ERR(chan)) {
1027 		atmel_port->chan_tx = NULL;
1028 		goto chan_err;
1029 	}
1030 	atmel_port->chan_tx = chan;
1031 	dev_info(port->dev, "using %s for tx DMA transfers\n",
1032 		dma_chan_name(atmel_port->chan_tx));
1033 
1034 	spin_lock_init(&atmel_port->lock_tx);
1035 	/* UART circular tx buffer is an aligned page. */
1036 	BUG_ON(!PAGE_ALIGNED(tport->xmit_buf));
1037 	atmel_port->tx_phys = dma_map_single(port->dev, tport->xmit_buf,
1038 					     UART_XMIT_SIZE, DMA_TO_DEVICE);
1039 
1040 	if (dma_mapping_error(port->dev, atmel_port->tx_phys)) {
1041 		dev_dbg(port->dev, "need to release resource of dma\n");
1042 		goto chan_err;
1043 	} else {
1044 		dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n", __func__,
1045 			UART_XMIT_SIZE, tport->xmit_buf,
1046 			&atmel_port->tx_phys);
1047 	}
1048 
1049 	/* Configure the slave DMA */
1050 	memset(&config, 0, sizeof(config));
1051 	config.direction = DMA_MEM_TO_DEV;
1052 	config.dst_addr_width = (atmel_port->fifo_size) ?
1053 				DMA_SLAVE_BUSWIDTH_4_BYTES :
1054 				DMA_SLAVE_BUSWIDTH_1_BYTE;
1055 	config.dst_addr = port->mapbase + ATMEL_US_THR;
1056 	config.dst_maxburst = 1;
1057 
1058 	ret = dmaengine_slave_config(atmel_port->chan_tx,
1059 				     &config);
1060 	if (ret) {
1061 		dev_err(port->dev, "DMA tx slave configuration failed\n");
1062 		goto chan_err;
1063 	}
1064 
1065 	return 0;
1066 
1067 chan_err:
1068 	dev_err(port->dev, "TX channel not available, switch to pio\n");
1069 	atmel_port->use_dma_tx = false;
1070 	if (atmel_port->chan_tx)
1071 		atmel_release_tx_dma(port);
1072 	return -EINVAL;
1073 }
1074 
1075 static void atmel_complete_rx_dma(void *arg)
1076 {
1077 	struct uart_port *port = arg;
1078 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1079 
1080 	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1081 }
1082 
1083 static void atmel_release_rx_dma(struct uart_port *port)
1084 {
1085 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1086 	struct dma_chan *chan = atmel_port->chan_rx;
1087 
1088 	if (chan) {
1089 		dmaengine_terminate_all(chan);
1090 		dma_release_channel(chan);
1091 		dma_unmap_single(port->dev, atmel_port->rx_phys,
1092 				 ATMEL_SERIAL_RX_SIZE, DMA_FROM_DEVICE);
1093 	}
1094 
1095 	atmel_port->desc_rx = NULL;
1096 	atmel_port->chan_rx = NULL;
1097 	atmel_port->cookie_rx = -EINVAL;
1098 }
1099 
1100 static void atmel_rx_from_dma(struct uart_port *port)
1101 {
1102 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1103 	struct tty_port *tport = &port->state->port;
1104 	struct circ_buf *ring = &atmel_port->rx_ring;
1105 	struct dma_chan *chan = atmel_port->chan_rx;
1106 	struct dma_tx_state state;
1107 	enum dma_status dmastat;
1108 	size_t count;
1109 
1110 
1111 	/* Reset the UART timeout early so that we don't miss one */
1112 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1113 	dmastat = dmaengine_tx_status(chan,
1114 				atmel_port->cookie_rx,
1115 				&state);
1116 	/* Restart a new tasklet if DMA status is error */
1117 	if (dmastat == DMA_ERROR) {
1118 		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1119 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1120 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
1121 		return;
1122 	}
1123 
1124 	/* CPU claims ownership of RX DMA buffer */
1125 	dma_sync_single_for_cpu(port->dev, atmel_port->rx_phys,
1126 				ATMEL_SERIAL_RX_SIZE, DMA_FROM_DEVICE);
1127 
1128 	/*
1129 	 * ring->head points to the end of data already written by the DMA.
1130 	 * ring->tail points to the beginning of data to be read by the
1131 	 * framework.
1132 	 * The current transfer size should not be larger than the dma buffer
1133 	 * length.
1134 	 */
1135 	ring->head = ATMEL_SERIAL_RX_SIZE - state.residue;
1136 	BUG_ON(ring->head > ATMEL_SERIAL_RX_SIZE);
1137 	/*
1138 	 * At this point ring->head may point to the first byte right after the
1139 	 * last byte of the dma buffer:
1140 	 * 0 <= ring->head <= ATMEL_SERIAL_RX_SIZE
1141 	 *
1142 	 * However ring->tail must always point inside the dma buffer:
1143 	 * 0 <= ring->tail <= ATMEL_SERIAL_RX_SIZE - 1
1144 	 *
1145 	 * Since we use a ring buffer, we have to handle the case
1146 	 * where head is lower than tail. In such a case, we first read from
1147 	 * tail to the end of the buffer then reset tail.
1148 	 */
1149 	if (ring->head < ring->tail) {
1150 		count = ATMEL_SERIAL_RX_SIZE - ring->tail;
1151 
1152 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1153 		ring->tail = 0;
1154 		port->icount.rx += count;
1155 	}
1156 
1157 	/* Finally we read data from tail to head */
1158 	if (ring->tail < ring->head) {
1159 		count = ring->head - ring->tail;
1160 
1161 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1162 		/* Wrap ring->head if needed */
1163 		if (ring->head >= ATMEL_SERIAL_RX_SIZE)
1164 			ring->head = 0;
1165 		ring->tail = ring->head;
1166 		port->icount.rx += count;
1167 	}
1168 
1169 	/* USART retrieves ownership of RX DMA buffer */
1170 	dma_sync_single_for_device(port->dev, atmel_port->rx_phys,
1171 				   ATMEL_SERIAL_RX_SIZE, DMA_FROM_DEVICE);
1172 
1173 	tty_flip_buffer_push(tport);
1174 
1175 	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1176 }
1177 
1178 static int atmel_prepare_rx_dma(struct uart_port *port)
1179 {
1180 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1181 	struct device *mfd_dev = port->dev->parent;
1182 	struct dma_async_tx_descriptor *desc;
1183 	dma_cap_mask_t		mask;
1184 	struct dma_slave_config config;
1185 	struct circ_buf		*ring;
1186 	struct dma_chan *chan;
1187 	int ret;
1188 
1189 	ring = &atmel_port->rx_ring;
1190 
1191 	dma_cap_zero(mask);
1192 	dma_cap_set(DMA_CYCLIC, mask);
1193 
1194 	chan = dma_request_chan(mfd_dev, "rx");
1195 	if (IS_ERR(chan)) {
1196 		atmel_port->chan_rx = NULL;
1197 		goto chan_err;
1198 	}
1199 	atmel_port->chan_rx = chan;
1200 	dev_info(port->dev, "using %s for rx DMA transfers\n",
1201 		dma_chan_name(atmel_port->chan_rx));
1202 
1203 	spin_lock_init(&atmel_port->lock_rx);
1204 	/* UART circular rx buffer is an aligned page. */
1205 	BUG_ON(!PAGE_ALIGNED(ring->buf));
1206 	atmel_port->rx_phys = dma_map_single(port->dev, ring->buf,
1207 					     ATMEL_SERIAL_RX_SIZE,
1208 					     DMA_FROM_DEVICE);
1209 
1210 	if (dma_mapping_error(port->dev, atmel_port->rx_phys)) {
1211 		dev_dbg(port->dev, "need to release resource of dma\n");
1212 		goto chan_err;
1213 	} else {
1214 		dev_dbg(port->dev, "%s: mapped %zu@%p to %pad\n", __func__,
1215 			ATMEL_SERIAL_RX_SIZE, ring->buf, &atmel_port->rx_phys);
1216 	}
1217 
1218 	/* Configure the slave DMA */
1219 	memset(&config, 0, sizeof(config));
1220 	config.direction = DMA_DEV_TO_MEM;
1221 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1222 	config.src_addr = port->mapbase + ATMEL_US_RHR;
1223 	config.src_maxburst = 1;
1224 
1225 	ret = dmaengine_slave_config(atmel_port->chan_rx,
1226 				     &config);
1227 	if (ret) {
1228 		dev_err(port->dev, "DMA rx slave configuration failed\n");
1229 		goto chan_err;
1230 	}
1231 	/*
1232 	 * Prepare a cyclic dma transfer, assign 2 descriptors,
1233 	 * each one is half ring buffer size
1234 	 */
1235 	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1236 					 atmel_port->rx_phys,
1237 					 ATMEL_SERIAL_RX_SIZE,
1238 					 ATMEL_SERIAL_RX_SIZE / 2,
1239 					 DMA_DEV_TO_MEM,
1240 					 DMA_PREP_INTERRUPT);
1241 	if (!desc) {
1242 		dev_err(port->dev, "Preparing DMA cyclic failed\n");
1243 		goto chan_err;
1244 	}
1245 	desc->callback = atmel_complete_rx_dma;
1246 	desc->callback_param = port;
1247 	atmel_port->desc_rx = desc;
1248 	atmel_port->cookie_rx = dmaengine_submit(desc);
1249 	if (dma_submit_error(atmel_port->cookie_rx)) {
1250 		dev_err(port->dev, "dma_submit_error %d\n",
1251 			atmel_port->cookie_rx);
1252 		goto chan_err;
1253 	}
1254 
1255 	dma_async_issue_pending(atmel_port->chan_rx);
1256 
1257 	return 0;
1258 
1259 chan_err:
1260 	dev_err(port->dev, "RX channel not available, switch to pio\n");
1261 	atmel_port->use_dma_rx = false;
1262 	if (atmel_port->chan_rx)
1263 		atmel_release_rx_dma(port);
1264 	return -EINVAL;
1265 }
1266 
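/*
 * Periodic timer: keep kicking the RX tasklet and re-arm the timer every
 * uart_poll_timeout() interval, unless the tasklets are being shut down.
 */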
1267 static void atmel_uart_timer_callback(struct timer_list *t)
1268 {
1269 	struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
1270 							uart_timer);
1271 	struct uart_port *port = &atmel_port->uart;
1272 
1273 	if (!atomic_read(&atmel_port->tasklet_shutdown)) {
1274 		tasklet_schedule(&atmel_port->tasklet_rx);
1275 		mod_timer(&atmel_port->uart_timer,
1276 			  jiffies + uart_poll_timeout(port));
1277 	}
1278 }
1279 
1280 /*
1281  * receive interrupt handler.
1282  */
1283 static void
1284 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1285 {
1286 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1287 
1288 	if (atmel_use_pdc_rx(port)) {
1289 		/*
1290 		 * PDC receive. Just schedule the tasklet and let it
1291 		 * figure out the details.
1292 		 *
1293 		 * TODO: We're not handling error flags correctly at
1294 		 * the moment.
1295 		 */
1296 		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1297 			atmel_uart_writel(port, ATMEL_US_IDR,
1298 					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1299 			atmel_tasklet_schedule(atmel_port,
1300 					       &atmel_port->tasklet_rx);
1301 		}
1302 
1303 		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1304 				ATMEL_US_FRAME | ATMEL_US_PARE))
1305 			atmel_pdc_rxerr(port, pending);
1306 	}
1307 
1308 	if (atmel_use_dma_rx(port)) {
1309 		if (pending & ATMEL_US_TIMEOUT) {
1310 			atmel_uart_writel(port, ATMEL_US_IDR,
1311 					  ATMEL_US_TIMEOUT);
1312 			atmel_tasklet_schedule(atmel_port,
1313 					       &atmel_port->tasklet_rx);
1314 		}
1315 	}
1316 
1317 	/* Interrupt receive */
1318 	if (pending & ATMEL_US_RXRDY)
1319 		atmel_rx_chars(port);
1320 	else if (pending & ATMEL_US_RXBRK) {
1321 		/*
1322 		 * End of break detected. If it came along with a
1323 		 * character, atmel_rx_chars will handle it.
1324 		 */
1325 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1326 		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1327 		atmel_port->break_active = 0;
1328 	}
1329 }
1330 
1331 /*
1332  * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1333  */
1334 static void
1335 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1336 {
1337 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1338 
1339 	if (pending & atmel_port->tx_done_mask) {
1340 		atmel_uart_writel(port, ATMEL_US_IDR,
1341 				  atmel_port->tx_done_mask);
1342 
1343 		/* Start RX if flag was set and FIFO is empty */
1344 		if (atmel_port->hd_start_rx) {
1345 			if (!(atmel_uart_readl(port, ATMEL_US_CSR)
1346 					& ATMEL_US_TXEMPTY))
1347 				dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
1348 
1349 			atmel_port->hd_start_rx = false;
1350 			atmel_start_rx(port);
1351 		}
1352 
1353 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1354 	}
1355 }
1356 
1357 /*
1358  * status flags interrupt handler.
1359  */
1360 static void
1361 atmel_handle_status(struct uart_port *port, unsigned int pending,
1362 		    unsigned int status)
1363 {
1364 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1365 	unsigned int status_change;
1366 
1367 	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1368 				| ATMEL_US_CTSIC)) {
1369 		status_change = status ^ atmel_port->irq_status_prev;
1370 		atmel_port->irq_status_prev = status;
1371 
1372 		if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1373 					| ATMEL_US_DCD | ATMEL_US_CTS)) {
1374 			/* TODO: All reads to CSR will clear these interrupts! */
1375 			if (status_change & ATMEL_US_RI)
1376 				port->icount.rng++;
1377 			if (status_change & ATMEL_US_DSR)
1378 				port->icount.dsr++;
1379 			if (status_change & ATMEL_US_DCD)
1380 				uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1381 			if (status_change & ATMEL_US_CTS)
1382 				uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1383 
1384 			wake_up_interruptible(&port->state->port.delta_msr_wait);
1385 		}
1386 	}
1387 
1388 	if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION))
1389 		dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending);
1390 }
1391 
1392 /*
1393  * Interrupt handler
1394  */
1395 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1396 {
1397 	struct uart_port *port = dev_id;
1398 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1399 	unsigned int status, pending, mask, pass_counter = 0;
1400 
1401 	spin_lock(&atmel_port->lock_suspended);
1402 
1403 	do {
1404 		status = atmel_uart_readl(port, ATMEL_US_CSR);
1405 		mask = atmel_uart_readl(port, ATMEL_US_IMR);
1406 		pending = status & mask;
1407 		if (!pending)
1408 			break;
1409 
1410 		if (atmel_port->suspended) {
1411 			atmel_port->pending |= pending;
1412 			atmel_port->pending_status = status;
1413 			atmel_uart_writel(port, ATMEL_US_IDR, mask);
1414 			pm_system_wakeup();
1415 			break;
1416 		}
1417 
1418 		atmel_handle_receive(port, pending);
1419 		atmel_handle_status(port, pending, status);
1420 		atmel_handle_transmit(port, pending);
1421 	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1422 
1423 	spin_unlock(&atmel_port->lock_suspended);
1424 
1425 	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1426 }
1427 
1428 static void atmel_release_tx_pdc(struct uart_port *port)
1429 {
1430 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1431 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1432 
1433 	dma_unmap_single(port->dev,
1434 			 pdc->dma_addr,
1435 			 pdc->dma_size,
1436 			 DMA_TO_DEVICE);
1437 }
1438 
1439 /*
1440  * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1441  */
1442 static void atmel_tx_pdc(struct uart_port *port)
1443 {
1444 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1445 	struct tty_port *tport = &port->state->port;
1446 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1447 
1448 	/* nothing left to transmit? */
1449 	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1450 		return;
1451 	uart_xmit_advance(port, pdc->ofs);
1452 	pdc->ofs = 0;
1453 
1454 	/* more to transmit - setup next transfer */
1455 
1456 	/* disable PDC transmit */
1457 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1458 
1459 	if (!kfifo_is_empty(&tport->xmit_fifo) && !uart_tx_stopped(port)) {
1460 		unsigned int count, tail;
1461 
1462 		dma_sync_single_for_device(port->dev,
1463 					   pdc->dma_addr,
1464 					   pdc->dma_size,
1465 					   DMA_TO_DEVICE);
1466 
1467 		count = kfifo_out_linear(&tport->xmit_fifo, &tail,
1468 				UART_XMIT_SIZE);
1469 		pdc->ofs = count;
1470 
1471 		atmel_uart_writel(port, ATMEL_PDC_TPR, pdc->dma_addr + tail);
1472 		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1473 		/* re-enable PDC transmit */
1474 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1475 		/* Enable interrupts */
1476 		atmel_uart_writel(port, ATMEL_US_IER,
1477 				  atmel_port->tx_done_mask);
1478 	} else {
1479 		if (atmel_uart_is_half_duplex(port)) {
1480 			/* DMA done, stop TX, start RX for RS485 */
1481 			atmel_start_rx(port);
1482 		}
1483 	}
1484 
1485 	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
1486 		uart_write_wakeup(port);
1487 }
1488 
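/*
 * Map the circular xmit buffer once for PDC use; the per-chunk transfer
 * pointer and counter are programmed later in atmel_tx_pdc().
 */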
1489 static int atmel_prepare_tx_pdc(struct uart_port *port)
1490 {
1491 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1492 	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1493 	struct tty_port *tport = &port->state->port;
1494 
1495 	pdc->buf = tport->xmit_buf;
1496 	pdc->dma_addr = dma_map_single(port->dev,
1497 					pdc->buf,
1498 					UART_XMIT_SIZE,
1499 					DMA_TO_DEVICE);
1500 	pdc->dma_size = UART_XMIT_SIZE;
1501 	pdc->ofs = 0;
1502 
1503 	return 0;
1504 }
1505 
1506 static void atmel_rx_from_ring(struct uart_port *port)
1507 {
1508 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1509 	struct circ_buf *ring = &atmel_port->rx_ring;
1510 	unsigned int status;
1511 	u8 flg;
1512 
1513 	while (ring->head != ring->tail) {
1514 		struct atmel_uart_char c;
1515 
1516 		/* Make sure c is loaded after head. */
1517 		smp_rmb();
1518 
1519 		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1520 
1521 		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1522 
1523 		port->icount.rx++;
1524 		status = c.status;
1525 		flg = TTY_NORMAL;
1526 
1527 		/*
1528 		 * note that the error handling code is
1529 		 * out of the main execution path
1530 		 */
1531 		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1532 				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1533 			if (status & ATMEL_US_RXBRK) {
1534 				/* ignore side-effect */
1535 				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1536 
1537 				port->icount.brk++;
1538 				if (uart_handle_break(port))
1539 					continue;
1540 			}
1541 			if (status & ATMEL_US_PARE)
1542 				port->icount.parity++;
1543 			if (status & ATMEL_US_FRAME)
1544 				port->icount.frame++;
1545 			if (status & ATMEL_US_OVRE)
1546 				port->icount.overrun++;
1547 
1548 			status &= port->read_status_mask;
1549 
1550 			if (status & ATMEL_US_RXBRK)
1551 				flg = TTY_BREAK;
1552 			else if (status & ATMEL_US_PARE)
1553 				flg = TTY_PARITY;
1554 			else if (status & ATMEL_US_FRAME)
1555 				flg = TTY_FRAME;
1556 		}
1557 
1558 
1559 		if (uart_handle_sysrq_char(port, c.ch))
1560 			continue;
1561 
1562 		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1563 	}
1564 
1565 	tty_flip_buffer_push(&port->state->port);
1566 }
1567 
1568 static void atmel_release_rx_pdc(struct uart_port *port)
1569 {
1570 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1571 	int i;
1572 
1573 	for (i = 0; i < 2; i++) {
1574 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1575 
1576 		dma_unmap_single(port->dev,
1577 				 pdc->dma_addr,
1578 				 pdc->dma_size,
1579 				 DMA_FROM_DEVICE);
1580 		kfree(pdc->buf);
1581 	}
1582 }
1583 
1584 static void atmel_rx_from_pdc(struct uart_port *port)
1585 {
1586 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1587 	struct tty_port *tport = &port->state->port;
1588 	struct atmel_dma_buffer *pdc;
1589 	int rx_idx = atmel_port->pdc_rx_idx;
1590 	unsigned int head;
1591 	unsigned int tail;
1592 	unsigned int count;
1593 
1594 	do {
1595 		/* Reset the UART timeout early so that we don't miss one */
1596 		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1597 
1598 		pdc = &atmel_port->pdc_rx[rx_idx];
1599 		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1600 		tail = pdc->ofs;
1601 
1602 		/* If the PDC has switched buffers, RPR won't contain
1603 		 * any address within the current buffer. Since head
1604 		 * is unsigned, we just need a one-way comparison to
1605 		 * find out.
1606 		 *
1607 		 * In this case, we just need to consume the entire
1608 		 * buffer and resubmit it for DMA. This will clear the
1609 		 * ENDRX bit as well, so that we can safely re-enable
1610 		 * all interrupts below.
1611 		 */
1612 		head = min(head, pdc->dma_size);
1613 
1614 		if (likely(head != tail)) {
1615 			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1616 					pdc->dma_size, DMA_FROM_DEVICE);
1617 
1618 			/*
1619 			 * head will only wrap around when we recycle
1620 			 * the DMA buffer, and when that happens, we
1621 			 * explicitly set tail to 0. So head will
1622 			 * always be greater than tail.
1623 			 */
1624 			count = head - tail;
1625 
1626 			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1627 						count);
1628 
1629 			dma_sync_single_for_device(port->dev, pdc->dma_addr,
1630 					pdc->dma_size, DMA_FROM_DEVICE);
1631 
1632 			port->icount.rx += count;
1633 			pdc->ofs = head;
1634 		}
1635 
1636 		/*
1637 		 * If the current buffer is full, we need to check if
1638 		 * the next one contains any additional data.
1639 		 */
1640 		if (head >= pdc->dma_size) {
1641 			pdc->ofs = 0;
1642 			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1643 			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1644 
1645 			rx_idx = !rx_idx;
1646 			atmel_port->pdc_rx_idx = rx_idx;
1647 		}
1648 	} while (head >= pdc->dma_size);
1649 
1650 	tty_flip_buffer_push(tport);
1651 
1652 	atmel_uart_writel(port, ATMEL_US_IER,
1653 			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1654 }
1655 
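/*
 * Allocate and map the two ping-pong PDC receive buffers and program both
 * the current (RPR/RCR) and next (RNPR/RNCR) buffer registers.
 */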
1656 static int atmel_prepare_rx_pdc(struct uart_port *port)
1657 {
1658 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1659 	int i;
1660 
1661 	for (i = 0; i < 2; i++) {
1662 		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1663 
1664 		pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1665 		if (pdc->buf == NULL) {
1666 			if (i != 0) {
1667 				dma_unmap_single(port->dev,
1668 					atmel_port->pdc_rx[0].dma_addr,
1669 					PDC_BUFFER_SIZE,
1670 					DMA_FROM_DEVICE);
1671 				kfree(atmel_port->pdc_rx[0].buf);
1672 			}
1673 			atmel_port->use_pdc_rx = false;
1674 			return -ENOMEM;
1675 		}
1676 		pdc->dma_addr = dma_map_single(port->dev,
1677 						pdc->buf,
1678 						PDC_BUFFER_SIZE,
1679 						DMA_FROM_DEVICE);
1680 		pdc->dma_size = PDC_BUFFER_SIZE;
1681 		pdc->ofs = 0;
1682 	}
1683 
1684 	atmel_port->pdc_rx_idx = 0;
1685 
1686 	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1687 	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1688 
1689 	atmel_uart_writel(port, ATMEL_PDC_RNPR,
1690 			  atmel_port->pdc_rx[1].dma_addr);
1691 	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1692 
1693 	return 0;
1694 }
1695 
1696 /*
1697  * tasklet handling tty stuff outside the interrupt handler.
1698  */
1699 static void atmel_tasklet_rx_func(struct tasklet_struct *t)
1700 {
1701 	struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
1702 							  tasklet_rx);
1703 	struct uart_port *port = &atmel_port->uart;
1704 
1705 	/* The interrupt handler does not take the lock */
1706 	uart_port_lock(port);
1707 	atmel_port->schedule_rx(port);
1708 	uart_port_unlock(port);
1709 }
1710 
1711 static void atmel_tasklet_tx_func(struct tasklet_struct *t)
1712 {
1713 	struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
1714 							  tasklet_tx);
1715 	struct uart_port *port = &atmel_port->uart;
1716 
1717 	/* The interrupt handler does not take the lock */
1718 	uart_port_lock(port);
1719 	atmel_port->schedule_tx(port);
1720 	uart_port_unlock(port);
1721 }
1722 
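/*
 * Parse the RX/TX transfer mode from the device tree: when
 * "atmel,use-dma-rx"/"atmel,use-dma-tx" is set, use the DMA engine if a
 * "dmas" property is present and fall back to the PDC otherwise; without
 * these properties plain PIO is used.
 */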
1723 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1724 				struct platform_device *pdev)
1725 {
1726 	struct device_node *np = pdev->dev.of_node;
1727 
1728 	/* DMA/PDC usage specification */
1729 	if (of_property_read_bool(np, "atmel,use-dma-rx")) {
1730 		atmel_port->use_dma_rx = of_property_present(np, "dmas");
1731 		atmel_port->use_pdc_rx = !atmel_port->use_dma_rx;
1732 	} else {
1733 		atmel_port->use_dma_rx  = false;
1734 		atmel_port->use_pdc_rx  = false;
1735 	}
1736 
1737 	if (of_property_read_bool(np, "atmel,use-dma-tx")) {
1738 		atmel_port->use_dma_tx = of_property_present(np, "dmas");
1739 		atmel_port->use_pdc_tx = !atmel_port->use_dma_tx;
1740 	} else {
1741 		atmel_port->use_dma_tx  = false;
1742 		atmel_port->use_pdc_tx  = false;
1743 	}
1744 }
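/*
 * Illustrative device-tree fragment for the properties parsed above (node
 * name, unit address and DMA specifiers are made up for the example):
 *
 *	uart1: serial@f8020000 {
 *		compatible = "atmel,at91sam9260-usart";
 *		atmel,use-dma-rx;
 *		atmel,use-dma-tx;
 *		dmas = <&dma0 2>, <&dma0 3>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * With a "dmas" property present, both directions use the dmaengine path;
 * without it, the atmel,use-dma-* properties select the PDC path instead.
 */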
1745 
1746 static void atmel_set_ops(struct uart_port *port)
1747 {
1748 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1749 
1750 	if (atmel_use_dma_rx(port)) {
1751 		atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1752 		atmel_port->schedule_rx = &atmel_rx_from_dma;
1753 		atmel_port->release_rx = &atmel_release_rx_dma;
1754 	} else if (atmel_use_pdc_rx(port)) {
1755 		atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1756 		atmel_port->schedule_rx = &atmel_rx_from_pdc;
1757 		atmel_port->release_rx = &atmel_release_rx_pdc;
1758 	} else {
1759 		atmel_port->prepare_rx = NULL;
1760 		atmel_port->schedule_rx = &atmel_rx_from_ring;
1761 		atmel_port->release_rx = NULL;
1762 	}
1763 
1764 	if (atmel_use_dma_tx(port)) {
1765 		atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1766 		atmel_port->schedule_tx = &atmel_tx_dma;
1767 		atmel_port->release_tx = &atmel_release_tx_dma;
1768 	} else if (atmel_use_pdc_tx(port)) {
1769 		atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1770 		atmel_port->schedule_tx = &atmel_tx_pdc;
1771 		atmel_port->release_tx = &atmel_release_tx_pdc;
1772 	} else {
1773 		atmel_port->prepare_tx = NULL;
1774 		atmel_port->schedule_tx = &atmel_tx_chars;
1775 		atmel_port->release_tx = NULL;
1776 	}
1777 }
1778 
1779 /*
1780  * Get the IP name (usart or uart)
1781  */
1782 static void atmel_get_ip_name(struct uart_port *port)
1783 {
1784 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1785 	int name = atmel_uart_readl(port, ATMEL_US_NAME);
1786 	u32 version;
1787 	u32 usart, dbgu_uart, new_uart;
1788 	/* ASCII decoding for IP version */
1789 	usart = 0x55534152;	/* USAR(T) */
1790 	dbgu_uart = 0x44424755;	/* DBGU */
1791 	new_uart = 0x55415254;	/* UART */
1792 
1793 	/*
1794 	 * Only USART devices from at91sam9260 SOC implement fractional
1795 	 * baudrate. It is available for all asynchronous modes, with the
1796 	 * following restriction: the sampling clock's duty cycle is not
1797 	 * constant.
1798 	 */
1799 	atmel_port->has_frac_baudrate = false;
1800 	atmel_port->has_hw_timer = false;
1801 	atmel_port->is_usart = false;
1802 
1803 	if (name == new_uart) {
1804 		dev_dbg(port->dev, "Uart with hw timer");
1805 		atmel_port->has_hw_timer = true;
1806 		atmel_port->rtor = ATMEL_UA_RTOR;
1807 	} else if (name == usart) {
1808 		dev_dbg(port->dev, "Usart\n");
1809 		atmel_port->has_frac_baudrate = true;
1810 		atmel_port->has_hw_timer = true;
1811 		atmel_port->is_usart = true;
1812 		atmel_port->rtor = ATMEL_US_RTOR;
1813 		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1814 		switch (version) {
1815 		case 0x814:	/* sama5d2 */
1816 			fallthrough;
1817 		case 0x701:	/* sama5d4 */
1818 			atmel_port->fidi_min = 3;
1819 			atmel_port->fidi_max = 65535;
1820 			break;
1821 		case 0x502:	/* sam9x5, sama5d3 */
1822 			atmel_port->fidi_min = 3;
1823 			atmel_port->fidi_max = 2047;
1824 			break;
1825 		default:
1826 			atmel_port->fidi_min = 1;
1827 			atmel_port->fidi_max = 2047;
1828 		}
1829 	} else if (name == dbgu_uart) {
1830 		dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
1831 	} else {
1832 		/* fallback for older SoCs: use version field */
1833 		version = atmel_uart_readl(port, ATMEL_US_VERSION);
1834 		switch (version) {
1835 		case 0x302:
1836 		case 0x10213:
1837 		case 0x10302:
1838 			dev_dbg(port->dev, "This version is usart\n");
1839 			atmel_port->has_frac_baudrate = true;
1840 			atmel_port->has_hw_timer = true;
1841 			atmel_port->is_usart = true;
1842 			atmel_port->rtor = ATMEL_US_RTOR;
1843 			break;
1844 		case 0x203:
1845 		case 0x10202:
1846 			dev_dbg(port->dev, "This version is uart\n");
1847 			break;
1848 		default:
1849 			dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
1850 		}
1851 	}
1852 }
1853 
1854 /*
1855  * Perform initialization and enable port for reception
1856  */
1857 static int atmel_startup(struct uart_port *port)
1858 {
1859 	struct platform_device *pdev = to_platform_device(port->dev);
1860 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1861 	int retval;
1862 
1863 	/*
1864 	 * Ensure that no interrupts are enabled otherwise when
1865 	 * request_irq() is called we could get stuck trying to
1866 	 * handle an unexpected interrupt
1867 	 */
1868 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
1869 	atmel_port->ms_irq_enabled = false;
1870 
1871 	/*
1872 	 * Allocate the IRQ
1873 	 */
1874 	retval = request_irq(port->irq, atmel_interrupt,
1875 			     IRQF_SHARED | IRQF_COND_SUSPEND,
1876 			     dev_name(&pdev->dev), port);
1877 	if (retval) {
1878 		dev_err(port->dev, "atmel_startup - Can't get irq\n");
1879 		return retval;
1880 	}
1881 
1882 	atomic_set(&atmel_port->tasklet_shutdown, 0);
1883 	tasklet_setup(&atmel_port->tasklet_rx, atmel_tasklet_rx_func);
1884 	tasklet_setup(&atmel_port->tasklet_tx, atmel_tasklet_tx_func);
1885 
1886 	/*
1887 	 * Initialize DMA (if necessary)
1888 	 */
1889 	atmel_init_property(atmel_port, pdev);
1890 	atmel_set_ops(port);
1891 
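	/*
	 * If buffer allocation fails in prepare_rx()/prepare_tx() below, the
	 * helper clears the corresponding use_pdc or use_dma flag (see
	 * atmel_prepare_rx_pdc() above), so calling atmel_set_ops() again
	 * falls back to the PIO callbacks.
	 */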
1892 	if (atmel_port->prepare_rx) {
1893 		retval = atmel_port->prepare_rx(port);
1894 		if (retval < 0)
1895 			atmel_set_ops(port);
1896 	}
1897 
1898 	if (atmel_port->prepare_tx) {
1899 		retval = atmel_port->prepare_tx(port);
1900 		if (retval < 0)
1901 			atmel_set_ops(port);
1902 	}
1903 
1904 	/*
1905 	 * Enable FIFO when available
1906 	 */
1907 	if (atmel_port->fifo_size) {
1908 		unsigned int txrdym = ATMEL_US_ONE_DATA;
1909 		unsigned int rxrdym = ATMEL_US_ONE_DATA;
1910 		unsigned int fmr;
1911 
1912 		atmel_uart_writel(port, ATMEL_US_CR,
1913 				  ATMEL_US_FIFOEN |
1914 				  ATMEL_US_RXFCLR |
1915 				  ATMEL_US_TXFLCLR);
1916 
1917 		if (atmel_use_dma_tx(port))
1918 			txrdym = ATMEL_US_FOUR_DATA;
1919 
1920 		fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
1921 		if (atmel_port->rts_high &&
1922 		    atmel_port->rts_low)
1923 			fmr |=	ATMEL_US_FRTSC |
1924 				ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1925 				ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1926 
1927 		atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1928 	}
1929 
1930 	/* Save current CSR for comparison in atmel_tasklet_func() */
1931 	atmel_port->irq_status_prev = atmel_uart_readl(port, ATMEL_US_CSR);
1932 
1933 	/*
1934 	 * Finally, enable the serial port
1935 	 */
1936 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1937 	/* enable xmit & rcvr */
1938 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1939 	atmel_port->tx_stopped = false;
1940 
1941 	timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0);
1942 
1943 	if (atmel_use_pdc_rx(port)) {
1944 		/* set UART timeout */
1945 		if (!atmel_port->has_hw_timer) {
1946 			mod_timer(&atmel_port->uart_timer,
1947 					jiffies + uart_poll_timeout(port));
1948 		/* set USART timeout */
1949 		} else {
1950 			atmel_uart_writel(port, atmel_port->rtor,
1951 					  PDC_RX_TIMEOUT);
1952 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1953 
1954 			atmel_uart_writel(port, ATMEL_US_IER,
1955 					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1956 		}
1957 		/* enable PDC controller */
1958 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1959 	} else if (atmel_use_dma_rx(port)) {
1960 		/* set UART timeout */
1961 		if (!atmel_port->has_hw_timer) {
1962 			mod_timer(&atmel_port->uart_timer,
1963 					jiffies + uart_poll_timeout(port));
1964 		/* set USART timeout */
1965 		} else {
1966 			atmel_uart_writel(port, atmel_port->rtor,
1967 					  PDC_RX_TIMEOUT);
1968 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1969 
1970 			atmel_uart_writel(port, ATMEL_US_IER,
1971 					  ATMEL_US_TIMEOUT);
1972 		}
1973 	} else {
1974 		/* enable receive only */
1975 		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
1976 	}
1977 
1978 	return 0;
1979 }
1980 
1981 /*
1982  * Flush any TX data submitted for DMA. Called when the TX circular
1983  * buffer is reset.
1984  */
1985 static void atmel_flush_buffer(struct uart_port *port)
1986 {
1987 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1988 
1989 	if (atmel_use_pdc_tx(port)) {
1990 		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1991 		atmel_port->pdc_tx.ofs = 0;
1992 	}
1993 	/*
1994 	 * in uart_flush_buffer(), the xmit circular buffer has just
1995 	 * been cleared, so we have to reset tx_len accordingly.
1996 	 */
1997 	atmel_port->tx_len = 0;
1998 }
1999 
2000 /*
2001  * Disable the port
2002  */
2003 static void atmel_shutdown(struct uart_port *port)
2004 {
2005 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2006 
2007 	/* Disable modem control lines interrupts */
2008 	atmel_disable_ms(port);
2009 
2010 	/* Disable interrupts at device level */
2011 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2012 
2013 	/* Prevent spurious interrupts from scheduling the tasklet */
2014 	atomic_inc(&atmel_port->tasklet_shutdown);
2015 
2016 	/*
2017 	 * Prevent any tasklets being scheduled during
2018 	 * cleanup
2019 	 */
2020 	timer_delete_sync(&atmel_port->uart_timer);
2021 
2022 	/* Make sure that no interrupt is on the fly */
2023 	synchronize_irq(port->irq);
2024 
2025 	/*
2026 	 * Clear out any scheduled tasklets before
2027 	 * we destroy the buffers
2028 	 */
2029 	tasklet_kill(&atmel_port->tasklet_rx);
2030 	tasklet_kill(&atmel_port->tasklet_tx);
2031 
2032 	/*
2033 	 * Ensure everything is stopped and
2034 	 * disable port and break condition.
2035 	 */
2036 	atmel_stop_rx(port);
2037 	atmel_stop_tx(port);
2038 
2039 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
2040 
2041 	/*
2042 	 * Shut-down the DMA.
2043 	 */
2044 	if (atmel_port->release_rx)
2045 		atmel_port->release_rx(port);
2046 	if (atmel_port->release_tx)
2047 		atmel_port->release_tx(port);
2048 
2049 	/*
2050 	 * Reset ring buffer pointers
2051 	 */
2052 	atmel_port->rx_ring.head = 0;
2053 	atmel_port->rx_ring.tail = 0;
2054 
2055 	/*
2056 	 * Free the interrupts
2057 	 */
2058 	free_irq(port->irq, port);
2059 
2060 	atmel_flush_buffer(port);
2061 }
2062 
2063 /*
2064  * Power / Clock management.
2065  */
2066 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
2067 			    unsigned int oldstate)
2068 {
2069 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2070 
2071 	switch (state) {
2072 	case UART_PM_STATE_ON:
2073 		/*
2074 		 * Enable the peripheral clock for this serial port.
2075 		 * This is called on uart_open() or a resume event.
2076 		 */
2077 		clk_prepare_enable(atmel_port->clk);
2078 
2079 		/* re-enable interrupts if we disabled some on suspend */
2080 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
2081 		break;
2082 	case UART_PM_STATE_OFF:
2083 		/* Back up the interrupt mask and disable all interrupts */
2084 		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
2085 		atmel_uart_writel(port, ATMEL_US_IDR, -1);
2086 
2087 		/*
2088 		 * Disable the peripheral clock for this serial port.
2089 		 * This is called on uart_close() or a suspend event.
2090 		 */
2091 		clk_disable_unprepare(atmel_port->clk);
2092 		if (__clk_is_enabled(atmel_port->gclk))
2093 			clk_disable_unprepare(atmel_port->gclk);
2094 		break;
2095 	default:
2096 		dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
2097 	}
2098 }
2099 
2100 /*
2101  * Change the port parameters
2102  */
2103 static void atmel_set_termios(struct uart_port *port,
2104 			      struct ktermios *termios,
2105 			      const struct ktermios *old)
2106 {
2107 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2108 	unsigned long flags;
2109 	unsigned int old_mode, mode, imr, quot, div, cd, fp = 0;
2110 	unsigned int baud, actual_baud, gclk_rate;
2111 	int ret;
2112 
2113 	/* save the current mode register */
2114 	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2115 
2116 	/* reset the mode, clock divisor, parity, stop bits and data size */
2117 	if (atmel_port->is_usart)
2118 		mode &= ~(ATMEL_US_NBSTOP | ATMEL_US_PAR | ATMEL_US_CHRL |
2119 			  ATMEL_US_USCLKS | ATMEL_US_USMODE);
2120 	else
2121 		mode &= ~(ATMEL_UA_BRSRCCK | ATMEL_US_PAR | ATMEL_UA_FILTER);
2122 
2123 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2124 
2125 	/* byte size */
2126 	switch (termios->c_cflag & CSIZE) {
2127 	case CS5:
2128 		mode |= ATMEL_US_CHRL_5;
2129 		break;
2130 	case CS6:
2131 		mode |= ATMEL_US_CHRL_6;
2132 		break;
2133 	case CS7:
2134 		mode |= ATMEL_US_CHRL_7;
2135 		break;
2136 	default:
2137 		mode |= ATMEL_US_CHRL_8;
2138 		break;
2139 	}
2140 
2141 	/* stop bits */
2142 	if (termios->c_cflag & CSTOPB)
2143 		mode |= ATMEL_US_NBSTOP_2;
2144 
2145 	/* parity */
2146 	if (termios->c_cflag & PARENB) {
2147 		/* Mark or Space parity */
2148 		if (termios->c_cflag & CMSPAR) {
2149 			if (termios->c_cflag & PARODD)
2150 				mode |= ATMEL_US_PAR_MARK;
2151 			else
2152 				mode |= ATMEL_US_PAR_SPACE;
2153 		} else if (termios->c_cflag & PARODD)
2154 			mode |= ATMEL_US_PAR_ODD;
2155 		else
2156 			mode |= ATMEL_US_PAR_EVEN;
2157 	} else
2158 		mode |= ATMEL_US_PAR_NONE;
2159 
2160 	uart_port_lock_irqsave(port, &flags);
2161 
2162 	port->read_status_mask = ATMEL_US_OVRE;
2163 	if (termios->c_iflag & INPCK)
2164 		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2165 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2166 		port->read_status_mask |= ATMEL_US_RXBRK;
2167 
2168 	if (atmel_use_pdc_rx(port))
2169 		/* need to enable error interrupts */
2170 		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2171 
2172 	/*
2173 	 * Characters to ignore
2174 	 */
2175 	port->ignore_status_mask = 0;
2176 	if (termios->c_iflag & IGNPAR)
2177 		port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2178 	if (termios->c_iflag & IGNBRK) {
2179 		port->ignore_status_mask |= ATMEL_US_RXBRK;
2180 		/*
2181 		 * If we're ignoring parity and break indicators,
2182 		 * ignore overruns too (for real raw support).
2183 		 */
2184 		if (termios->c_iflag & IGNPAR)
2185 			port->ignore_status_mask |= ATMEL_US_OVRE;
2186 	}
2187 	/* TODO: Ignore all characters if CREAD is set.*/
2188 
2189 	/* update the per-port timeout */
2190 	uart_update_timeout(port, termios->c_cflag, baud);
2191 
2192 	/*
2193 	 * save/disable interrupts. The tty layer will ensure that the
2194 	 * transmitter is empty if requested by the caller, so there's
2195 	 * no need to wait for it here.
2196 	 */
2197 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2198 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2199 
2200 	/* disable receiver and transmitter */
2201 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2202 	atmel_port->tx_stopped = true;
2203 
2204 	/* mode */
2205 	if (port->rs485.flags & SER_RS485_ENABLED) {
2206 		atmel_uart_writel(port, ATMEL_US_TTGR,
2207 				  port->rs485.delay_rts_after_send);
2208 		mode |= ATMEL_US_USMODE_RS485;
2209 	} else if (port->iso7816.flags & SER_ISO7816_ENABLED) {
2210 		atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg);
2211 		/* select mck clock, and output  */
2212 		mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
2213 		/* set max iterations */
2214 		mode |= ATMEL_US_MAX_ITER(3);
2215 		if ((port->iso7816.flags & SER_ISO7816_T_PARAM)
2216 				== SER_ISO7816_T(0))
2217 			mode |= ATMEL_US_USMODE_ISO7816_T0;
2218 		else
2219 			mode |= ATMEL_US_USMODE_ISO7816_T1;
2220 	} else if (termios->c_cflag & CRTSCTS) {
2221 		/* RS232 with hardware handshake (RTS/CTS) */
2222 		if (atmel_use_fifo(port) &&
2223 		    !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
2224 			/*
2225 			 * with ATMEL_US_USMODE_HWHS set, the controller will
2226 			 * be able to drive the RTS pin high/low when the RX
2227 			 * FIFO is above RXFTHRES/below RXFTHRES2.
2228 			 * It will also disable the transmitter when the CTS
2229 			 * pin is high.
2230 			 * This mode is not activated if CTS pin is a GPIO
2231 			 * because in this case, the transmitter is always
2232 			 * disabled (there must be an internal pull-up
2233 			 * responsible for this behaviour).
2234 			 * If the RTS pin is a GPIO, the controller won't be
2235 			 * able to drive it according to the FIFO thresholds,
2236 			 * but it will be handled by the driver.
2237 			 */
2238 			mode |= ATMEL_US_USMODE_HWHS;
2239 		} else {
2240 			/*
2241 			 * For platforms without FIFO, the flow control is
2242 			 * handled by the driver.
2243 			 */
2244 			mode |= ATMEL_US_USMODE_NORMAL;
2245 		}
2246 	} else {
2247 		/* RS232 without hardware handshake */
2248 		mode |= ATMEL_US_USMODE_NORMAL;
2249 	}
2250 
2251 	/*
2252 	 * Set the baud rate:
2253 	 * Fractional baudrate allows to setup output frequency more
2254 	 * accurately. This feature is enabled only when using normal mode.
2255 	 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
2256 	 * Currently, OVER is always set to 0 so we get
2257 	 * baudrate = selected clock / (16 * (CD + FP / 8))
2258 	 * then
2259 	 * 8 CD + FP = selected clock / (2 * baudrate)
2260 	 */
2261 	if (atmel_port->has_frac_baudrate) {
2262 		div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
2263 		cd = div >> 3;
2264 		fp = div & ATMEL_US_FP_MASK;
2265 	} else {
2266 		cd = uart_get_divisor(port, baud);
2267 	}
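	/*
	 * Worked example (figures are illustrative, not from a real board):
	 * with uartclk = 132 MHz and baud = 115200,
	 *   div = DIV_ROUND_CLOSEST(132000000, 230400) = 573
	 *   cd  = 573 >> 3 = 71,  fp = 573 & 7 = 5
	 * giving 132000000 / (16 * (71 + 5/8)) ~= 115183 Bd (~0.015% error),
	 * whereas the integer-only divisor cd = 72 would give ~114583 Bd
	 * (~0.5% error).
	 */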
2268 
2269 	/*
2270 	 * If the current value of the Clock Divisor surpasses the 16 bit
2271 	 * ATMEL_US_CD mask and the IP is USART, switch to the Peripheral
2272 	 * Clock implicitly divided by 8.
2273 	 * If the IP is UART however, keep the highest possible value for
2274 	 * the CD and avoid needless division of CD, since UART IP's do not
2275 	 * support implicit division of the Peripheral Clock.
2276 	 */
2277 	if (atmel_port->is_usart && cd > ATMEL_US_CD) {
2278 		cd /= 8;
2279 		mode |= ATMEL_US_USCLKS_MCK_DIV8;
2280 	} else {
2281 		cd = min_t(unsigned int, cd, ATMEL_US_CD);
2282 	}
2283 
2284 	/*
2285 	 * If there is no Fractional Part, there is a high chance that
2286 	 * we may be able to generate a baudrate closer to the desired one
2287 	 * if we use the GCLK as the clock source driving the baudrate
2288 	 * generator.
2289 	 */
2290 	if (!atmel_port->has_frac_baudrate) {
2291 		if (__clk_is_enabled(atmel_port->gclk))
2292 			clk_disable_unprepare(atmel_port->gclk);
2293 		gclk_rate = clk_round_rate(atmel_port->gclk, 16 * baud);
2294 		actual_baud = clk_get_rate(atmel_port->clk) / (16 * cd);
2295 		if (gclk_rate && abs(atmel_error_rate(baud, actual_baud)) >
2296 		    abs(atmel_error_rate(baud, gclk_rate / 16))) {
2297 			clk_set_rate(atmel_port->gclk, 16 * baud);
2298 			ret = clk_prepare_enable(atmel_port->gclk);
2299 			if (ret)
2300 				goto gclk_fail;
2301 
2302 			if (atmel_port->is_usart) {
2303 				mode &= ~ATMEL_US_USCLKS;
2304 				mode |= ATMEL_US_USCLKS_GCLK;
2305 			} else {
2306 				mode |= ATMEL_UA_BRSRCCK;
2307 			}
2308 
2309 			/*
2310 			 * Set the Clock Divisor for GCLK to 1.
2311 			 * Since we were able to generate the smallest
2312 			 * multiple of the desired baudrate times 16,
2313 			 * then we surely can generate a bigger multiple
2314 			 * with the exact error rate for an equally increased
2315 			 * CD. Thus no need to take into account
2316 			 * a higher value for CD.
2317 			 */
2318 			cd = 1;
2319 		}
2320 	}
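	/*
	 * Example (illustrative figures): a UART fed by a 132 MHz peripheral
	 * clock cannot reach 1.5 MBd accurately (cd = 6 gives 1.375 MBd,
	 * ~8% error). If the GCLK can be rounded to 24 MHz, 24000000 / 16 is
	 * exactly 1.5 MBd, so the GCLK is selected as the baud rate source
	 * and cd is forced to 1.
	 */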
2321 
2322 gclk_fail:
2323 	quot = cd | fp << ATMEL_US_FP_OFFSET;
2324 
2325 	if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
2326 		atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2327 
2328 	/* set the mode, clock divisor, parity, stop bits and data size */
2329 	atmel_uart_writel(port, ATMEL_US_MR, mode);
2330 
2331 	/*
2332 	 * when switching the mode, set the RTS line state according to the
2333 	 * new mode, otherwise keep the former state
2334 	 */
2335 	if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2336 		unsigned int rts_state;
2337 
2338 		if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2339 			/* let the hardware control the RTS line */
2340 			rts_state = ATMEL_US_RTSDIS;
2341 		} else {
2342 			/* force RTS line to low level */
2343 			rts_state = ATMEL_US_RTSEN;
2344 		}
2345 
2346 		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2347 	}
2348 
2349 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2350 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2351 	atmel_port->tx_stopped = false;
2352 
2353 	/* restore interrupts */
2354 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2355 
2356 	/* CTS flow-control and modem-status interrupts */
2357 	if (UART_ENABLE_MS(port, termios->c_cflag))
2358 		atmel_enable_ms(port);
2359 	else
2360 		atmel_disable_ms(port);
2361 
2362 	uart_port_unlock_irqrestore(port, flags);
2363 }
2364 
2365 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2366 {
2367 	if (termios->c_line == N_PPS) {
2368 		port->flags |= UPF_HARDPPS_CD;
2369 		uart_port_lock_irq(port);
2370 		atmel_enable_ms(port);
2371 		uart_port_unlock_irq(port);
2372 	} else {
2373 		port->flags &= ~UPF_HARDPPS_CD;
2374 		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2375 			uart_port_lock_irq(port);
2376 			atmel_disable_ms(port);
2377 			uart_port_unlock_irq(port);
2378 		}
2379 	}
2380 }
2381 
2382 /*
2383  * Return string describing the specified port
2384  */
2385 static const char *atmel_type(struct uart_port *port)
2386 {
2387 	return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2388 }
2389 
2390 /*
2391  * Release the memory region(s) being used by 'port'.
2392  */
2393 static void atmel_release_port(struct uart_port *port)
2394 {
2395 	struct platform_device *mpdev = to_platform_device(port->dev->parent);
2396 	int size = resource_size(mpdev->resource);
2397 
2398 	release_mem_region(port->mapbase, size);
2399 
2400 	if (port->flags & UPF_IOREMAP) {
2401 		iounmap(port->membase);
2402 		port->membase = NULL;
2403 	}
2404 }
2405 
2406 /*
2407  * Request the memory region(s) being used by 'port'.
2408  */
2409 static int atmel_request_port(struct uart_port *port)
2410 {
2411 	struct platform_device *mpdev = to_platform_device(port->dev->parent);
2412 
2413 	if (port->flags & UPF_IOREMAP) {
2414 		port->membase = devm_platform_ioremap_resource(mpdev, 0);
2415 		if (IS_ERR(port->membase))
2416 			return PTR_ERR(port->membase);
2417 	}
2418 
2419 	return 0;
2420 }
2421 
2422 /*
2423  * Configure/autoconfigure the port.
2424  */
2425 static void atmel_config_port(struct uart_port *port, int flags)
2426 {
2427 	if (flags & UART_CONFIG_TYPE) {
2428 		port->type = PORT_ATMEL;
2429 		atmel_request_port(port);
2430 	}
2431 }
2432 
2433 /*
2434  * Verify the new serial_struct (for TIOCSSERIAL).
2435  */
2436 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2437 {
2438 	int ret = 0;
2439 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2440 		ret = -EINVAL;
2441 	if (port->irq != ser->irq)
2442 		ret = -EINVAL;
2443 	if (ser->io_type != SERIAL_IO_MEM)
2444 		ret = -EINVAL;
2445 	if (port->uartclk / 16 != ser->baud_base)
2446 		ret = -EINVAL;
2447 	if (port->mapbase != (unsigned long)ser->iomem_base)
2448 		ret = -EINVAL;
2449 	if (port->iobase != ser->port)
2450 		ret = -EINVAL;
2451 	if (ser->hub6 != 0)
2452 		ret = -EINVAL;
2453 	return ret;
2454 }
2455 
2456 #ifdef CONFIG_CONSOLE_POLL
2457 static int atmel_poll_get_char(struct uart_port *port)
2458 {
2459 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2460 		cpu_relax();
2461 
2462 	return atmel_uart_read_char(port);
2463 }
2464 
2465 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2466 {
2467 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2468 		cpu_relax();
2469 
2470 	atmel_uart_write_char(port, ch);
2471 }
2472 #endif
2473 
2474 static const struct uart_ops atmel_pops = {
2475 	.tx_empty	= atmel_tx_empty,
2476 	.set_mctrl	= atmel_set_mctrl,
2477 	.get_mctrl	= atmel_get_mctrl,
2478 	.stop_tx	= atmel_stop_tx,
2479 	.start_tx	= atmel_start_tx,
2480 	.stop_rx	= atmel_stop_rx,
2481 	.enable_ms	= atmel_enable_ms,
2482 	.break_ctl	= atmel_break_ctl,
2483 	.startup	= atmel_startup,
2484 	.shutdown	= atmel_shutdown,
2485 	.flush_buffer	= atmel_flush_buffer,
2486 	.set_termios	= atmel_set_termios,
2487 	.set_ldisc	= atmel_set_ldisc,
2488 	.type		= atmel_type,
2489 	.release_port	= atmel_release_port,
2490 	.request_port	= atmel_request_port,
2491 	.config_port	= atmel_config_port,
2492 	.verify_port	= atmel_verify_port,
2493 	.pm		= atmel_serial_pm,
2494 #ifdef CONFIG_CONSOLE_POLL
2495 	.poll_get_char	= atmel_poll_get_char,
2496 	.poll_put_char	= atmel_poll_put_char,
2497 #endif
2498 };
2499 
2500 static const struct serial_rs485 atmel_rs485_supported = {
2501 	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX,
2502 	.delay_rts_before_send = 1,
2503 	.delay_rts_after_send = 1,
2504 };
2505 
2506 /*
2507  * Configure the port from the platform device resource info.
2508  */
2509 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2510 				      struct platform_device *pdev)
2511 {
2512 	int ret;
2513 	struct uart_port *port = &atmel_port->uart;
2514 	struct platform_device *mpdev = to_platform_device(pdev->dev.parent);
2515 
2516 	atmel_init_property(atmel_port, pdev);
2517 	atmel_set_ops(port);
2518 
2519 	port->iotype		= UPIO_MEM;
2520 	port->flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP;
2521 	port->ops		= &atmel_pops;
2522 	port->fifosize		= 1;
2523 	port->dev		= &pdev->dev;
2524 	port->mapbase		= mpdev->resource[0].start;
2525 	port->irq		= platform_get_irq(mpdev, 0);
2526 	port->rs485_config	= atmel_config_rs485;
2527 	port->rs485_supported	= atmel_rs485_supported;
2528 	port->iso7816_config	= atmel_config_iso7816;
2529 	port->membase		= NULL;
2530 
2531 	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2532 
2533 	ret = uart_get_rs485_mode(port);
2534 	if (ret)
2535 		return ret;
2536 
2537 	port->uartclk = clk_get_rate(atmel_port->clk);
2538 
2539 	/*
2540 	 * Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or
2541 	 * ENDTX|TXBUFE
2542 	 */
2543 	if (atmel_uart_is_half_duplex(port))
2544 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2545 	else if (atmel_use_pdc_tx(port)) {
2546 		port->fifosize = PDC_BUFFER_SIZE;
2547 		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2548 	} else {
2549 		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2550 	}
2551 
2552 	return 0;
2553 }
2554 
2555 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2556 static void atmel_console_putchar(struct uart_port *port, unsigned char ch)
2557 {
2558 	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2559 		cpu_relax();
2560 	atmel_uart_write_char(port, ch);
2561 }
2562 
2563 /*
2564  * Interrupts are disabled on entering
2565  */
2566 static void atmel_console_write(struct console *co, const char *s, u_int count)
2567 {
2568 	struct uart_port *port = &atmel_ports[co->index].uart;
2569 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2570 	unsigned int status, imr;
2571 	unsigned int pdc_tx;
2572 
2573 	/*
2574 	 * First, save IMR and then disable interrupts
2575 	 */
2576 	imr = atmel_uart_readl(port, ATMEL_US_IMR);
2577 	atmel_uart_writel(port, ATMEL_US_IDR,
2578 			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2579 
2580 	/* Store PDC transmit status and disable it */
2581 	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2582 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2583 
2584 	/* Make sure that tx path is actually able to send characters */
2585 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2586 	atmel_port->tx_stopped = false;
2587 
2588 	uart_console_write(port, s, count, atmel_console_putchar);
2589 
2590 	/*
2591 	 * Finally, wait for transmitter to become empty
2592 	 * and restore IMR
2593 	 */
2594 	do {
2595 		status = atmel_uart_readl(port, ATMEL_US_CSR);
2596 	} while (!(status & ATMEL_US_TXRDY));
2597 
2598 	/* Restore PDC transmit status */
2599 	if (pdc_tx)
2600 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2601 
2602 	/* set interrupts back the way they were */
2603 	atmel_uart_writel(port, ATMEL_US_IER, imr);
2604 }
2605 
2606 /*
2607  * If the port was already initialised (eg, by a boot loader),
2608  * try to determine the current setup.
2609  */
2610 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2611 					     int *parity, int *bits)
2612 {
2613 	unsigned int mr, quot;
2614 
2615 	/*
2616 	 * If the baud rate generator isn't running, the port wasn't
2617 	 * initialized by the boot loader.
2618 	 */
2619 	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2620 	if (!quot)
2621 		return;
2622 
2623 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2624 	if (mr == ATMEL_US_CHRL_8)
2625 		*bits = 8;
2626 	else
2627 		*bits = 7;
2628 
2629 	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2630 	if (mr == ATMEL_US_PAR_EVEN)
2631 		*parity = 'e';
2632 	else if (mr == ATMEL_US_PAR_ODD)
2633 		*parity = 'o';
2634 
2635 	*baud = port->uartclk / (16 * quot);
2636 }
2637 
2638 static int __init atmel_console_setup(struct console *co, char *options)
2639 {
2640 	struct uart_port *port = &atmel_ports[co->index].uart;
2641 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2642 	int baud = 115200;
2643 	int bits = 8;
2644 	int parity = 'n';
2645 	int flow = 'n';
2646 
2647 	if (port->membase == NULL) {
2648 		/* Port not initialized yet - delay setup */
2649 		return -ENODEV;
2650 	}
2651 
2652 	atmel_uart_writel(port, ATMEL_US_IDR, -1);
2653 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2654 	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2655 	atmel_port->tx_stopped = false;
2656 
2657 	if (options)
2658 		uart_parse_options(options, &baud, &parity, &bits, &flow);
2659 	else
2660 		atmel_console_get_options(port, &baud, &parity, &bits);
2661 
2662 	return uart_set_options(port, co, baud, parity, bits, flow);
2663 }
2664 
2665 static struct uart_driver atmel_uart;
2666 
2667 static struct console atmel_console = {
2668 	.name		= ATMEL_DEVICENAME,
2669 	.write		= atmel_console_write,
2670 	.device		= uart_console_device,
2671 	.setup		= atmel_console_setup,
2672 	.flags		= CON_PRINTBUFFER,
2673 	.index		= -1,
2674 	.data		= &atmel_uart,
2675 };
2676 
2677 static void atmel_serial_early_write(struct console *con, const char *s,
2678 				     unsigned int n)
2679 {
2680 	struct earlycon_device *dev = con->data;
2681 
2682 	uart_console_write(&dev->port, s, n, atmel_console_putchar);
2683 }
2684 
2685 static int __init atmel_early_console_setup(struct earlycon_device *device,
2686 					    const char *options)
2687 {
2688 	if (!device->port.membase)
2689 		return -ENODEV;
2690 
2691 	device->con->write = atmel_serial_early_write;
2692 
2693 	return 0;
2694 }
2695 
2696 OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91rm9200-usart",
2697 		    atmel_early_console_setup);
2698 OF_EARLYCON_DECLARE(atmel_serial, "atmel,at91sam9260-usart",
2699 		    atmel_early_console_setup);
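/*
 * With the earlycon declarations above, early console output can be
 * requested from the kernel command line, e.g. (the address is only an
 * example, here a DBGU mapped at 0xfffff200):
 *
 *	earlycon=atmel_serial,0xfffff200,115200n8
 *
 * or simply "earlycon" when the device tree provides a suitable stdout-path
 * in /chosen.
 */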
2700 
2701 #define ATMEL_CONSOLE_DEVICE	(&atmel_console)
2702 
2703 #else
2704 #define ATMEL_CONSOLE_DEVICE	NULL
2705 #endif
2706 
2707 static struct uart_driver atmel_uart = {
2708 	.owner		= THIS_MODULE,
2709 	.driver_name	= "atmel_serial",
2710 	.dev_name	= ATMEL_DEVICENAME,
2711 	.major		= SERIAL_ATMEL_MAJOR,
2712 	.minor		= MINOR_START,
2713 	.nr		= ATMEL_MAX_UART,
2714 	.cons		= ATMEL_CONSOLE_DEVICE,
2715 };
2716 
2717 static bool atmel_serial_clk_will_stop(void)
2718 {
2719 #ifdef CONFIG_ARCH_AT91
2720 	return at91_suspend_entering_slow_clock();
2721 #else
2722 	return false;
2723 #endif
2724 }
2725 
2726 static int __maybe_unused atmel_serial_suspend(struct device *dev)
2727 {
2728 	struct uart_port *port = dev_get_drvdata(dev);
2729 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2730 
2731 	if (uart_console(port) && console_suspend_enabled) {
2732 		/* Drain the TX shifter */
2733 		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
2734 			 ATMEL_US_TXEMPTY))
2735 			cpu_relax();
2736 	}
2737 
2738 	if (uart_console(port) && !console_suspend_enabled) {
2739 		/* Cache register values as we won't get a full shutdown/startup
2740 		 * cycle
2741 		 */
2742 		atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
2743 		atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
2744 		atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
2745 		atmel_port->cache.rtor = atmel_uart_readl(port,
2746 							  atmel_port->rtor);
2747 		atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
2748 		atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
2749 		atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
2750 	}
2751 
2752 	/* we can not wake up if we're running on slow clock */
2753 	atmel_port->may_wakeup = device_may_wakeup(dev);
2754 	if (atmel_serial_clk_will_stop()) {
2755 		unsigned long flags;
2756 
2757 		spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2758 		atmel_port->suspended = true;
2759 		spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2760 		device_set_wakeup_enable(dev, 0);
2761 	}
2762 
2763 	uart_suspend_port(&atmel_uart, port);
2764 
2765 	return 0;
2766 }
2767 
2768 static int __maybe_unused atmel_serial_resume(struct device *dev)
2769 {
2770 	struct uart_port *port = dev_get_drvdata(dev);
2771 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2772 	unsigned long flags;
2773 
2774 	if (uart_console(port) && !console_suspend_enabled) {
2775 		atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
2776 		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
2777 		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
2778 		atmel_uart_writel(port, atmel_port->rtor,
2779 				  atmel_port->cache.rtor);
2780 		atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
2781 
2782 		if (atmel_port->fifo_size) {
2783 			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
2784 					  ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
2785 			atmel_uart_writel(port, ATMEL_US_FMR,
2786 					  atmel_port->cache.fmr);
2787 			atmel_uart_writel(port, ATMEL_US_FIER,
2788 					  atmel_port->cache.fimr);
2789 		}
2790 		atmel_start_rx(port);
2791 	}
2792 
2793 	spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2794 	if (atmel_port->pending) {
2795 		atmel_handle_receive(port, atmel_port->pending);
2796 		atmel_handle_status(port, atmel_port->pending,
2797 				    atmel_port->pending_status);
2798 		atmel_handle_transmit(port, atmel_port->pending);
2799 		atmel_port->pending = 0;
2800 	}
2801 	atmel_port->suspended = false;
2802 	spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2803 
2804 	uart_resume_port(&atmel_uart, port);
2805 	device_set_wakeup_enable(dev, atmel_port->may_wakeup);
2806 
2807 	return 0;
2808 }
2809 
2810 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
2811 				     struct platform_device *pdev)
2812 {
2813 	atmel_port->fifo_size = 0;
2814 	atmel_port->rts_low = 0;
2815 	atmel_port->rts_high = 0;
2816 
2817 	if (of_property_read_u32(pdev->dev.of_node,
2818 				 "atmel,fifo-size",
2819 				 &atmel_port->fifo_size))
2820 		return;
2821 
2822 	if (!atmel_port->fifo_size)
2823 		return;
2824 
2825 	if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2826 		atmel_port->fifo_size = 0;
2827 		dev_err(&pdev->dev, "Invalid FIFO size\n");
2828 		return;
2829 	}
2830 
2831 	/*
2832 	 * 0 <= rts_low <= rts_high <= fifo_size
2833 	 * Once their CTS line is asserted by the remote peer, some x86 UARTs
2834 	 * tend to flush their internal TX FIFO, commonly up to 16 data, before
2835 	 * they actually stop sending new data. So we try to set the RTS High
2836 	 * Threshold to a reasonably high value respecting this 16 data
2837 	 * empirical rule when possible.
2838 	 */
2839 	atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
2840 			       atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2841 	atmel_port->rts_low  = max_t(int, atmel_port->fifo_size >> 2,
2842 			       atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
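	/*
	 * Example with an assumed 32-data RX FIFO: rts_high = max(16, 32 - 16)
	 * = 16 and rts_low = max(8, 32 - 20) = 12, i.e. RTS is driven high
	 * once the FIFO fills above 16 data and low again once it drains
	 * below 12.
	 */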
2843 
2844 	dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2845 		 atmel_port->fifo_size);
2846 	dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2847 		atmel_port->rts_high);
2848 	dev_dbg(&pdev->dev, "RTS Low Threshold  : %2u data\n",
2849 		atmel_port->rts_low);
2850 }
2851 
2852 static int atmel_serial_probe(struct platform_device *pdev)
2853 {
2854 	struct atmel_uart_port *atmel_port;
2855 	struct device_node *np = pdev->dev.parent->of_node;
2856 	void *data;
2857 	int ret;
2858 	bool rs485_enabled;
2859 
2860 	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
2861 
2862 	/*
2863 	 * In device tree there is no node with "atmel,at91rm9200-usart-serial"
2864 	 * as compatible string. This driver is probed by at91-usart mfd driver
2865 	 * which is just a wrapper over the atmel_serial driver and
2866 	 * spi-at91-usart driver. All attributes needed by this driver are
2867 	 * found in of_node of parent.
2868 	 */
2869 	pdev->dev.of_node = np;
2870 
2871 	ret = of_alias_get_id(np, "serial");
2872 	if (ret < 0)
2873 		/* port id not found in platform data nor device-tree aliases:
2874 		 * auto-enumerate it */
2875 		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2876 
2877 	if (ret >= ATMEL_MAX_UART) {
2878 		ret = -ENODEV;
2879 		goto err;
2880 	}
2881 
2882 	if (test_and_set_bit(ret, atmel_ports_in_use)) {
2883 		/* port already in use */
2884 		ret = -EBUSY;
2885 		goto err;
2886 	}
2887 
2888 	atmel_port = &atmel_ports[ret];
2889 	atmel_port->backup_imr = 0;
2890 	atmel_port->uart.line = ret;
2891 	atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE);
2892 	atmel_serial_probe_fifos(atmel_port, pdev);
2893 
2894 	atomic_set(&atmel_port->tasklet_shutdown, 0);
2895 	spin_lock_init(&atmel_port->lock_suspended);
2896 
2897 	atmel_port->clk = devm_clk_get(&pdev->dev, "usart");
2898 	if (IS_ERR(atmel_port->clk)) {
2899 		ret = PTR_ERR(atmel_port->clk);
2900 		goto err;
2901 	}
2902 	ret = clk_prepare_enable(atmel_port->clk);
2903 	if (ret)
2904 		goto err;
2905 
2906 	atmel_port->gclk = devm_clk_get_optional(&pdev->dev, "gclk");
2907 	if (IS_ERR(atmel_port->gclk)) {
2908 		ret = PTR_ERR(atmel_port->gclk);
2909 		goto err_clk_disable_unprepare;
2910 	}
2911 
2912 	ret = atmel_init_port(atmel_port, pdev);
2913 	if (ret)
2914 		goto err_clk_disable_unprepare;
2915 
2916 	atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
2917 	if (IS_ERR(atmel_port->gpios)) {
2918 		ret = PTR_ERR(atmel_port->gpios);
2919 		goto err_clk_disable_unprepare;
2920 	}
2921 
2922 	if (!atmel_use_pdc_rx(&atmel_port->uart)) {
2923 		ret = -ENOMEM;
2924 		data = kmalloc(ATMEL_SERIAL_RX_SIZE, GFP_KERNEL);
2925 		if (!data)
2926 			goto err_clk_disable_unprepare;
2927 		atmel_port->rx_ring.buf = data;
2928 	}
2929 
2930 	rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
2931 
2932 	ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
2933 	if (ret)
2934 		goto err_add_port;
2935 
2936 	device_init_wakeup(&pdev->dev, 1);
2937 	platform_set_drvdata(pdev, atmel_port);
2938 
2939 	if (rs485_enabled) {
2940 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
2941 				  ATMEL_US_USMODE_NORMAL);
2942 		atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
2943 				  ATMEL_US_RTSEN);
2944 	}
2945 
2946 	/*
2947 	 * Get port name of usart or uart
2948 	 */
2949 	atmel_get_ip_name(&atmel_port->uart);
2950 
2951 	/*
2952 	 * The peripheral clock can now safely be disabled till the port
2953 	 * is used
2954 	 */
2955 	clk_disable_unprepare(atmel_port->clk);
2956 
2957 	return 0;
2958 
2959 err_add_port:
2960 	kfree(atmel_port->rx_ring.buf);
2961 	atmel_port->rx_ring.buf = NULL;
2962 err_clk_disable_unprepare:
2963 	clk_disable_unprepare(atmel_port->clk);
2964 	clear_bit(atmel_port->uart.line, atmel_ports_in_use);
2965 err:
2966 	return ret;
2967 }
2968 
2969 /*
2970  * Even if the driver is not modular, it makes sense to be able to
2971  * unbind a device: there can be many bound devices, and there are
2972  * situations where dynamic binding and unbinding can be useful.
2973  *
2974  * For example, a connected device can require a specific firmware update
2975  * protocol that needs bitbanging on IO lines, but use the regular serial
2976  * port in the normal case.
2977  */
2978 static void atmel_serial_remove(struct platform_device *pdev)
2979 {
2980 	struct uart_port *port = platform_get_drvdata(pdev);
2981 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2982 
2983 	tasklet_kill(&atmel_port->tasklet_rx);
2984 	tasklet_kill(&atmel_port->tasklet_tx);
2985 
2986 	device_init_wakeup(&pdev->dev, 0);
2987 
2988 	uart_remove_one_port(&atmel_uart, port);
2989 
2990 	kfree(atmel_port->rx_ring.buf);
2991 
2992 	/* "port" is allocated statically, so we shouldn't free it */
2993 
2994 	clear_bit(port->line, atmel_ports_in_use);
2995 
2996 	pdev->dev.of_node = NULL;
2997 }
2998 
2999 static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
3000 			 atmel_serial_resume);
3001 
3002 static struct platform_driver atmel_serial_driver = {
3003 	.probe		= atmel_serial_probe,
3004 	.remove		= atmel_serial_remove,
3005 	.driver		= {
3006 		.name			= "atmel_usart_serial",
3007 		.of_match_table		= of_match_ptr(atmel_serial_dt_ids),
3008 		.pm			= pm_ptr(&atmel_serial_pm_ops),
3009 	},
3010 };
3011 
3012 static int __init atmel_serial_init(void)
3013 {
3014 	int ret;
3015 
3016 	ret = uart_register_driver(&atmel_uart);
3017 	if (ret)
3018 		return ret;
3019 
3020 	ret = platform_driver_register(&atmel_serial_driver);
3021 	if (ret)
3022 		uart_unregister_driver(&atmel_uart);
3023 
3024 	return ret;
3025 }
3026 device_initcall(atmel_serial_init);
3027