xref: /linux/drivers/tty/serial/8250/8250_dma.c (revision f3a8b6645dc2e60d11f20c1c23afd964ff4e55ae)
/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"

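/*
 * Completion callback for a TX DMA transfer: hand the xmit buffer back to
 * the CPU, advance the circular buffer tail by the amount just sent, wake
 * up writers once enough room has been freed, and try to start the next
 * transfer. If that fails, fall back to interrupt-driven transmission by
 * enabling THRI.
 */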
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct circ_buf		*xmit = &p->port.state->xmit;
	unsigned long	flags;
	int		ret;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	xmit->tail += dma->tx_size;
	xmit->tail &= UART_XMIT_SIZE - 1;
	p->port.icount.tx += dma->tx_size;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	ret = serial8250_tx_dma(p);
	if (ret) {
		p->ier |= UART_IER_THRI;
		serial_port_out(&p->port, UART_IER, p->ier);
	}

	spin_unlock_irqrestore(&p->port.lock, flags);
}

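/*
 * Completion callback for an RX DMA transfer: use the residue reported by
 * the dmaengine driver to work out how many bytes actually arrived, insert
 * them into the tty flip buffer, and push them to the line discipline.
 */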
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct tty_port		*tty_port = &p->port.state->port;
	struct dma_tx_state	state;
	int			count;

	dma->rx_running = 0;
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}

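/*
 * Start a DMA transmit of the data currently in the circular xmit buffer.
 * Only the contiguous part up to the end of the buffer is sent; wrapped
 * data is picked up by the next call from __dma_tx_complete(). Returns 0
 * if a transfer was started or there was nothing to do, or a negative
 * error code (with tx_err set) if no descriptor could be prepared.
 */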
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;
	int ret;

	if (uart_tx_stopped(&p->port) || dma->tx_running ||
	    uart_circ_empty(xmit))
		return 0;

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err) {
		dma->tx_err = 0;
		if (p->ier & UART_IER_THRI) {
			p->ier &= ~UART_IER_THRI;
			serial_out(p, UART_IER, p->ier);
		}
	}
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}

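/*
 * Start a DMA receive of up to rx_size bytes into the coherent RX buffer,
 * unless one is already running. __dma_rx_complete() pushes the received
 * data to the tty layer when the transfer completes or is flushed.
 */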
int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct dma_async_tx_descriptor	*desc;

	if (dma->rx_running)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}

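/*
 * Stop a DMA receive that is still in flight: pause the channel, push
 * whatever has been received so far to the tty layer via
 * __dma_rx_complete(), then terminate the transfer.
 */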
void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		__dma_rx_complete(p);
		dmaengine_terminate_async(dma->rxchan);
	}
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);

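/*
 * Acquire and configure DMA resources for the port: request RX and TX
 * slave channels, check that the dmaengine driver supports the required
 * pause/terminate/residue capabilities, allocate a coherent RX buffer and
 * map the circular xmit buffer for TX. Returns 0 on success; on failure
 * any channel already acquired is released again.
 */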
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t		mask;
	struct dma_slave_caps	caps;
	int			ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					&dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					p->port.state->xmit.buf,
					UART_XMIT_SIZE,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);

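/*
 * Counterpart of serial8250_request_dma(): terminate any outstanding
 * transfers, free the RX buffer, unmap the TX buffer and release both
 * channels.
 */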
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);