/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"

static void __dma_tx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	unsigned long flags;
	int ret;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	xmit->tail += dma->tx_size;
	xmit->tail &= UART_XMIT_SIZE - 1;
	p->port.icount.tx += dma->tx_size;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	ret = serial8250_tx_dma(p);
	if (ret) {
		p->ier |= UART_IER_THRI;
		serial_port_out(&p->port, UART_IER, p->ier);
	}

	spin_unlock_irqrestore(&p->port.lock, flags);
}

static void __dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	int count;

	dma->rx_running = 0;
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}

int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor *desc;
	int ret;

	if (dma->tx_running)
		return 0;

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		serial8250_rpm_put_tx(p);
		return 0;
	}

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err) {
		dma->tx_err = 0;
		if (p->ier & UART_IER_THRI) {
			p->ier &= ~UART_IER_THRI;
			serial_out(p, UART_IER, p->ier);
		}
	}
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}

int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct dma_async_tx_descriptor *desc;

	if (dma->rx_running)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}

void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		__dma_rx_complete(p);
		dmaengine_terminate_async(dma->rxchan);
	}
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);

int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t mask;
	struct dma_slave_caps caps;
	int ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction = DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr = rx_dma_addr + UART_RX;

	dma->txconf.direction = DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr = tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
				      p->port.state->xmit.buf,
				      UART_XMIT_SIZE,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);

void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);
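
/*
 * Usage sketch (illustrative only, not part of this file): a port driver
 * enables this support by pointing up->dma at a struct uart_8250_dma before
 * registering the port; serial8250_request_dma() is then called by the 8250
 * core when the port is started up.  The names "priv" and
 * "example_dma_filter" below are hypothetical; the uart_8250_dma fields are
 * the ones consumed by serial8250_request_dma() above.  fn/rx_param/tx_param
 * are only needed for the legacy filter-function path, and rx_size may be
 * left at 0 to get the PAGE_SIZE default.
 *
 *	priv->dma.fn = example_dma_filter;
 *	priv->dma.rx_param = priv;
 *	priv->dma.tx_param = priv;
 *	priv->dma.rx_size = PAGE_SIZE;
 *	up->dma = &priv->dma;
 *	serial8250_register_8250_port(up);
 */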