fsl_lpuart.c (old: 0fdc50dfab47d525b71a9f0d8310746cdc0c09c5) | fsl_lpuart.c (new: c150c0f362c1e51c0e3216c9912b85b71d00e70d)
---|---
1// SPDX-License-Identifier: GPL-2.0+ 2/* 3 * Freescale lpuart serial port driver 4 * 5 * Copyright 2012-2014 Freescale Semiconductor, Inc. 6 */ 7 8#include <linux/clk.h> --- 220 unchanged lines hidden (view full) --- 229/* IMX lpuart has four extra unused regs located at the beginning */ 230#define IMX_REG_OFF 0x10 231 232static DEFINE_IDA(fsl_lpuart_ida); 233 234enum lpuart_type { 235 VF610_LPUART, 236 LS1021A_LPUART, | 1// SPDX-License-Identifier: GPL-2.0+ 2/* 3 * Freescale lpuart serial port driver 4 * 5 * Copyright 2012-2014 Freescale Semiconductor, Inc. 6 */ 7 8#include <linux/clk.h> --- 220 unchanged lines hidden (view full) --- 229/* IMX lpuart has four extra unused regs located at the beginning */ 230#define IMX_REG_OFF 0x10 231 232static DEFINE_IDA(fsl_lpuart_ida); 233 234enum lpuart_type { 235 VF610_LPUART, 236 LS1021A_LPUART, |
237 LS1028A_LPUART, |
|
237 IMX7ULP_LPUART, 238 IMX8QXP_LPUART, 239}; 240 241struct lpuart_port { 242 struct uart_port port; 243 enum lpuart_type devtype; 244 struct clk *ipg_clk; --- 28 unchanged lines hidden (view full) --- 273 u8 reg_off; 274}; 275 276static const struct lpuart_soc_data vf_data = { 277 .devtype = VF610_LPUART, 278 .iotype = UPIO_MEM, 279}; 280 | 238 IMX7ULP_LPUART, 239 IMX8QXP_LPUART, 240}; 241 242struct lpuart_port { 243 struct uart_port port; 244 enum lpuart_type devtype; 245 struct clk *ipg_clk; --- 28 unchanged lines hidden (view full) --- 274 u8 reg_off; 275}; 276 277static const struct lpuart_soc_data vf_data = { 278 .devtype = VF610_LPUART, 279 .iotype = UPIO_MEM, 280}; 281 |
281static const struct lpuart_soc_data ls_data = { | 282static const struct lpuart_soc_data ls1021a_data = { |
282 .devtype = LS1021A_LPUART, 283 .iotype = UPIO_MEM32BE, 284}; 285 | 283 .devtype = LS1021A_LPUART, 284 .iotype = UPIO_MEM32BE, 285}; 286 |
287static const struct lpuart_soc_data ls1028a_data = { 288 .devtype = LS1028A_LPUART, 289 .iotype = UPIO_MEM32, 290}; 291 |
|
286static struct lpuart_soc_data imx7ulp_data = { 287 .devtype = IMX7ULP_LPUART, 288 .iotype = UPIO_MEM32, 289 .reg_off = IMX_REG_OFF, 290}; 291 292static struct lpuart_soc_data imx8qxp_data = { 293 .devtype = IMX8QXP_LPUART, 294 .iotype = UPIO_MEM32, 295 .reg_off = IMX_REG_OFF, 296}; 297 298static const struct of_device_id lpuart_dt_ids[] = { 299 { .compatible = "fsl,vf610-lpuart", .data = &vf_data, }, | 292static struct lpuart_soc_data imx7ulp_data = { 293 .devtype = IMX7ULP_LPUART, 294 .iotype = UPIO_MEM32, 295 .reg_off = IMX_REG_OFF, 296}; 297 298static struct lpuart_soc_data imx8qxp_data = { 299 .devtype = IMX8QXP_LPUART, 300 .iotype = UPIO_MEM32, 301 .reg_off = IMX_REG_OFF, 302}; 303 304static const struct of_device_id lpuart_dt_ids[] = { 305 { .compatible = "fsl,vf610-lpuart", .data = &vf_data, }, |
300 { .compatible = "fsl,ls1021a-lpuart", .data = &ls_data, }, | 306 { .compatible = "fsl,ls1021a-lpuart", .data = &ls1021a_data, }, 307 { .compatible = "fsl,ls1028a-lpuart", .data = &ls1028a_data, }, |
301 { .compatible = "fsl,imx7ulp-lpuart", .data = &imx7ulp_data, }, 302 { .compatible = "fsl,imx8qxp-lpuart", .data = &imx8qxp_data, }, 303 { /* sentinel */ } 304}; 305MODULE_DEVICE_TABLE(of, lpuart_dt_ids); 306 307/* Forward declare this for the dma callbacks*/ 308static void lpuart_dma_tx_complete(void *arg); 309 | 308 { .compatible = "fsl,imx7ulp-lpuart", .data = &imx7ulp_data, }, 309 { .compatible = "fsl,imx8qxp-lpuart", .data = &imx8qxp_data, }, 310 { /* sentinel */ } 311}; 312MODULE_DEVICE_TABLE(of, lpuart_dt_ids); 313 314/* Forward declare this for the dma callbacks*/ 315static void lpuart_dma_tx_complete(void *arg); 316 |
317static inline bool is_ls1028a_lpuart(struct lpuart_port *sport) 318{ 319 return sport->devtype == LS1028A_LPUART; 320} 321 |
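Note on the hunks above: LS1028A support is wired in through the usual touch points, a new devtype enum value, an lpuart_soc_data instance using UPIO_MEM32 register access, an of_device_id entry for "fsl,ls1028a-lpuart", and the is_ls1028a_lpuart() helper used later for the FIFO-size quirk. The sketch below shows, under assumed names (lpuart_probe_sketch is not part of this diff), how such a match table is typically consumed at probe time.

```c
#include <linux/of_device.h>
#include <linux/platform_device.h>

/*
 * Sketch only: resolve the per-SoC data that the of_device_id table binds
 * to the matched compatible string. Field names follow the diff; the
 * function itself is hypothetical.
 */
static int lpuart_probe_sketch(struct platform_device *pdev)
{
	const struct lpuart_soc_data *sdata = of_device_get_match_data(&pdev->dev);
	struct lpuart_port *sport;

	if (!sdata)
		return -ENODEV;

	sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
	if (!sport)
		return -ENOMEM;

	sport->devtype = sdata->devtype;	/* e.g. LS1028A_LPUART */
	sport->port.iotype = sdata->iotype;	/* UPIO_MEM32 on LS1028A */
	/* sdata->reg_off would additionally shift membase on the i.MX parts */

	return 0;
}
```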
|
310static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport) 311{ 312 return sport->devtype == IMX8QXP_LPUART; 313} 314 315static inline u32 lpuart32_read(struct uart_port *port, u32 off) 316{ 317 switch (port->iotype) { --- 86 unchanged lines hidden (view full) --- 404 lpuart32_write(port, temp & ~UARTCTRL_RE, UARTCTRL); 405} 406 407static void lpuart_dma_tx(struct lpuart_port *sport) 408{ 409 struct circ_buf *xmit = &sport->port.state->xmit; 410 struct scatterlist *sgl = sport->tx_sgl; 411 struct device *dev = sport->port.dev; | 322static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport) 323{ 324 return sport->devtype == IMX8QXP_LPUART; 325} 326 327static inline u32 lpuart32_read(struct uart_port *port, u32 off) 328{ 329 switch (port->iotype) { --- 86 unchanged lines hidden (view full) --- 416 lpuart32_write(port, temp & ~UARTCTRL_RE, UARTCTRL); 417} 418 419static void lpuart_dma_tx(struct lpuart_port *sport) 420{ 421 struct circ_buf *xmit = &sport->port.state->xmit; 422 struct scatterlist *sgl = sport->tx_sgl; 423 struct device *dev = sport->port.dev; |
424 struct dma_chan *chan = sport->dma_tx_chan; |
|
412 int ret; 413 414 if (sport->dma_tx_in_progress) 415 return; 416 417 sport->dma_tx_bytes = uart_circ_chars_pending(xmit); 418 419 if (xmit->tail < xmit->head || xmit->head == 0) { 420 sport->dma_tx_nents = 1; 421 sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes); 422 } else { 423 sport->dma_tx_nents = 2; 424 sg_init_table(sgl, 2); 425 sg_set_buf(sgl, xmit->buf + xmit->tail, 426 UART_XMIT_SIZE - xmit->tail); 427 sg_set_buf(sgl + 1, xmit->buf, xmit->head); 428 } 429 | 425 int ret; 426 427 if (sport->dma_tx_in_progress) 428 return; 429 430 sport->dma_tx_bytes = uart_circ_chars_pending(xmit); 431 432 if (xmit->tail < xmit->head || xmit->head == 0) { 433 sport->dma_tx_nents = 1; 434 sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes); 435 } else { 436 sport->dma_tx_nents = 2; 437 sg_init_table(sgl, 2); 438 sg_set_buf(sgl, xmit->buf + xmit->tail, 439 UART_XMIT_SIZE - xmit->tail); 440 sg_set_buf(sgl + 1, xmit->buf, xmit->head); 441 } 442 |
430 ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); | 443 ret = dma_map_sg(chan->device->dev, sgl, sport->dma_tx_nents, 444 DMA_TO_DEVICE); |
431 if (!ret) { 432 dev_err(dev, "DMA mapping error for TX.\n"); 433 return; 434 } 435 | 445 if (!ret) { 446 dev_err(dev, "DMA mapping error for TX.\n"); 447 return; 448 } 449 |
436 sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl, | 450 sport->dma_tx_desc = dmaengine_prep_slave_sg(chan, sgl, |
437 ret, DMA_MEM_TO_DEV, 438 DMA_PREP_INTERRUPT); 439 if (!sport->dma_tx_desc) { | 451 ret, DMA_MEM_TO_DEV, 452 DMA_PREP_INTERRUPT); 453 if (!sport->dma_tx_desc) { |
440 dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); | 454 dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents, 455 DMA_TO_DEVICE); |
441 dev_err(dev, "Cannot prepare TX slave DMA!\n"); 442 return; 443 } 444 445 sport->dma_tx_desc->callback = lpuart_dma_tx_complete; 446 sport->dma_tx_desc->callback_param = sport; 447 sport->dma_tx_in_progress = true; 448 sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc); | 456 dev_err(dev, "Cannot prepare TX slave DMA!\n"); 457 return; 458 } 459 460 sport->dma_tx_desc->callback = lpuart_dma_tx_complete; 461 sport->dma_tx_desc->callback_param = sport; 462 sport->dma_tx_in_progress = true; 463 sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc); |
449 dma_async_issue_pending(sport->dma_tx_chan); | 464 dma_async_issue_pending(chan); |
450} 451 452static bool lpuart_stopped_or_empty(struct uart_port *port) 453{ 454 return uart_circ_empty(&port->state->xmit) || uart_tx_stopped(port); 455} 456 457static void lpuart_dma_tx_complete(void *arg) 458{ 459 struct lpuart_port *sport = arg; 460 struct scatterlist *sgl = &sport->tx_sgl[0]; 461 struct circ_buf *xmit = &sport->port.state->xmit; | 465} 466 467static bool lpuart_stopped_or_empty(struct uart_port *port) 468{ 469 return uart_circ_empty(&port->state->xmit) || uart_tx_stopped(port); 470} 471 472static void lpuart_dma_tx_complete(void *arg) 473{ 474 struct lpuart_port *sport = arg; 475 struct scatterlist *sgl = &sport->tx_sgl[0]; 476 struct circ_buf *xmit = &sport->port.state->xmit; |
477 struct dma_chan *chan = sport->dma_tx_chan; |
|
462 unsigned long flags; 463 464 spin_lock_irqsave(&sport->port.lock, flags); 465 | 478 unsigned long flags; 479 480 spin_lock_irqsave(&sport->port.lock, flags); 481 |
466 dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); | 482 dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents, 483 DMA_TO_DEVICE); |
467 468 xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1); 469 470 sport->port.icount.tx += sport->dma_tx_bytes; 471 sport->dma_tx_in_progress = false; 472 spin_unlock_irqrestore(&sport->port.lock, flags); 473 474 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) --- 49 unchanged lines hidden (view full) --- 524{ 525 return sport->port.iotype == UPIO_MEM32 || 526 sport->port.iotype == UPIO_MEM32BE; 527} 528 529static void lpuart_flush_buffer(struct uart_port *port) 530{ 531 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); | 484 485 xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1); 486 487 sport->port.icount.tx += sport->dma_tx_bytes; 488 sport->dma_tx_in_progress = false; 489 spin_unlock_irqrestore(&sport->port.lock, flags); 490 491 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) --- 49 unchanged lines hidden (view full) --- 541{ 542 return sport->port.iotype == UPIO_MEM32 || 543 sport->port.iotype == UPIO_MEM32BE; 544} 545 546static void lpuart_flush_buffer(struct uart_port *port) 547{ 548 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); |
549 struct dma_chan *chan = sport->dma_tx_chan; |
|
532 u32 val; 533 534 if (sport->lpuart_dma_tx_use) { 535 if (sport->dma_tx_in_progress) { | 550 u32 val; 551 552 if (sport->lpuart_dma_tx_use) { 553 if (sport->dma_tx_in_progress) { |
536 dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0], | 554 dma_unmap_sg(chan->device->dev, &sport->tx_sgl[0], |
537 sport->dma_tx_nents, DMA_TO_DEVICE); 538 sport->dma_tx_in_progress = false; 539 } | 555 sport->dma_tx_nents, DMA_TO_DEVICE); 556 sport->dma_tx_in_progress = false; 557 } |
540 dmaengine_terminate_all(sport->dma_tx_chan); | 558 dmaengine_terminate_all(chan); |
541 } 542 543 if (lpuart_is_32(sport)) { 544 val = lpuart32_read(&sport->port, UARTFIFO); 545 val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH; 546 lpuart32_write(&sport->port, val, UARTFIFO); 547 } else { 548 val = readb(sport->port.membase + UARTCFIFO); --- 439 unchanged lines hidden (view full) --- 988 return IRQ_HANDLED; 989} 990 991static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) 992{ 993 struct tty_port *port = &sport->port.state->port; 994 struct dma_tx_state state; 995 enum dma_status dmastat; | 559 } 560 561 if (lpuart_is_32(sport)) { 562 val = lpuart32_read(&sport->port, UARTFIFO); 563 val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH; 564 lpuart32_write(&sport->port, val, UARTFIFO); 565 } else { 566 val = readb(sport->port.membase + UARTCFIFO); --- 439 unchanged lines hidden (view full) --- 1006 return IRQ_HANDLED; 1007} 1008 1009static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) 1010{ 1011 struct tty_port *port = &sport->port.state->port; 1012 struct dma_tx_state state; 1013 enum dma_status dmastat; |
1014 struct dma_chan *chan = sport->dma_rx_chan; |
|
996 struct circ_buf *ring = &sport->rx_ring; 997 unsigned long flags; 998 int count = 0; 999 1000 if (lpuart_is_32(sport)) { 1001 unsigned long sr = lpuart32_read(&sport->port, UARTSTAT); 1002 1003 if (sr & (UARTSTAT_PE | UARTSTAT_FE)) { --- 44 unchanged lines hidden (view full) --- 1048 writeb(cr2, sport->port.membase + UARTCR2); 1049 } 1050 } 1051 1052 async_tx_ack(sport->dma_rx_desc); 1053 1054 spin_lock_irqsave(&sport->port.lock, flags); 1055 | 1015 struct circ_buf *ring = &sport->rx_ring; 1016 unsigned long flags; 1017 int count = 0; 1018 1019 if (lpuart_is_32(sport)) { 1020 unsigned long sr = lpuart32_read(&sport->port, UARTSTAT); 1021 1022 if (sr & (UARTSTAT_PE | UARTSTAT_FE)) { --- 44 unchanged lines hidden (view full) --- 1067 writeb(cr2, sport->port.membase + UARTCR2); 1068 } 1069 } 1070 1071 async_tx_ack(sport->dma_rx_desc); 1072 1073 spin_lock_irqsave(&sport->port.lock, flags); 1074 |
1056 dmastat = dmaengine_tx_status(sport->dma_rx_chan, 1057 sport->dma_rx_cookie, 1058 &state); 1059 | 1075 dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state); |
1060 if (dmastat == DMA_ERROR) { 1061 dev_err(sport->port.dev, "Rx DMA transfer failed!\n"); 1062 spin_unlock_irqrestore(&sport->port.lock, flags); 1063 return; 1064 } 1065 1066 /* CPU claims ownership of RX DMA buffer */ | 1076 if (dmastat == DMA_ERROR) { 1077 dev_err(sport->port.dev, "Rx DMA transfer failed!\n"); 1078 spin_unlock_irqrestore(&sport->port.lock, flags); 1079 return; 1080 } 1081 1082 /* CPU claims ownership of RX DMA buffer */ |
1067 dma_sync_sg_for_cpu(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); | 1083 dma_sync_sg_for_cpu(chan->device->dev, &sport->rx_sgl, 1, 1084 DMA_FROM_DEVICE); |
1068 1069 /* 1070 * ring->head points to the end of data already written by the DMA. 1071 * ring->tail points to the beginning of data to be read by the 1072 * framework. 1073 * The current transfer size should not be larger than the dma buffer 1074 * length. 1075 */ --- 25 unchanged lines hidden (view full) --- 1101 tty_insert_flip_string(port, ring->buf + ring->tail, count); 1102 /* Wrap ring->head if needed */ 1103 if (ring->head >= sport->rx_sgl.length) 1104 ring->head = 0; 1105 ring->tail = ring->head; 1106 sport->port.icount.rx += count; 1107 } 1108 | 1085 1086 /* 1087 * ring->head points to the end of data already written by the DMA. 1088 * ring->tail points to the beginning of data to be read by the 1089 * framework. 1090 * The current transfer size should not be larger than the dma buffer 1091 * length. 1092 */ --- 25 unchanged lines hidden (view full) --- 1118 tty_insert_flip_string(port, ring->buf + ring->tail, count); 1119 /* Wrap ring->head if needed */ 1120 if (ring->head >= sport->rx_sgl.length) 1121 ring->head = 0; 1122 ring->tail = ring->head; 1123 sport->port.icount.rx += count; 1124 } 1125 |
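Note on lpuart_copy_rx_to_tty() above: as the in-code comment says, ring->head marks where the cyclic RX DMA stopped writing and ring->tail marks where the driver stopped reading, so a wrapped head means the data is pushed to the tty in two chunks. The standalone snippet below illustrates that bookkeeping; the buffer contents and positions are made-up example values, not anything read from the driver.

```c
#include <stdio.h>
#include <string.h>

#define RING_SIZE 16

int main(void)
{
	char ring[RING_SIZE];
	char out[RING_SIZE + 1] = "";
	size_t tail = 12, head = 5;	/* wrapped: DMA passed the buffer end */

	memcpy(ring, "ABCDEFGHIJKLMNOP", RING_SIZE);

	if (head < tail) {		/* wrapped case: consume in two chunks */
		strncat(out, ring + tail, RING_SIZE - tail);
		tail = 0;
	}
	strncat(out, ring + tail, head - tail);
	tail = head;

	printf("consumed: \"%s\" (tail now %zu)\n", out, tail);
	return 0;
}
```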
1109 dma_sync_sg_for_device(sport->port.dev, &sport->rx_sgl, 1, | 1126 dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1, |
1110 DMA_FROM_DEVICE); 1111 1112 spin_unlock_irqrestore(&sport->port.lock, flags); 1113 1114 tty_flip_buffer_push(port); 1115 mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout); 1116} 1117 --- 15 unchanged lines hidden (view full) --- 1133{ 1134 struct dma_slave_config dma_rx_sconfig = {}; 1135 struct circ_buf *ring = &sport->rx_ring; 1136 int ret, nent; 1137 int bits, baud; 1138 struct tty_port *port = &sport->port.state->port; 1139 struct tty_struct *tty = port->tty; 1140 struct ktermios *termios = &tty->termios; | 1127 DMA_FROM_DEVICE); 1128 1129 spin_unlock_irqrestore(&sport->port.lock, flags); 1130 1131 tty_flip_buffer_push(port); 1132 mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout); 1133} 1134 --- 15 unchanged lines hidden (view full) --- 1150{ 1151 struct dma_slave_config dma_rx_sconfig = {}; 1152 struct circ_buf *ring = &sport->rx_ring; 1153 int ret, nent; 1154 int bits, baud; 1155 struct tty_port *port = &sport->port.state->port; 1156 struct tty_struct *tty = port->tty; 1157 struct ktermios *termios = &tty->termios; |
1158 struct dma_chan *chan = sport->dma_rx_chan; |
|
1141 1142 baud = tty_get_baud_rate(tty); 1143 1144 bits = (termios->c_cflag & CSIZE) == CS7 ? 9 : 10; 1145 if (termios->c_cflag & PARENB) 1146 bits++; 1147 1148 /* --- 5 unchanged lines hidden (view full) --- 1154 if (sport->rx_dma_rng_buf_len < 16) 1155 sport->rx_dma_rng_buf_len = 16; 1156 1157 ring->buf = kzalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC); 1158 if (!ring->buf) 1159 return -ENOMEM; 1160 1161 sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len); | 1159 1160 baud = tty_get_baud_rate(tty); 1161 1162 bits = (termios->c_cflag & CSIZE) == CS7 ? 9 : 10; 1163 if (termios->c_cflag & PARENB) 1164 bits++; 1165 1166 /* --- 5 unchanged lines hidden (view full) --- 1172 if (sport->rx_dma_rng_buf_len < 16) 1173 sport->rx_dma_rng_buf_len = 16; 1174 1175 ring->buf = kzalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC); 1176 if (!ring->buf) 1177 return -ENOMEM; 1178 1179 sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len); |
1162 nent = dma_map_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); | 1180 nent = dma_map_sg(chan->device->dev, &sport->rx_sgl, 1, 1181 DMA_FROM_DEVICE); |
1163 1164 if (!nent) { 1165 dev_err(sport->port.dev, "DMA Rx mapping error\n"); 1166 return -EINVAL; 1167 } 1168 1169 dma_rx_sconfig.src_addr = lpuart_dma_datareg_addr(sport); 1170 dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1171 dma_rx_sconfig.src_maxburst = 1; 1172 dma_rx_sconfig.direction = DMA_DEV_TO_MEM; | 1182 1183 if (!nent) { 1184 dev_err(sport->port.dev, "DMA Rx mapping error\n"); 1185 return -EINVAL; 1186 } 1187 1188 dma_rx_sconfig.src_addr = lpuart_dma_datareg_addr(sport); 1189 dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1190 dma_rx_sconfig.src_maxburst = 1; 1191 dma_rx_sconfig.direction = DMA_DEV_TO_MEM; |
1173 ret = dmaengine_slave_config(sport->dma_rx_chan, &dma_rx_sconfig); | 1192 ret = dmaengine_slave_config(chan, &dma_rx_sconfig); |
1174 1175 if (ret < 0) { 1176 dev_err(sport->port.dev, 1177 "DMA Rx slave config failed, err = %d\n", ret); 1178 return ret; 1179 } 1180 | 1193 1194 if (ret < 0) { 1195 dev_err(sport->port.dev, 1196 "DMA Rx slave config failed, err = %d\n", ret); 1197 return ret; 1198 } 1199 |
1181 sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan, | 1200 sport->dma_rx_desc = dmaengine_prep_dma_cyclic(chan, |
1182 sg_dma_address(&sport->rx_sgl), 1183 sport->rx_sgl.length, 1184 sport->rx_sgl.length / 2, 1185 DMA_DEV_TO_MEM, 1186 DMA_PREP_INTERRUPT); 1187 if (!sport->dma_rx_desc) { 1188 dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n"); 1189 return -EFAULT; 1190 } 1191 1192 sport->dma_rx_desc->callback = lpuart_dma_rx_complete; 1193 sport->dma_rx_desc->callback_param = sport; 1194 sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc); | 1201 sg_dma_address(&sport->rx_sgl), 1202 sport->rx_sgl.length, 1203 sport->rx_sgl.length / 2, 1204 DMA_DEV_TO_MEM, 1205 DMA_PREP_INTERRUPT); 1206 if (!sport->dma_rx_desc) { 1207 dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n"); 1208 return -EFAULT; 1209 } 1210 1211 sport->dma_rx_desc->callback = lpuart_dma_rx_complete; 1212 sport->dma_rx_desc->callback_param = sport; 1213 sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc); |
1195 dma_async_issue_pending(sport->dma_rx_chan); | 1214 dma_async_issue_pending(chan); |
1196 1197 if (lpuart_is_32(sport)) { 1198 unsigned long temp = lpuart32_read(&sport->port, UARTBAUD); 1199 1200 lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD); 1201 } else { 1202 writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS, 1203 sport->port.membase + UARTCR5); 1204 } 1205 1206 return 0; 1207} 1208 1209static void lpuart_dma_rx_free(struct uart_port *port) 1210{ 1211 struct lpuart_port *sport = container_of(port, 1212 struct lpuart_port, port); | 1215 1216 if (lpuart_is_32(sport)) { 1217 unsigned long temp = lpuart32_read(&sport->port, UARTBAUD); 1218 1219 lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD); 1220 } else { 1221 writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS, 1222 sport->port.membase + UARTCR5); 1223 } 1224 1225 return 0; 1226} 1227 1228static void lpuart_dma_rx_free(struct uart_port *port) 1229{ 1230 struct lpuart_port *sport = container_of(port, 1231 struct lpuart_port, port); |
1232 struct dma_chan *chan = sport->dma_rx_chan; |
|
1213 | 1233 |
1214 if (sport->dma_rx_chan) 1215 dmaengine_terminate_all(sport->dma_rx_chan); 1216 1217 dma_unmap_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); | 1234 dmaengine_terminate_all(chan); 1235 dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); |
1218 kfree(sport->rx_ring.buf); 1219 sport->rx_ring.tail = 0; 1220 sport->rx_ring.head = 0; 1221 sport->dma_rx_desc = NULL; 1222 sport->dma_rx_cookie = -EINVAL; 1223} 1224 1225static int lpuart_config_rs485(struct uart_port *port, --- 259 unchanged lines hidden (view full) --- 1485 1486static void rx_dma_timer_init(struct lpuart_port *sport) 1487{ 1488 timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0); 1489 sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout; 1490 add_timer(&sport->lpuart_timer); 1491} 1492 | 1236 kfree(sport->rx_ring.buf); 1237 sport->rx_ring.tail = 0; 1238 sport->rx_ring.head = 0; 1239 sport->dma_rx_desc = NULL; 1240 sport->dma_rx_cookie = -EINVAL; 1241} 1242 1243static int lpuart_config_rs485(struct uart_port *port, --- 259 unchanged lines hidden (view full) --- 1503 1504static void rx_dma_timer_init(struct lpuart_port *sport) 1505{ 1506 timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0); 1507 sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout; 1508 add_timer(&sport->lpuart_timer); 1509} 1510 |
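Note on the DMA hunks above (lpuart_dma_tx(), lpuart_dma_tx_complete(), lpuart_flush_buffer(), lpuart_copy_rx_to_tty(), lpuart_start_rx_dma() and lpuart_dma_rx_free()): streaming mappings are now created, synced and torn down against chan->device->dev, the device of the DMA engine that actually masters the transfer, rather than the UART's own sport->port.dev, and a local chan variable replaces the repeated sport->dma_*_chan dereferences. A minimal kernel-style sketch of the resulting pattern follows; the function name is hypothetical and the error codes are illustrative.

```c
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Sketch only: map the TX scatterlist against the dmaengine channel's
 * device and make sure the error-path unmap uses the very same device.
 */
static int lpuart_tx_submit_sketch(struct dma_chan *chan,
				   struct scatterlist *sgl, unsigned int nents)
{
	struct device *dma_dev = chan->device->dev;	/* not the UART's dev */
	struct dma_async_tx_descriptor *desc;
	int mapped;

	mapped = dma_map_sg(dma_dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	desc = dmaengine_prep_slave_sg(chan, sgl, mapped, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc) {
		/* unmap with the original nents, on the mapping device */
		dma_unmap_sg(dma_dev, sgl, nents, DMA_TO_DEVICE);
		return -EIO;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
```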
1511static void lpuart_request_dma(struct lpuart_port *sport) 1512{ 1513 sport->dma_tx_chan = dma_request_chan(sport->port.dev, "tx"); 1514 if (IS_ERR(sport->dma_tx_chan)) { 1515 dev_dbg_once(sport->port.dev, 1516 "DMA tx channel request failed, operating without tx DMA (%ld)\n", 1517 PTR_ERR(sport->dma_tx_chan)); 1518 sport->dma_tx_chan = NULL; 1519 } 1520 1521 sport->dma_rx_chan = dma_request_chan(sport->port.dev, "rx"); 1522 if (IS_ERR(sport->dma_rx_chan)) { 1523 dev_dbg_once(sport->port.dev, 1524 "DMA rx channel request failed, operating without rx DMA (%ld)\n", 1525 PTR_ERR(sport->dma_rx_chan)); 1526 sport->dma_rx_chan = NULL; 1527 } 1528} 1529 |
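Note on lpuart_request_dma() above: channel lookup now uses dma_request_chan(), which reports failure as an ERR_PTR() (the probe-time code removed near the end of this diff used dma_request_slave_channel(), which returns NULL and hides the reason), and a failed request simply means the port runs in PIO mode. Together with the dma_release_channel() calls added to lpuart_dma_shutdown() further down, this ties the channels to the port's open/close lifetime instead of probe/remove. A hedged sketch of the request pattern, with a made-up helper name:

```c
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Sketch only: request an optional slave channel by name and treat any
 * failure as "run without DMA"; callers may also want to special-case
 * -EPROBE_DEFER, which this simplified version does not do.
 */
static struct dma_chan *lpuart_optional_chan_sketch(struct device *dev,
						    const char *name)
{
	struct dma_chan *chan = dma_request_chan(dev, name);

	if (IS_ERR(chan)) {
		dev_dbg(dev, "no %s DMA channel (%ld), using PIO\n",
			name, PTR_ERR(chan));
		return NULL;
	}

	return chan;
}
```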
|
1493static void lpuart_tx_dma_startup(struct lpuart_port *sport) 1494{ 1495 u32 uartbaud; | 1530static void lpuart_tx_dma_startup(struct lpuart_port *sport) 1531{ 1532 u32 uartbaud; |
1533 int ret; |
|
1496 | 1534 |
1497 if (sport->dma_tx_chan && !lpuart_dma_tx_request(&sport->port)) { 1498 init_waitqueue_head(&sport->dma_wait); 1499 sport->lpuart_dma_tx_use = true; 1500 if (lpuart_is_32(sport)) { 1501 uartbaud = lpuart32_read(&sport->port, UARTBAUD); 1502 lpuart32_write(&sport->port, 1503 uartbaud | UARTBAUD_TDMAE, UARTBAUD); 1504 } else { 1505 writeb(readb(sport->port.membase + UARTCR5) | 1506 UARTCR5_TDMAS, sport->port.membase + UARTCR5); 1507 } | 1535 if (!sport->dma_tx_chan) 1536 goto err; 1537 1538 ret = lpuart_dma_tx_request(&sport->port); 1539 if (ret) 1540 goto err; 1541 1542 init_waitqueue_head(&sport->dma_wait); 1543 sport->lpuart_dma_tx_use = true; 1544 if (lpuart_is_32(sport)) { 1545 uartbaud = lpuart32_read(&sport->port, UARTBAUD); 1546 lpuart32_write(&sport->port, 1547 uartbaud | UARTBAUD_TDMAE, UARTBAUD); |
1508 } else { | 1548 } else { |
1509 sport->lpuart_dma_tx_use = false; | 1549 writeb(readb(sport->port.membase + UARTCR5) | 1550 UARTCR5_TDMAS, sport->port.membase + UARTCR5); |
1510 } | 1551 } |
1552 1553 return; 1554 1555err: 1556 sport->lpuart_dma_tx_use = false; |
|
1511} 1512 1513static void lpuart_rx_dma_startup(struct lpuart_port *sport) 1514{ | 1557} 1558 1559static void lpuart_rx_dma_startup(struct lpuart_port *sport) 1560{ |
1515 if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) { 1516 /* set Rx DMA timeout */ 1517 sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT); 1518 if (!sport->dma_rx_timeout) 1519 sport->dma_rx_timeout = 1; | 1561 int ret; |
1520 | 1562 |
1521 sport->lpuart_dma_rx_use = true; 1522 rx_dma_timer_init(sport); 1523 } else { 1524 sport->lpuart_dma_rx_use = false; 1525 } | 1563 if (!sport->dma_rx_chan) 1564 goto err; 1565 1566 ret = lpuart_start_rx_dma(sport); 1567 if (ret) 1568 goto err; 1569 1570 /* set Rx DMA timeout */ 1571 sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT); 1572 if (!sport->dma_rx_timeout) 1573 sport->dma_rx_timeout = 1; 1574 1575 sport->lpuart_dma_rx_use = true; 1576 rx_dma_timer_init(sport); 1577 1578 return; 1579 1580err: 1581 sport->lpuart_dma_rx_use = false; |
1526} 1527 1528static int lpuart_startup(struct uart_port *port) 1529{ 1530 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); 1531 unsigned long flags; 1532 unsigned char temp; 1533 1534 /* determine FIFO size and enable FIFO mode */ 1535 temp = readb(sport->port.membase + UARTPFIFO); 1536 1537 sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_TXSIZE_OFF) & 1538 UARTPFIFO_FIFOSIZE_MASK); 1539 sport->port.fifosize = sport->txfifo_size; 1540 1541 sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) & 1542 UARTPFIFO_FIFOSIZE_MASK); 1543 | 1582} 1583 1584static int lpuart_startup(struct uart_port *port) 1585{ 1586 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); 1587 unsigned long flags; 1588 unsigned char temp; 1589 1590 /* determine FIFO size and enable FIFO mode */ 1591 temp = readb(sport->port.membase + UARTPFIFO); 1592 1593 sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_TXSIZE_OFF) & 1594 UARTPFIFO_FIFOSIZE_MASK); 1595 sport->port.fifosize = sport->txfifo_size; 1596 1597 sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) & 1598 UARTPFIFO_FIFOSIZE_MASK); 1599 |
1600 lpuart_request_dma(sport); 1601 |
|
1544 spin_lock_irqsave(&sport->port.lock, flags); 1545 1546 lpuart_setup_watermark_enable(sport); 1547 1548 lpuart_rx_dma_startup(sport); 1549 lpuart_tx_dma_startup(sport); 1550 1551 spin_unlock_irqrestore(&sport->port.lock, flags); --- 30 unchanged lines hidden (view full) --- 1582 1583 sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_TXSIZE_OFF) & 1584 UARTFIFO_FIFOSIZE_MASK); 1585 sport->port.fifosize = sport->txfifo_size; 1586 1587 sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) & 1588 UARTFIFO_FIFOSIZE_MASK); 1589 | 1602 spin_lock_irqsave(&sport->port.lock, flags); 1603 1604 lpuart_setup_watermark_enable(sport); 1605 1606 lpuart_rx_dma_startup(sport); 1607 lpuart_tx_dma_startup(sport); 1608 1609 spin_unlock_irqrestore(&sport->port.lock, flags); --- 30 unchanged lines hidden (view full) --- 1640 1641 sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_TXSIZE_OFF) & 1642 UARTFIFO_FIFOSIZE_MASK); 1643 sport->port.fifosize = sport->txfifo_size; 1644 1645 sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) & 1646 UARTFIFO_FIFOSIZE_MASK); 1647 |
1648 /* 1649 * The LS1028A has a fixed length of 16 words. Although it supports the 1650 * RX/TXSIZE fields their encoding is different. Eg the reference manual 1651 * states 0b101 is 16 words. 1652 */ 1653 if (is_ls1028a_lpuart(sport)) { 1654 sport->rxfifo_size = 16; 1655 sport->txfifo_size = 16; 1656 sport->port.fifosize = sport->txfifo_size; 1657 } 1658 1659 lpuart_request_dma(sport); 1660 |
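Note on the lpuart32_startup() hunk above: the generic RX/TXSIZE decode does not fit the LS1028A, whose reference manual uses a different encoding (0b101 meaning 16 words), so the FIFO depth is pinned to 16. The standalone snippet below contrasts the two, assuming the generic decode is depth = 2^(size + 1) words for a non-zero size field; the 0b101 value comes from the comment in the diff, not from hardware.

```c
#include <stdio.h>

/* Assumed generic decode: depth = 1 << (size + 1) for non-zero size fields. */
static unsigned int generic_fifo_depth(unsigned int size_field)
{
	return size_field ? 1u << (size_field + 1) : 1u;
}

int main(void)
{
	unsigned int size_field = 0x5;	/* 0b101, per the comment in the diff */

	printf("generic decode of 0b101: %u words\n",
	       generic_fifo_depth(size_field));
	printf("LS1028A reference manual: 16 words -> driver hard-codes 16\n");
	return 0;
}
```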
|
1590 spin_lock_irqsave(&sport->port.lock, flags); 1591 1592 lpuart32_setup_watermark_enable(sport); 1593 | 1661 spin_lock_irqsave(&sport->port.lock, flags); 1662 1663 lpuart32_setup_watermark_enable(sport); 1664 |
1594 | |
1595 lpuart_rx_dma_startup(sport); 1596 lpuart_tx_dma_startup(sport); 1597 1598 lpuart32_configure(sport); 1599 1600 spin_unlock_irqrestore(&sport->port.lock, flags); 1601 return 0; 1602} --- 7 unchanged lines hidden (view full) --- 1610 1611 if (sport->lpuart_dma_tx_use) { 1612 if (wait_event_interruptible(sport->dma_wait, 1613 !sport->dma_tx_in_progress) != false) { 1614 sport->dma_tx_in_progress = false; 1615 dmaengine_terminate_all(sport->dma_tx_chan); 1616 } 1617 } | 1665 lpuart_rx_dma_startup(sport); 1666 lpuart_tx_dma_startup(sport); 1667 1668 lpuart32_configure(sport); 1669 1670 spin_unlock_irqrestore(&sport->port.lock, flags); 1671 return 0; 1672} --- 7 unchanged lines hidden (view full) --- 1680 1681 if (sport->lpuart_dma_tx_use) { 1682 if (wait_event_interruptible(sport->dma_wait, 1683 !sport->dma_tx_in_progress) != false) { 1684 sport->dma_tx_in_progress = false; 1685 dmaengine_terminate_all(sport->dma_tx_chan); 1686 } 1687 } |
1688 1689 if (sport->dma_tx_chan) 1690 dma_release_channel(sport->dma_tx_chan); 1691 if (sport->dma_rx_chan) 1692 dma_release_channel(sport->dma_rx_chan); |
|
1618} 1619 1620static void lpuart_shutdown(struct uart_port *port) 1621{ 1622 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); 1623 unsigned char temp; 1624 unsigned long flags; 1625 --- 180 unchanged lines hidden (view full) --- 1806 rx_dma_timer_init(sport); 1807 else 1808 sport->lpuart_dma_rx_use = false; 1809 } 1810 1811 spin_unlock_irqrestore(&sport->port.lock, flags); 1812} 1813 | 1693} 1694 1695static void lpuart_shutdown(struct uart_port *port) 1696{ 1697 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); 1698 unsigned char temp; 1699 unsigned long flags; 1700 --- 180 unchanged lines hidden (view full) --- 1881 rx_dma_timer_init(sport); 1882 else 1883 sport->lpuart_dma_rx_use = false; 1884 } 1885 1886 spin_unlock_irqrestore(&sport->port.lock, flags); 1887} 1888 |
1814static void 1815lpuart32_serial_setbrg(struct lpuart_port *sport, unsigned int baudrate) | 1889static void __lpuart32_serial_setbrg(struct uart_port *port, 1890 unsigned int baudrate, bool use_rx_dma, 1891 bool use_tx_dma) |
1816{ 1817 u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, tmp; | 1892{ 1893 u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, tmp; |
1818 u32 clk = sport->port.uartclk; | 1894 u32 clk = port->uartclk; |
1819 1820 /* 1821 * The idea is to use the best OSR (over-sampling rate) possible. 1822 * Note, OSR is typically hard-set to 16 in other LPUART instantiations. 1823 * Loop to find the best OSR value possible, one that generates minimum 1824 * baud_diff iterate through the rest of the supported values of OSR. 1825 * 1826 * Calculation Formula: --- 29 unchanged lines hidden (view full) --- 1856 1857 if (!baud_diff) 1858 break; 1859 } 1860 } 1861 1862 /* handle buadrate outside acceptable rate */ 1863 if (baud_diff > ((baudrate / 100) * 3)) | 1895 1896 /* 1897 * The idea is to use the best OSR (over-sampling rate) possible. 1898 * Note, OSR is typically hard-set to 16 in other LPUART instantiations. 1899 * Loop to find the best OSR value possible, one that generates minimum 1900 * baud_diff iterate through the rest of the supported values of OSR. 1901 * 1902 * Calculation Formula: --- 29 unchanged lines hidden (view full) --- 1932 1933 if (!baud_diff) 1934 break; 1935 } 1936 } 1937 1938 /* handle buadrate outside acceptable rate */ 1939 if (baud_diff > ((baudrate / 100) * 3)) |
1864 dev_warn(sport->port.dev, | 1940 dev_warn(port->dev, |
1865 "unacceptable baud rate difference of more than 3%%\n"); 1866 | 1941 "unacceptable baud rate difference of more than 3%%\n"); 1942 |
1867 tmp = lpuart32_read(&sport->port, UARTBAUD); | 1943 tmp = lpuart32_read(port, UARTBAUD); |
1868 1869 if ((osr > 3) && (osr < 8)) 1870 tmp |= UARTBAUD_BOTHEDGE; 1871 1872 tmp &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT); 1873 tmp |= ((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT; 1874 1875 tmp &= ~UARTBAUD_SBR_MASK; 1876 tmp |= sbr & UARTBAUD_SBR_MASK; 1877 | 1944 1945 if ((osr > 3) && (osr < 8)) 1946 tmp |= UARTBAUD_BOTHEDGE; 1947 1948 tmp &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT); 1949 tmp |= ((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT; 1950 1951 tmp &= ~UARTBAUD_SBR_MASK; 1952 tmp |= sbr & UARTBAUD_SBR_MASK; 1953 |
1878 if (!sport->lpuart_dma_rx_use) | 1954 if (!use_rx_dma) |
1879 tmp &= ~UARTBAUD_RDMAE; | 1955 tmp &= ~UARTBAUD_RDMAE; |
1880 if (!sport->lpuart_dma_tx_use) | 1956 if (!use_tx_dma) |
1881 tmp &= ~UARTBAUD_TDMAE; 1882 | 1957 tmp &= ~UARTBAUD_TDMAE; 1958 |
1883 lpuart32_write(&sport->port, tmp, UARTBAUD); | 1959 lpuart32_write(port, tmp, UARTBAUD); |
1884} 1885 | 1960} 1961 |
1962static void lpuart32_serial_setbrg(struct lpuart_port *sport, 1963 unsigned int baudrate) 1964{ 1965 __lpuart32_serial_setbrg(&sport->port, baudrate, 1966 sport->lpuart_dma_rx_use, 1967 sport->lpuart_dma_tx_use); 1968} 1969 1970 |
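Note on the __lpuart32_serial_setbrg() refactor above: the helper now takes a bare uart_port plus explicit DMA flags so the early-console path, which has neither a struct lpuart_port nor DMA set up, can reuse it, and lpuart32_serial_setbrg() becomes a thin wrapper passing the port's lpuart_dma_*_use state. The divider search itself is in the hidden lines; the standalone program below sketches that search under the assumption that the generated rate is uartclk / (osr * sbr) with OSR swept from 4 to 32, using made-up clock and baud values.

```c
#include <stdio.h>

/* Sketch of the OSR/SBR search; simplified (no SBR range clamping, no
 * rounding toward sbr + 1) compared with the driver's hidden loop.
 */
static void pick_osr_sbr(unsigned int clk, unsigned int baud,
			 unsigned int *best_osr, unsigned int *best_sbr)
{
	unsigned int osr, sbr, actual, diff, best_diff = baud;

	for (osr = 4; osr <= 32; osr++) {
		sbr = clk / (baud * osr);
		if (!sbr)
			sbr = 1;
		actual = clk / (osr * sbr);
		diff = actual > baud ? actual - baud : baud - actual;
		if (diff <= best_diff) {	/* prefer the higher OSR on ties */
			best_diff = diff;
			*best_osr = osr;
			*best_sbr = sbr;
		}
	}
}

int main(void)
{
	unsigned int osr = 0, sbr = 0;
	unsigned int clk = 80000000, baud = 115200;	/* example values only */

	pick_osr_sbr(clk, baud, &osr, &sbr);
	printf("osr=%u sbr=%u actual=%u requested=%u\n",
	       osr, sbr, clk / (osr * sbr), baud);
	return 0;
}
```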
|
1886static void 1887lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, 1888 struct ktermios *old) 1889{ 1890 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); 1891 unsigned long flags; 1892 unsigned long ctrl, old_ctrl, modem; 1893 unsigned int baud; --- 477 unchanged lines hidden (view full) --- 2371 2372 if (device->port.iotype != UPIO_MEM32) 2373 device->port.iotype = UPIO_MEM32BE; 2374 2375 device->con->write = lpuart32_early_write; 2376 return 0; 2377} 2378 | 1971static void 1972lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, 1973 struct ktermios *old) 1974{ 1975 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); 1976 unsigned long flags; 1977 unsigned long ctrl, old_ctrl, modem; 1978 unsigned int baud; --- 477 unchanged lines hidden (view full) --- 2456 2457 if (device->port.iotype != UPIO_MEM32) 2458 device->port.iotype = UPIO_MEM32BE; 2459 2460 device->con->write = lpuart32_early_write; 2461 return 0; 2462} 2463 |
2464static int __init ls1028a_early_console_setup(struct earlycon_device *device, 2465 const char *opt) 2466{ 2467 u32 cr; 2468 2469 if (!device->port.membase) 2470 return -ENODEV; 2471 2472 device->port.iotype = UPIO_MEM32; 2473 device->con->write = lpuart32_early_write; 2474 2475 /* set the baudrate */ 2476 if (device->port.uartclk && device->baud) 2477 __lpuart32_serial_setbrg(&device->port, device->baud, 2478 false, false); 2479 2480 /* enable transmitter */ 2481 cr = lpuart32_read(&device->port, UARTCTRL); 2482 cr |= UARTCTRL_TE; 2483 lpuart32_write(&device->port, cr, UARTCTRL); 2484 2485 return 0; 2486} 2487 |
|
2379static int __init lpuart32_imx_early_console_setup(struct earlycon_device *device, 2380 const char *opt) 2381{ 2382 if (!device->port.membase) 2383 return -ENODEV; 2384 2385 device->port.iotype = UPIO_MEM32; 2386 device->port.membase += IMX_REG_OFF; 2387 device->con->write = lpuart32_early_write; 2388 2389 return 0; 2390} 2391OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup); 2392OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup); | 2488static int __init lpuart32_imx_early_console_setup(struct earlycon_device *device, 2489 const char *opt) 2490{ 2491 if (!device->port.membase) 2492 return -ENODEV; 2493 2494 device->port.iotype = UPIO_MEM32; 2495 device->port.membase += IMX_REG_OFF; 2496 device->con->write = lpuart32_early_write; 2497 2498 return 0; 2499} 2500OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup); 2501OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup); |
2502OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup); |
|
2393OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup); 2394EARLYCON_DECLARE(lpuart, lpuart_early_console_setup); 2395EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup); 2396 2397#define LPUART_CONSOLE (&lpuart_console) 2398#define LPUART32_CONSOLE (&lpuart32_console) 2399#else 2400#define LPUART_CONSOLE NULL --- 103 unchanged lines hidden (view full) --- 2504 2505 if (ret) 2506 goto failed_irq_request; 2507 2508 ret = uart_add_one_port(&lpuart_reg, &sport->port); 2509 if (ret) 2510 goto failed_attach_port; 2511 | 2503OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup); 2504EARLYCON_DECLARE(lpuart, lpuart_early_console_setup); 2505EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup); 2506 2507#define LPUART_CONSOLE (&lpuart_console) 2508#define LPUART32_CONSOLE (&lpuart32_console) 2509#else 2510#define LPUART_CONSOLE NULL --- 103 unchanged lines hidden (view full) --- 2614 2615 if (ret) 2616 goto failed_irq_request; 2617 2618 ret = uart_add_one_port(&lpuart_reg, &sport->port); 2619 if (ret) 2620 goto failed_attach_port; 2621 |
2512 uart_get_rs485_mode(&pdev->dev, &sport->port.rs485); | 2622 ret = uart_get_rs485_mode(&sport->port); 2623 if (ret) 2624 goto failed_get_rs485; |
2513 2514 if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX) 2515 dev_err(&pdev->dev, "driver doesn't support RX during TX\n"); 2516 2517 if (sport->port.rs485.delay_rts_before_send || 2518 sport->port.rs485.delay_rts_after_send) 2519 dev_err(&pdev->dev, "driver doesn't support RTS delays\n"); 2520 2521 sport->port.rs485_config(&sport->port, &sport->port.rs485); 2522 | 2625 2626 if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX) 2627 dev_err(&pdev->dev, "driver doesn't support RX during TX\n"); 2628 2629 if (sport->port.rs485.delay_rts_before_send || 2630 sport->port.rs485.delay_rts_after_send) 2631 dev_err(&pdev->dev, "driver doesn't support RTS delays\n"); 2632 2633 sport->port.rs485_config(&sport->port, &sport->port.rs485); 2634 |
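Note on the probe hunk above: uart_get_rs485_mode() is called in its newer form, which takes the uart_port and returns an error code (the old column used the void variant taking a device and a serial_rs485 pair), hence the new failed_get_rs485 unwind label. A hedged sketch of checking that return value before applying the configuration, with a made-up helper name:

```c
#include <linux/serial_core.h>

/* Sketch only: read the RS-485 properties for the port and, if that
 * succeeds, apply them through the port's rs485_config() hook.
 */
static int lpuart_init_rs485_sketch(struct uart_port *port)
{
	int ret = uart_get_rs485_mode(port);

	if (ret)
		return ret;		/* e.g. malformed DT rs485 properties */

	return port->rs485_config(port, &port->rs485);
}
```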
2523 sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx"); 2524 if (!sport->dma_tx_chan) 2525 dev_info(sport->port.dev, "DMA tx channel request failed, " 2526 "operating without tx DMA\n"); 2527 2528 sport->dma_rx_chan = dma_request_slave_channel(sport->port.dev, "rx"); 2529 if (!sport->dma_rx_chan) 2530 dev_info(sport->port.dev, "DMA rx channel request failed, " 2531 "operating without rx DMA\n"); 2532 | |
2533 return 0; 2534 | 2635 return 0; 2636 |
2637failed_get_rs485: |
|
2535failed_attach_port: 2536failed_irq_request: 2537 lpuart_disable_clks(sport); 2538failed_clock_enable: 2539failed_out_of_range: 2540 if (sport->id_allocated) 2541 ida_simple_remove(&fsl_lpuart_ida, sport->port.line); 2542 return ret; --- 152 unchanged lines hidden --- | 2638failed_attach_port: 2639failed_irq_request: 2640 lpuart_disable_clks(sport); 2641failed_clock_enable: 2642failed_out_of_range: 2643 if (sport->id_allocated) 2644 ida_simple_remove(&fsl_lpuart_ida, sport->port.line); 2645 return ret; --- 152 unchanged lines hidden --- |