1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) Maxime Coquelin 2015 4 * Copyright (C) STMicroelectronics SA 2017 5 * Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com> 6 * Gerald Baeza <gerald.baeza@foss.st.com> 7 * Erwan Le Ray <erwan.leray@foss.st.com> 8 * 9 * Inspired by st-asc.c from STMicroelectronics (c) 10 */ 11 12 #include <linux/clk.h> 13 #include <linux/console.h> 14 #include <linux/delay.h> 15 #include <linux/dma-direction.h> 16 #include <linux/dmaengine.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/io.h> 19 #include <linux/iopoll.h> 20 #include <linux/irq.h> 21 #include <linux/module.h> 22 #include <linux/of.h> 23 #include <linux/of_platform.h> 24 #include <linux/pinctrl/consumer.h> 25 #include <linux/platform_device.h> 26 #include <linux/pm_runtime.h> 27 #include <linux/pm_wakeirq.h> 28 #include <linux/serial_core.h> 29 #include <linux/serial.h> 30 #include <linux/spinlock.h> 31 #include <linux/sysrq.h> 32 #include <linux/tty_flip.h> 33 #include <linux/tty.h> 34 35 #include "serial_mctrl_gpio.h" 36 #include "stm32-usart.h" 37 38 39 /* Register offsets */ 40 static struct stm32_usart_info __maybe_unused stm32f4_info = { 41 .ofs = { 42 .isr = 0x00, 43 .rdr = 0x04, 44 .tdr = 0x04, 45 .brr = 0x08, 46 .cr1 = 0x0c, 47 .cr2 = 0x10, 48 .cr3 = 0x14, 49 .gtpr = 0x18, 50 .rtor = UNDEF_REG, 51 .rqr = UNDEF_REG, 52 .icr = UNDEF_REG, 53 }, 54 .cfg = { 55 .uart_enable_bit = 13, 56 .has_7bits_data = false, 57 .fifosize = 1, 58 } 59 }; 60 61 static struct stm32_usart_info __maybe_unused stm32f7_info = { 62 .ofs = { 63 .cr1 = 0x00, 64 .cr2 = 0x04, 65 .cr3 = 0x08, 66 .brr = 0x0c, 67 .gtpr = 0x10, 68 .rtor = 0x14, 69 .rqr = 0x18, 70 .isr = 0x1c, 71 .icr = 0x20, 72 .rdr = 0x24, 73 .tdr = 0x28, 74 }, 75 .cfg = { 76 .uart_enable_bit = 0, 77 .has_7bits_data = true, 78 .has_swap = true, 79 .fifosize = 1, 80 } 81 }; 82 83 static struct stm32_usart_info __maybe_unused stm32h7_info = { 84 .ofs = { 85 .cr1 = 0x00, 86 .cr2 = 0x04, 87 .cr3 = 0x08, 88 .brr = 0x0c, 89 .gtpr = 0x10, 90 .rtor = 0x14, 91 .rqr = 0x18, 92 .isr = 0x1c, 93 .icr = 0x20, 94 .rdr = 0x24, 95 .tdr = 0x28, 96 }, 97 .cfg = { 98 .uart_enable_bit = 0, 99 .has_7bits_data = true, 100 .has_swap = true, 101 .has_wakeup = true, 102 .has_fifo = true, 103 .fifosize = 16, 104 } 105 }; 106 107 static void stm32_usart_stop_tx(struct uart_port *port); 108 static void stm32_usart_transmit_chars(struct uart_port *port); 109 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch); 110 111 static inline struct stm32_port *to_stm32_port(struct uart_port *port) 112 { 113 return container_of(port, struct stm32_port, port); 114 } 115 116 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits) 117 { 118 u32 val; 119 120 val = readl_relaxed(port->membase + reg); 121 val |= bits; 122 writel_relaxed(val, port->membase + reg); 123 } 124 125 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits) 126 { 127 u32 val; 128 129 val = readl_relaxed(port->membase + reg); 130 val &= ~bits; 131 writel_relaxed(val, port->membase + reg); 132 } 133 134 static unsigned int stm32_usart_tx_empty(struct uart_port *port) 135 { 136 struct stm32_port *stm32_port = to_stm32_port(port); 137 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 138 139 if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC) 140 return TIOCSER_TEMT; 141 142 return 0; 143 } 144 145 static void stm32_usart_rs485_rts_enable(struct uart_port *port) 146 { 147 struct stm32_port *stm32_port = 
to_stm32_port(port); 148 struct serial_rs485 *rs485conf = &port->rs485; 149 150 if (stm32_port->hw_flow_control || 151 !(rs485conf->flags & SER_RS485_ENABLED)) 152 return; 153 154 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 155 mctrl_gpio_set(stm32_port->gpios, 156 stm32_port->port.mctrl | TIOCM_RTS); 157 } else { 158 mctrl_gpio_set(stm32_port->gpios, 159 stm32_port->port.mctrl & ~TIOCM_RTS); 160 } 161 } 162 163 static void stm32_usart_rs485_rts_disable(struct uart_port *port) 164 { 165 struct stm32_port *stm32_port = to_stm32_port(port); 166 struct serial_rs485 *rs485conf = &port->rs485; 167 168 if (stm32_port->hw_flow_control || 169 !(rs485conf->flags & SER_RS485_ENABLED)) 170 return; 171 172 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 173 mctrl_gpio_set(stm32_port->gpios, 174 stm32_port->port.mctrl & ~TIOCM_RTS); 175 } else { 176 mctrl_gpio_set(stm32_port->gpios, 177 stm32_port->port.mctrl | TIOCM_RTS); 178 } 179 } 180 181 static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE, 182 u32 delay_DDE, u32 baud) 183 { 184 u32 rs485_deat_dedt; 185 u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT); 186 bool over8; 187 188 *cr3 |= USART_CR3_DEM; 189 over8 = *cr1 & USART_CR1_OVER8; 190 191 *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 192 193 if (over8) 194 rs485_deat_dedt = delay_ADE * baud * 8; 195 else 196 rs485_deat_dedt = delay_ADE * baud * 16; 197 198 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); 199 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? 200 rs485_deat_dedt_max : rs485_deat_dedt; 201 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) & 202 USART_CR1_DEAT_MASK; 203 *cr1 |= rs485_deat_dedt; 204 205 if (over8) 206 rs485_deat_dedt = delay_DDE * baud * 8; 207 else 208 rs485_deat_dedt = delay_DDE * baud * 16; 209 210 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); 211 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? 
212 rs485_deat_dedt_max : rs485_deat_dedt; 213 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) & 214 USART_CR1_DEDT_MASK; 215 *cr1 |= rs485_deat_dedt; 216 } 217 218 static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios, 219 struct serial_rs485 *rs485conf) 220 { 221 struct stm32_port *stm32_port = to_stm32_port(port); 222 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 223 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 224 u32 usartdiv, baud, cr1, cr3; 225 bool over8; 226 227 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 228 229 if (port->rs485_rx_during_tx_gpio) 230 gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio, 231 !!(rs485conf->flags & SER_RS485_RX_DURING_TX)); 232 else 233 rs485conf->flags |= SER_RS485_RX_DURING_TX; 234 235 if (rs485conf->flags & SER_RS485_ENABLED) { 236 cr1 = readl_relaxed(port->membase + ofs->cr1); 237 cr3 = readl_relaxed(port->membase + ofs->cr3); 238 usartdiv = readl_relaxed(port->membase + ofs->brr); 239 usartdiv = usartdiv & GENMASK(15, 0); 240 over8 = cr1 & USART_CR1_OVER8; 241 242 if (over8) 243 usartdiv = usartdiv | (usartdiv & GENMASK(4, 0)) 244 << USART_BRR_04_R_SHIFT; 245 246 baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv); 247 stm32_usart_config_reg_rs485(&cr1, &cr3, 248 rs485conf->delay_rts_before_send, 249 rs485conf->delay_rts_after_send, 250 baud); 251 252 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) 253 cr3 &= ~USART_CR3_DEP; 254 else 255 cr3 |= USART_CR3_DEP; 256 257 writel_relaxed(cr3, port->membase + ofs->cr3); 258 writel_relaxed(cr1, port->membase + ofs->cr1); 259 } else { 260 stm32_usart_clr_bits(port, ofs->cr3, 261 USART_CR3_DEM | USART_CR3_DEP); 262 stm32_usart_clr_bits(port, ofs->cr1, 263 USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 264 } 265 266 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 267 268 /* Adjust RTS polarity in case it's driven in software */ 269 if (stm32_usart_tx_empty(port)) 270 stm32_usart_rs485_rts_disable(port); 271 else 272 stm32_usart_rs485_rts_enable(port); 273 274 return 0; 275 } 276 277 static int stm32_usart_init_rs485(struct uart_port *port, 278 struct platform_device *pdev) 279 { 280 struct serial_rs485 *rs485conf = &port->rs485; 281 282 rs485conf->flags = 0; 283 rs485conf->delay_rts_before_send = 0; 284 rs485conf->delay_rts_after_send = 0; 285 286 if (!pdev->dev.of_node) 287 return -ENODEV; 288 289 return uart_get_rs485_mode(port); 290 } 291 292 static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port) 293 { 294 return stm32_port->rx_ch ? 
stm32_port->rx_dma_busy : false; 295 } 296 297 static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port) 298 { 299 dmaengine_terminate_async(stm32_port->rx_ch); 300 stm32_port->rx_dma_busy = false; 301 } 302 303 static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port, 304 struct dma_chan *chan, 305 enum dma_status expected_status, 306 int dmaengine_pause_or_resume(struct dma_chan *), 307 bool stm32_usart_xx_dma_started(struct stm32_port *), 308 void stm32_usart_xx_dma_terminate(struct stm32_port *)) 309 { 310 struct uart_port *port = &stm32_port->port; 311 enum dma_status dma_status; 312 int ret; 313 314 if (!stm32_usart_xx_dma_started(stm32_port)) 315 return -EPERM; 316 317 dma_status = dmaengine_tx_status(chan, chan->cookie, NULL); 318 if (dma_status != expected_status) 319 return -EAGAIN; 320 321 ret = dmaengine_pause_or_resume(chan); 322 if (ret) { 323 dev_err(port->dev, "DMA failed with error code: %d\n", ret); 324 stm32_usart_xx_dma_terminate(stm32_port); 325 } 326 return ret; 327 } 328 329 static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port) 330 { 331 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch, 332 DMA_IN_PROGRESS, dmaengine_pause, 333 stm32_usart_rx_dma_started, 334 stm32_usart_rx_dma_terminate); 335 } 336 337 static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port) 338 { 339 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch, 340 DMA_PAUSED, dmaengine_resume, 341 stm32_usart_rx_dma_started, 342 stm32_usart_rx_dma_terminate); 343 } 344 345 /* Return true when data is pending (in pio mode), and false when no data is pending. */ 346 static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr) 347 { 348 struct stm32_port *stm32_port = to_stm32_port(port); 349 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 350 351 *sr = readl_relaxed(port->membase + ofs->isr); 352 /* Get pending characters in RDR or FIFO */ 353 if (*sr & USART_SR_RXNE) { 354 /* Get all pending characters from the RDR or the FIFO when using interrupts */ 355 if (!stm32_usart_rx_dma_started(stm32_port)) 356 return true; 357 358 /* Handle only RX data errors when using DMA */ 359 if (*sr & USART_SR_ERR_MASK) 360 return true; 361 } 362 363 return false; 364 } 365 366 static u8 stm32_usart_get_char_pio(struct uart_port *port) 367 { 368 struct stm32_port *stm32_port = to_stm32_port(port); 369 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 370 unsigned long c; 371 372 c = readl_relaxed(port->membase + ofs->rdr); 373 /* Apply RDR data mask */ 374 c &= stm32_port->rdr_mask; 375 376 return c; 377 } 378 379 static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port) 380 { 381 struct stm32_port *stm32_port = to_stm32_port(port); 382 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 383 unsigned int size = 0; 384 u32 sr; 385 u8 c, flag; 386 387 while (stm32_usart_pending_rx_pio(port, &sr)) { 388 sr |= USART_SR_DUMMY_RX; 389 flag = TTY_NORMAL; 390 391 /* 392 * Status bits have to be cleared before reading the RDR: 393 * In FIFO mode, reading the RDR will pop the next data 394 * (if any) along with its status bits into the SR. 395 * Not doing so leads to misalignment between RDR and SR, 396 * and clears the status bits of the next RX data. 397 * 398 * Clear error flags for stm32f7 and stm32h7 compatible 399 * devices. On stm32f4 compatible devices, the error bit is 400 * cleared by the sequence [read SR - read DR].
401 */ 402 if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG) 403 writel_relaxed(sr & USART_SR_ERR_MASK, 404 port->membase + ofs->icr); 405 406 c = stm32_usart_get_char_pio(port); 407 port->icount.rx++; 408 size++; 409 if (sr & USART_SR_ERR_MASK) { 410 if (sr & USART_SR_ORE) { 411 port->icount.overrun++; 412 } else if (sr & USART_SR_PE) { 413 port->icount.parity++; 414 } else if (sr & USART_SR_FE) { 415 /* Break detection if character is null */ 416 if (!c) { 417 port->icount.brk++; 418 if (uart_handle_break(port)) 419 continue; 420 } else { 421 port->icount.frame++; 422 } 423 } 424 425 sr &= port->read_status_mask; 426 427 if (sr & USART_SR_PE) { 428 flag = TTY_PARITY; 429 } else if (sr & USART_SR_FE) { 430 if (!c) 431 flag = TTY_BREAK; 432 else 433 flag = TTY_FRAME; 434 } 435 } 436 437 if (uart_prepare_sysrq_char(port, c)) 438 continue; 439 uart_insert_char(port, sr, USART_SR_ORE, c, flag); 440 } 441 442 return size; 443 } 444 445 static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size) 446 { 447 struct stm32_port *stm32_port = to_stm32_port(port); 448 struct tty_port *ttyport = &stm32_port->port.state->port; 449 unsigned char *dma_start; 450 int dma_count, i; 451 452 dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res); 453 454 /* 455 * Apply rdr_mask on buffer in order to mask parity bit. 456 * This loop is useless in cs8 mode because DMA copies only 457 * 8 bits and already ignores parity bit. 458 */ 459 if (!(stm32_port->rdr_mask == (BIT(8) - 1))) 460 for (i = 0; i < dma_size; i++) 461 *(dma_start + i) &= stm32_port->rdr_mask; 462 463 dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size); 464 port->icount.rx += dma_count; 465 if (dma_count != dma_size) 466 port->icount.buf_overrun++; 467 stm32_port->last_res -= dma_count; 468 if (stm32_port->last_res == 0) 469 stm32_port->last_res = RX_BUF_L; 470 } 471 472 static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port) 473 { 474 struct stm32_port *stm32_port = to_stm32_port(port); 475 unsigned int dma_size, size = 0; 476 477 /* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. 
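When the DMA write pointer has wrapped past the current read position (i.e. residue > last_res), the data is pushed in two chunks: first from the read position up to the end of the buffer, then from the start of the buffer up to the current DMA position.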
*/ 478 if (stm32_port->rx_dma_state.residue > stm32_port->last_res) { 479 /* Conditional first part: from last_res to end of DMA buffer */ 480 dma_size = stm32_port->last_res; 481 stm32_usart_push_buffer_dma(port, dma_size); 482 size = dma_size; 483 } 484 485 dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue; 486 stm32_usart_push_buffer_dma(port, dma_size); 487 size += dma_size; 488 489 return size; 490 } 491 492 static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush) 493 { 494 struct stm32_port *stm32_port = to_stm32_port(port); 495 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 496 enum dma_status rx_dma_status; 497 u32 sr; 498 unsigned int size = 0; 499 500 if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) { 501 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch, 502 stm32_port->rx_ch->cookie, 503 &stm32_port->rx_dma_state); 504 if (rx_dma_status == DMA_IN_PROGRESS || 505 rx_dma_status == DMA_PAUSED) { 506 /* Empty DMA buffer */ 507 size = stm32_usart_receive_chars_dma(port); 508 sr = readl_relaxed(port->membase + ofs->isr); 509 if (sr & USART_SR_ERR_MASK) { 510 /* Disable DMA request line */ 511 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 512 513 /* Switch to PIO mode to handle the errors */ 514 size += stm32_usart_receive_chars_pio(port); 515 516 /* Switch back to DMA mode */ 517 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); 518 } 519 } else { 520 /* Disable RX DMA */ 521 stm32_usart_rx_dma_terminate(stm32_port); 522 /* Fall back to interrupt mode */ 523 dev_dbg(port->dev, "DMA error, fallback to irq mode\n"); 524 size = stm32_usart_receive_chars_pio(port); 525 } 526 } else { 527 size = stm32_usart_receive_chars_pio(port); 528 } 529 530 return size; 531 } 532 533 static void stm32_usart_rx_dma_complete(void *arg) 534 { 535 struct uart_port *port = arg; 536 struct tty_port *tport = &port->state->port; 537 unsigned int size; 538 unsigned long flags; 539 540 uart_port_lock_irqsave(port, &flags); 541 size = stm32_usart_receive_chars(port, false); 542 uart_unlock_and_check_sysrq_irqrestore(port, flags); 543 if (size) 544 tty_flip_buffer_push(tport); 545 } 546 547 static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port) 548 { 549 struct stm32_port *stm32_port = to_stm32_port(port); 550 struct dma_async_tx_descriptor *desc; 551 enum dma_status rx_dma_status; 552 int ret; 553 554 if (stm32_port->throttled) 555 return 0; 556 557 if (stm32_port->rx_dma_busy) { 558 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch, 559 stm32_port->rx_ch->cookie, 560 NULL); 561 if (rx_dma_status == DMA_IN_PROGRESS) 562 return 0; 563 564 if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port)) 565 return 0; 566 567 dev_err(port->dev, "DMA failed : status error.\n"); 568 stm32_usart_rx_dma_terminate(stm32_port); 569 } 570 571 stm32_port->rx_dma_busy = true; 572 573 stm32_port->last_res = RX_BUF_L; 574 /* Prepare a DMA cyclic transaction */ 575 desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch, 576 stm32_port->rx_dma_buf, 577 RX_BUF_L, RX_BUF_P, 578 DMA_DEV_TO_MEM, 579 DMA_PREP_INTERRUPT); 580 if (!desc) { 581 dev_err(port->dev, "rx dma prep cyclic failed\n"); 582 stm32_port->rx_dma_busy = false; 583 return -ENODEV; 584 } 585 586 desc->callback = stm32_usart_rx_dma_complete; 587 desc->callback_param = port; 588 589 /* Push current DMA transaction in the pending queue */ 590 ret = dma_submit_error(dmaengine_submit(desc)); 591 if (ret) { 592 
dmaengine_terminate_sync(stm32_port->rx_ch); 593 stm32_port->rx_dma_busy = false; 594 return ret; 595 } 596 597 /* Issue pending DMA requests */ 598 dma_async_issue_pending(stm32_port->rx_ch); 599 600 return 0; 601 } 602 603 static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port) 604 { 605 dmaengine_terminate_async(stm32_port->tx_ch); 606 stm32_port->tx_dma_busy = false; 607 } 608 609 static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port) 610 { 611 /* 612 * We cannot use dmaengine_tx_status() to know the status of the 613 * DMA: it does not tell whether the "dma complete" callback of the 614 * DMA transaction has been called. So we use the "tx_dma_busy" 615 * flag instead to prevent two DMA transactions from running at the 616 * same time. 617 */ 618 return stm32_port->tx_dma_busy; 619 } 620 621 static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port) 622 { 623 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch, 624 DMA_IN_PROGRESS, dmaengine_pause, 625 stm32_usart_tx_dma_started, 626 stm32_usart_tx_dma_terminate); 627 } 628 629 static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port) 630 { 631 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch, 632 DMA_PAUSED, dmaengine_resume, 633 stm32_usart_tx_dma_started, 634 stm32_usart_tx_dma_terminate); 635 } 636 637 static void stm32_usart_tx_dma_complete(void *arg) 638 { 639 struct uart_port *port = arg; 640 struct stm32_port *stm32port = to_stm32_port(port); 641 unsigned long flags; 642 643 stm32_usart_tx_dma_terminate(stm32port); 644 645 /* Let's see if we have pending data to send */ 646 uart_port_lock_irqsave(port, &flags); 647 stm32_usart_transmit_chars(port); 648 uart_port_unlock_irqrestore(port, flags); 649 } 650 651 static void stm32_usart_tx_interrupt_enable(struct uart_port *port) 652 { 653 struct stm32_port *stm32_port = to_stm32_port(port); 654 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 655 656 /* 657 * Enable the TX FIFO threshold irq when the FIFO is enabled, 658 * or the TX empty irq when the FIFO is disabled 659 */ 660 if (stm32_port->fifoen && stm32_port->txftcfg >= 0) 661 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE); 662 else 663 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE); 664 } 665 666 static void stm32_usart_tc_interrupt_enable(struct uart_port *port) 667 { 668 struct stm32_port *stm32_port = to_stm32_port(port); 669 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 670 671 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE); 672 } 673 674 static void stm32_usart_tx_interrupt_disable(struct uart_port *port) 675 { 676 struct stm32_port *stm32_port = to_stm32_port(port); 677 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 678 679 if (stm32_port->fifoen && stm32_port->txftcfg >= 0) 680 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE); 681 else 682 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE); 683 } 684 685 static void stm32_usart_tc_interrupt_disable(struct uart_port *port) 686 { 687 struct stm32_port *stm32_port = to_stm32_port(port); 688 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 689 690 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE); 691 } 692 693 static void stm32_usart_transmit_chars_pio(struct uart_port *port) 694 { 695 struct stm32_port *stm32_port = to_stm32_port(port); 696 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 697 struct circ_buf *xmit = &port->state->xmit; 698 699 while (!uart_circ_empty(xmit)) { 700 /* Check that
TDR is empty before filling FIFO */ 701 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE)) 702 break; 703 writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr); 704 uart_xmit_advance(port, 1); 705 } 706 707 /* Rely on the TXE irq (masked or unmasked) to send the remaining data */ 708 if (uart_circ_empty(xmit)) 709 stm32_usart_tx_interrupt_disable(port); 710 else 711 stm32_usart_tx_interrupt_enable(port); 712 } 713 714 static void stm32_usart_transmit_chars_dma(struct uart_port *port) 715 { 716 struct stm32_port *stm32port = to_stm32_port(port); 717 struct circ_buf *xmit = &port->state->xmit; 718 struct dma_async_tx_descriptor *desc = NULL; 719 unsigned int count; 720 int ret; 721 722 if (stm32_usart_tx_dma_started(stm32port)) { 723 ret = stm32_usart_tx_dma_resume(stm32port); 724 if (ret < 0 && ret != -EAGAIN) 725 goto fallback_err; 726 return; 727 } 728 729 count = uart_circ_chars_pending(xmit); 730 731 if (count > TX_BUF_L) 732 count = TX_BUF_L; 733 734 if (xmit->tail < xmit->head) { 735 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count); 736 } else { 737 size_t one = UART_XMIT_SIZE - xmit->tail; 738 size_t two; 739 740 if (one > count) 741 one = count; 742 two = count - one; 743 744 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one); 745 if (two) 746 memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two); 747 } 748 749 desc = dmaengine_prep_slave_single(stm32port->tx_ch, 750 stm32port->tx_dma_buf, 751 count, 752 DMA_MEM_TO_DEV, 753 DMA_PREP_INTERRUPT); 754 755 if (!desc) 756 goto fallback_err; 757 758 /* 759 * Set the "tx_dma_busy" flag. It is released when 760 * dmaengine_terminate_async() is called. The flag prevents 761 * transmit_chars_dma() from starting another DMA transaction 762 * while the callback of the previous one has not been called yet.
763 */ 764 stm32port->tx_dma_busy = true; 765 766 desc->callback = stm32_usart_tx_dma_complete; 767 desc->callback_param = port; 768 769 /* Push current DMA TX transaction in the pending queue */ 770 /* DMA not yet started, safe to free resources */ 771 ret = dma_submit_error(dmaengine_submit(desc)); 772 if (ret) { 773 dev_err(port->dev, "DMA failed with error code: %d\n", ret); 774 stm32_usart_tx_dma_terminate(stm32port); 775 goto fallback_err; 776 } 777 778 /* Issue pending DMA TX requests */ 779 dma_async_issue_pending(stm32port->tx_ch); 780 781 uart_xmit_advance(port, count); 782 783 return; 784 785 fallback_err: 786 stm32_usart_transmit_chars_pio(port); 787 } 788 789 static void stm32_usart_transmit_chars(struct uart_port *port) 790 { 791 struct stm32_port *stm32_port = to_stm32_port(port); 792 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 793 struct circ_buf *xmit = &port->state->xmit; 794 u32 isr; 795 int ret; 796 797 if (!stm32_port->hw_flow_control && 798 port->rs485.flags & SER_RS485_ENABLED && 799 (port->x_char || 800 !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) { 801 stm32_usart_tc_interrupt_disable(port); 802 stm32_usart_rs485_rts_enable(port); 803 } 804 805 if (port->x_char) { 806 /* dma terminate may have been called in case of dma pause failure */ 807 stm32_usart_tx_dma_pause(stm32_port); 808 809 /* Check that TDR is empty before filling FIFO */ 810 ret = 811 readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, 812 isr, 813 (isr & USART_SR_TXE), 814 10, 1000); 815 if (ret) 816 dev_warn(port->dev, "1 character may be erased\n"); 817 818 writel_relaxed(port->x_char, port->membase + ofs->tdr); 819 port->x_char = 0; 820 port->icount.tx++; 821 822 /* dma terminate may have been called in case of dma resume failure */ 823 stm32_usart_tx_dma_resume(stm32_port); 824 return; 825 } 826 827 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 828 stm32_usart_tx_interrupt_disable(port); 829 return; 830 } 831 832 if (ofs->icr == UNDEF_REG) 833 stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC); 834 else 835 writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr); 836 837 if (stm32_port->tx_ch) 838 stm32_usart_transmit_chars_dma(port); 839 else 840 stm32_usart_transmit_chars_pio(port); 841 842 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 843 uart_write_wakeup(port); 844 845 if (uart_circ_empty(xmit)) { 846 stm32_usart_tx_interrupt_disable(port); 847 if (!stm32_port->hw_flow_control && 848 port->rs485.flags & SER_RS485_ENABLED) { 849 stm32_usart_tc_interrupt_enable(port); 850 } 851 } 852 } 853 854 static irqreturn_t stm32_usart_interrupt(int irq, void *ptr) 855 { 856 struct uart_port *port = ptr; 857 struct tty_port *tport = &port->state->port; 858 struct stm32_port *stm32_port = to_stm32_port(port); 859 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 860 u32 sr; 861 unsigned int size; 862 863 sr = readl_relaxed(port->membase + ofs->isr); 864 865 if (!stm32_port->hw_flow_control && 866 port->rs485.flags & SER_RS485_ENABLED && 867 (sr & USART_SR_TC)) { 868 stm32_usart_tc_interrupt_disable(port); 869 stm32_usart_rs485_rts_disable(port); 870 } 871 872 if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) 873 writel_relaxed(USART_ICR_RTOCF, 874 port->membase + ofs->icr); 875 876 if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) { 877 /* Clear wake up flag and disable wake up interrupt */ 878 writel_relaxed(USART_ICR_WUCF, 879 port->membase + ofs->icr); 880 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE); 881 if
(irqd_is_wakeup_set(irq_get_irq_data(port->irq))) 882 pm_wakeup_event(tport->tty->dev, 0); 883 } 884 885 /* 886 * RX errors in DMA mode have to be handled ASAP to avoid an overrun, as the DMA request 887 * line has been masked by HW and RX data are stacking up in the FIFO. 888 */ 889 if (!stm32_port->throttled) { 890 if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) || 891 ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) { 892 uart_port_lock(port); 893 size = stm32_usart_receive_chars(port, false); 894 uart_unlock_and_check_sysrq(port); 895 if (size) 896 tty_flip_buffer_push(tport); 897 } 898 } 899 900 if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) { 901 uart_port_lock(port); 902 stm32_usart_transmit_chars(port); 903 uart_port_unlock(port); 904 } 905 906 /* Receiver timeout irq for DMA RX */ 907 if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) { 908 uart_port_lock(port); 909 size = stm32_usart_receive_chars(port, false); 910 uart_unlock_and_check_sysrq(port); 911 if (size) 912 tty_flip_buffer_push(tport); 913 } 914 915 return IRQ_HANDLED; 916 } 917 918 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl) 919 { 920 struct stm32_port *stm32_port = to_stm32_port(port); 921 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 922 923 if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) 924 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE); 925 else 926 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE); 927 928 mctrl_gpio_set(stm32_port->gpios, mctrl); 929 } 930 931 static unsigned int stm32_usart_get_mctrl(struct uart_port *port) 932 { 933 struct stm32_port *stm32_port = to_stm32_port(port); 934 unsigned int ret; 935 936 /* This routine is used to get signals of: DCD, DSR, RI, and CTS */ 937 ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; 938 939 return mctrl_gpio_get(stm32_port->gpios, &ret); 940 } 941 942 static void stm32_usart_enable_ms(struct uart_port *port) 943 { 944 mctrl_gpio_enable_ms(to_stm32_port(port)->gpios); 945 } 946 947 static void stm32_usart_disable_ms(struct uart_port *port) 948 { 949 mctrl_gpio_disable_ms(to_stm32_port(port)->gpios); 950 } 951 952 /* Transmit stop */ 953 static void stm32_usart_stop_tx(struct uart_port *port) 954 { 955 struct stm32_port *stm32_port = to_stm32_port(port); 956 957 stm32_usart_tx_interrupt_disable(port); 958 959 /* dma terminate may have been called in case of dma pause failure */ 960 stm32_usart_tx_dma_pause(stm32_port); 961 962 stm32_usart_rs485_rts_disable(port); 963 } 964 965 /* There are probably characters waiting to be transmitted. */ 966 static void stm32_usart_start_tx(struct uart_port *port) 967 { 968 struct circ_buf *xmit = &port->state->xmit; 969 970 if (uart_circ_empty(xmit) && !port->x_char) { 971 stm32_usart_rs485_rts_disable(port); 972 return; 973 } 974 975 stm32_usart_rs485_rts_enable(port); 976 977 stm32_usart_transmit_chars(port); 978 } 979 980 /* Flush the transmit buffer. */ 981 static void stm32_usart_flush_buffer(struct uart_port *port) 982 { 983 struct stm32_port *stm32_port = to_stm32_port(port); 984 985 if (stm32_port->tx_ch) 986 stm32_usart_tx_dma_terminate(stm32_port); 987 } 988 989 /* Throttle the remote when input buffer is about to overflow.
*/ 990 static void stm32_usart_throttle(struct uart_port *port) 991 { 992 struct stm32_port *stm32_port = to_stm32_port(port); 993 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 994 unsigned long flags; 995 996 uart_port_lock_irqsave(port, &flags); 997 998 /* 999 * Pause DMA transfer, so the RX data gets queued into the FIFO. 1000 * Hardware flow control is triggered when RX FIFO is full. 1001 */ 1002 stm32_usart_rx_dma_pause(stm32_port); 1003 1004 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); 1005 if (stm32_port->cr3_irq) 1006 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); 1007 1008 stm32_port->throttled = true; 1009 uart_port_unlock_irqrestore(port, flags); 1010 } 1011 1012 /* Unthrottle the remote, the input buffer can now accept data. */ 1013 static void stm32_usart_unthrottle(struct uart_port *port) 1014 { 1015 struct stm32_port *stm32_port = to_stm32_port(port); 1016 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1017 unsigned long flags; 1018 1019 uart_port_lock_irqsave(port, &flags); 1020 stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq); 1021 if (stm32_port->cr3_irq) 1022 stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq); 1023 1024 stm32_port->throttled = false; 1025 1026 /* 1027 * Switch back to DMA mode (resume DMA). 1028 * Hardware flow control is stopped when FIFO is not full any more. 1029 */ 1030 if (stm32_port->rx_ch) 1031 stm32_usart_rx_dma_start_or_resume(port); 1032 1033 uart_port_unlock_irqrestore(port, flags); 1034 } 1035 1036 /* Receive stop */ 1037 static void stm32_usart_stop_rx(struct uart_port *port) 1038 { 1039 struct stm32_port *stm32_port = to_stm32_port(port); 1040 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1041 1042 /* Disable DMA request line. 
*/ 1043 stm32_usart_rx_dma_pause(stm32_port); 1044 1045 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); 1046 if (stm32_port->cr3_irq) 1047 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); 1048 } 1049 1050 static void stm32_usart_break_ctl(struct uart_port *port, int break_state) 1051 { 1052 struct stm32_port *stm32_port = to_stm32_port(port); 1053 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1054 unsigned long flags; 1055 1056 spin_lock_irqsave(&port->lock, flags); 1057 1058 if (break_state) 1059 stm32_usart_set_bits(port, ofs->rqr, USART_RQR_SBKRQ); 1060 else 1061 stm32_usart_clr_bits(port, ofs->rqr, USART_RQR_SBKRQ); 1062 1063 spin_unlock_irqrestore(&port->lock, flags); 1064 } 1065 1066 static int stm32_usart_startup(struct uart_port *port) 1067 { 1068 struct stm32_port *stm32_port = to_stm32_port(port); 1069 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1070 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1071 const char *name = to_platform_device(port->dev)->name; 1072 u32 val; 1073 int ret; 1074 1075 ret = request_irq(port->irq, stm32_usart_interrupt, 1076 IRQF_NO_SUSPEND, name, port); 1077 if (ret) 1078 return ret; 1079 1080 if (stm32_port->swap) { 1081 val = readl_relaxed(port->membase + ofs->cr2); 1082 val |= USART_CR2_SWAP; 1083 writel_relaxed(val, port->membase + ofs->cr2); 1084 } 1085 1086 /* RX FIFO Flush */ 1087 if (ofs->rqr != UNDEF_REG) 1088 writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr); 1089 1090 if (stm32_port->rx_ch) { 1091 ret = stm32_usart_rx_dma_start_or_resume(port); 1092 if (ret) { 1093 free_irq(port->irq, port); 1094 return ret; 1095 } 1096 } 1097 1098 /* RX enabling */ 1099 val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit); 1100 stm32_usart_set_bits(port, ofs->cr1, val); 1101 1102 return 0; 1103 } 1104 1105 static void stm32_usart_shutdown(struct uart_port *port) 1106 { 1107 struct stm32_port *stm32_port = to_stm32_port(port); 1108 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1109 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1110 u32 val, isr; 1111 int ret; 1112 1113 if (stm32_usart_tx_dma_started(stm32_port)) 1114 stm32_usart_tx_dma_terminate(stm32_port); 1115 1116 if (stm32_port->tx_ch) 1117 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 1118 1119 /* Disable modem control interrupts */ 1120 stm32_usart_disable_ms(port); 1121 1122 val = USART_CR1_TXEIE | USART_CR1_TE; 1123 val |= stm32_port->cr1_irq | USART_CR1_RE; 1124 val |= BIT(cfg->uart_enable_bit); 1125 if (stm32_port->fifoen) 1126 val |= USART_CR1_FIFOEN; 1127 1128 ret = readl_relaxed_poll_timeout(port->membase + ofs->isr, 1129 isr, (isr & USART_SR_TC), 1130 10, 100000); 1131 1132 /* Send the TC error message only when ISR_TC is not set */ 1133 if (ret) 1134 dev_err(port->dev, "Transmission is not complete\n"); 1135 1136 /* Disable RX DMA. 
*/ 1137 if (stm32_port->rx_ch) { 1138 stm32_usart_rx_dma_terminate(stm32_port); 1139 dmaengine_synchronize(stm32_port->rx_ch); 1140 } 1141 1142 /* flush RX & TX FIFO */ 1143 if (ofs->rqr != UNDEF_REG) 1144 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ, 1145 port->membase + ofs->rqr); 1146 1147 stm32_usart_clr_bits(port, ofs->cr1, val); 1148 1149 free_irq(port->irq, port); 1150 } 1151 1152 static void stm32_usart_set_termios(struct uart_port *port, 1153 struct ktermios *termios, 1154 const struct ktermios *old) 1155 { 1156 struct stm32_port *stm32_port = to_stm32_port(port); 1157 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1158 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1159 struct serial_rs485 *rs485conf = &port->rs485; 1160 unsigned int baud, bits; 1161 u32 usartdiv, mantissa, fraction, oversampling; 1162 tcflag_t cflag = termios->c_cflag; 1163 u32 cr1, cr2, cr3, isr; 1164 unsigned long flags; 1165 int ret; 1166 1167 if (!stm32_port->hw_flow_control) 1168 cflag &= ~CRTSCTS; 1169 1170 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8); 1171 1172 uart_port_lock_irqsave(port, &flags); 1173 1174 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, 1175 isr, 1176 (isr & USART_SR_TC), 1177 10, 100000); 1178 1179 /* Send the TC error message only when ISR_TC is not set. */ 1180 if (ret) 1181 dev_err(port->dev, "Transmission is not complete\n"); 1182 1183 /* Stop serial port and reset value */ 1184 writel_relaxed(0, port->membase + ofs->cr1); 1185 1186 /* flush RX & TX FIFO */ 1187 if (ofs->rqr != UNDEF_REG) 1188 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ, 1189 port->membase + ofs->rqr); 1190 1191 cr1 = USART_CR1_TE | USART_CR1_RE; 1192 if (stm32_port->fifoen) 1193 cr1 |= USART_CR1_FIFOEN; 1194 cr2 = stm32_port->swap ? USART_CR2_SWAP : 0; 1195 1196 /* Tx and RX FIFO configuration */ 1197 cr3 = readl_relaxed(port->membase + ofs->cr3); 1198 cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE; 1199 if (stm32_port->fifoen) { 1200 if (stm32_port->txftcfg >= 0) 1201 cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT; 1202 if (stm32_port->rxftcfg >= 0) 1203 cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT; 1204 } 1205 1206 if (cflag & CSTOPB) 1207 cr2 |= USART_CR2_STOP_2B; 1208 1209 bits = tty_get_char_size(cflag); 1210 stm32_port->rdr_mask = (BIT(bits) - 1); 1211 1212 if (cflag & PARENB) { 1213 bits++; 1214 cr1 |= USART_CR1_PCE; 1215 } 1216 1217 /* 1218 * Word length configuration: 1219 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01 1220 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10 1221 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00 1222 * M0 and M1 already cleared by cr1 initialization. 
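 * For example, CS7 without parity keeps bits = 7 and sets M1 below, provided the variant supports 7-bit data (has_7bits_data); otherwise the config is forced back to CS8.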
1223 */ 1224 if (bits == 9) { 1225 cr1 |= USART_CR1_M0; 1226 } else if ((bits == 7) && cfg->has_7bits_data) { 1227 cr1 |= USART_CR1_M1; 1228 } else if (bits != 8) { 1229 dev_dbg(port->dev, "Unsupported data bits config: %u bits\n" 1230 , bits); 1231 cflag &= ~CSIZE; 1232 cflag |= CS8; 1233 termios->c_cflag = cflag; 1234 bits = 8; 1235 if (cflag & PARENB) { 1236 bits++; 1237 cr1 |= USART_CR1_M0; 1238 } 1239 } 1240 1241 if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch || 1242 (stm32_port->fifoen && 1243 stm32_port->rxftcfg >= 0))) { 1244 if (cflag & CSTOPB) 1245 bits = bits + 3; /* 1 start bit + 2 stop bits */ 1246 else 1247 bits = bits + 2; /* 1 start bit + 1 stop bit */ 1248 1249 /* RX timeout irq to occur after last stop bit + bits */ 1250 stm32_port->cr1_irq = USART_CR1_RTOIE; 1251 writel_relaxed(bits, port->membase + ofs->rtor); 1252 cr2 |= USART_CR2_RTOEN; 1253 /* 1254 * Enable fifo threshold irq in two cases, either when there is no DMA, or when 1255 * wake up over usart, from low power until the DMA gets re-enabled by resume. 1256 */ 1257 stm32_port->cr3_irq = USART_CR3_RXFTIE; 1258 } 1259 1260 cr1 |= stm32_port->cr1_irq; 1261 cr3 |= stm32_port->cr3_irq; 1262 1263 if (cflag & PARODD) 1264 cr1 |= USART_CR1_PS; 1265 1266 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); 1267 if (cflag & CRTSCTS) { 1268 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; 1269 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE; 1270 } 1271 1272 usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud); 1273 1274 /* 1275 * The USART supports 16 or 8 times oversampling. 1276 * By default we prefer 16 times oversampling, so that the receiver 1277 * has a better tolerance to clock deviations. 1278 * 8 times oversampling is only used to achieve higher speeds. 1279 */ 1280 if (usartdiv < 16) { 1281 oversampling = 8; 1282 cr1 |= USART_CR1_OVER8; 1283 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8); 1284 } else { 1285 oversampling = 16; 1286 cr1 &= ~USART_CR1_OVER8; 1287 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8); 1288 } 1289 1290 mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT; 1291 fraction = usartdiv % oversampling; 1292 writel_relaxed(mantissa | fraction, port->membase + ofs->brr); 1293 1294 uart_update_timeout(port, cflag, baud); 1295 1296 port->read_status_mask = USART_SR_ORE; 1297 if (termios->c_iflag & INPCK) 1298 port->read_status_mask |= USART_SR_PE | USART_SR_FE; 1299 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 1300 port->read_status_mask |= USART_SR_FE; 1301 1302 /* Characters to ignore */ 1303 port->ignore_status_mask = 0; 1304 if (termios->c_iflag & IGNPAR) 1305 port->ignore_status_mask = USART_SR_PE | USART_SR_FE; 1306 if (termios->c_iflag & IGNBRK) { 1307 port->ignore_status_mask |= USART_SR_FE; 1308 /* 1309 * If we're ignoring parity and break indicators, 1310 * ignore overruns too (for real raw support). 1311 */ 1312 if (termios->c_iflag & IGNPAR) 1313 port->ignore_status_mask |= USART_SR_ORE; 1314 } 1315 1316 /* Ignore all characters if CREAD is not set */ 1317 if ((termios->c_cflag & CREAD) == 0) 1318 port->ignore_status_mask |= USART_SR_DUMMY_RX; 1319 1320 if (stm32_port->rx_ch) { 1321 /* 1322 * Setup DMA to collect only valid data and enable error irqs. 1323 * This also enables break reception when using DMA. 
1324 */ 1325 cr1 |= USART_CR1_PEIE; 1326 cr3 |= USART_CR3_EIE; 1327 cr3 |= USART_CR3_DMAR; 1328 cr3 |= USART_CR3_DDRE; 1329 } 1330 1331 if (stm32_port->tx_ch) 1332 cr3 |= USART_CR3_DMAT; 1333 1334 if (rs485conf->flags & SER_RS485_ENABLED) { 1335 stm32_usart_config_reg_rs485(&cr1, &cr3, 1336 rs485conf->delay_rts_before_send, 1337 rs485conf->delay_rts_after_send, 1338 baud); 1339 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 1340 cr3 &= ~USART_CR3_DEP; 1341 rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND; 1342 } else { 1343 cr3 |= USART_CR3_DEP; 1344 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND; 1345 } 1346 1347 } else { 1348 cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP); 1349 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 1350 } 1351 1352 /* Configure wake up from low power on start bit detection */ 1353 if (stm32_port->wakeup_src) { 1354 cr3 &= ~USART_CR3_WUS_MASK; 1355 cr3 |= USART_CR3_WUS_START_BIT; 1356 } 1357 1358 writel_relaxed(cr3, port->membase + ofs->cr3); 1359 writel_relaxed(cr2, port->membase + ofs->cr2); 1360 writel_relaxed(cr1, port->membase + ofs->cr1); 1361 1362 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 1363 uart_port_unlock_irqrestore(port, flags); 1364 1365 /* Handle modem control interrupts */ 1366 if (UART_ENABLE_MS(port, termios->c_cflag)) 1367 stm32_usart_enable_ms(port); 1368 else 1369 stm32_usart_disable_ms(port); 1370 } 1371 1372 static const char *stm32_usart_type(struct uart_port *port) 1373 { 1374 return (port->type == PORT_STM32) ? DRIVER_NAME : NULL; 1375 } 1376 1377 static void stm32_usart_release_port(struct uart_port *port) 1378 { 1379 } 1380 1381 static int stm32_usart_request_port(struct uart_port *port) 1382 { 1383 return 0; 1384 } 1385 1386 static void stm32_usart_config_port(struct uart_port *port, int flags) 1387 { 1388 if (flags & UART_CONFIG_TYPE) 1389 port->type = PORT_STM32; 1390 } 1391 1392 static int 1393 stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser) 1394 { 1395 /* No user changeable parameters */ 1396 return -EINVAL; 1397 } 1398 1399 static void stm32_usart_pm(struct uart_port *port, unsigned int state, 1400 unsigned int oldstate) 1401 { 1402 struct stm32_port *stm32port = container_of(port, 1403 struct stm32_port, port); 1404 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1405 const struct stm32_usart_config *cfg = &stm32port->info->cfg; 1406 unsigned long flags; 1407 1408 switch (state) { 1409 case UART_PM_STATE_ON: 1410 pm_runtime_get_sync(port->dev); 1411 break; 1412 case UART_PM_STATE_OFF: 1413 uart_port_lock_irqsave(port, &flags); 1414 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 1415 uart_port_unlock_irqrestore(port, flags); 1416 pm_runtime_put_sync(port->dev); 1417 break; 1418 } 1419 } 1420 1421 #if defined(CONFIG_CONSOLE_POLL) 1422 1423 /* Callbacks for characters polling in debug context (i.e. KGDB). 
*/ 1424 static int stm32_usart_poll_init(struct uart_port *port) 1425 { 1426 struct stm32_port *stm32_port = to_stm32_port(port); 1427 1428 return clk_prepare_enable(stm32_port->clk); 1429 } 1430 1431 static int stm32_usart_poll_get_char(struct uart_port *port) 1432 { 1433 struct stm32_port *stm32_port = to_stm32_port(port); 1434 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1435 1436 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE)) 1437 return NO_POLL_CHAR; 1438 1439 return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask; 1440 } 1441 1442 static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch) 1443 { 1444 stm32_usart_console_putchar(port, ch); 1445 } 1446 #endif /* CONFIG_CONSOLE_POLL */ 1447 1448 static const struct uart_ops stm32_uart_ops = { 1449 .tx_empty = stm32_usart_tx_empty, 1450 .set_mctrl = stm32_usart_set_mctrl, 1451 .get_mctrl = stm32_usart_get_mctrl, 1452 .stop_tx = stm32_usart_stop_tx, 1453 .start_tx = stm32_usart_start_tx, 1454 .throttle = stm32_usart_throttle, 1455 .unthrottle = stm32_usart_unthrottle, 1456 .stop_rx = stm32_usart_stop_rx, 1457 .enable_ms = stm32_usart_enable_ms, 1458 .break_ctl = stm32_usart_break_ctl, 1459 .startup = stm32_usart_startup, 1460 .shutdown = stm32_usart_shutdown, 1461 .flush_buffer = stm32_usart_flush_buffer, 1462 .set_termios = stm32_usart_set_termios, 1463 .pm = stm32_usart_pm, 1464 .type = stm32_usart_type, 1465 .release_port = stm32_usart_release_port, 1466 .request_port = stm32_usart_request_port, 1467 .config_port = stm32_usart_config_port, 1468 .verify_port = stm32_usart_verify_port, 1469 #if defined(CONFIG_CONSOLE_POLL) 1470 .poll_init = stm32_usart_poll_init, 1471 .poll_get_char = stm32_usart_poll_get_char, 1472 .poll_put_char = stm32_usart_poll_put_char, 1473 #endif /* CONFIG_CONSOLE_POLL */ 1474 }; 1475 1476 /* 1477 * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG) 1478 * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case, 1479 * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE. 1480 * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1. 
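 * For example, with the default 8-byte threshold, index 3 of the array below is selected, so the RXFTCFG / TXFTCFG bitfield is programmed with 2 (half of the 16-byte FIFO).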
1481 */ 1482 static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 }; 1483 1484 static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p, 1485 int *ftcfg) 1486 { 1487 u32 bytes, i; 1488 1489 /* DT option to get RX & TX FIFO threshold (default to 8 bytes) */ 1490 if (of_property_read_u32(pdev->dev.of_node, p, &bytes)) 1491 bytes = 8; 1492 1493 for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) 1494 if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes) 1495 break; 1496 if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg)) 1497 i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1; 1498 1499 dev_dbg(&pdev->dev, "%s set to %d bytes\n", p, 1500 stm32h7_usart_fifo_thresh_cfg[i]); 1501 1502 /* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */ 1503 if (i) 1504 *ftcfg = i - 1; 1505 else 1506 *ftcfg = -EINVAL; 1507 } 1508 1509 static void stm32_usart_deinit_port(struct stm32_port *stm32port) 1510 { 1511 clk_disable_unprepare(stm32port->clk); 1512 } 1513 1514 static const struct serial_rs485 stm32_rs485_supported = { 1515 .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | 1516 SER_RS485_RX_DURING_TX, 1517 .delay_rts_before_send = 1, 1518 .delay_rts_after_send = 1, 1519 }; 1520 1521 static int stm32_usart_init_port(struct stm32_port *stm32port, 1522 struct platform_device *pdev) 1523 { 1524 struct uart_port *port = &stm32port->port; 1525 struct resource *res; 1526 int ret, irq; 1527 1528 irq = platform_get_irq(pdev, 0); 1529 if (irq < 0) 1530 return irq; 1531 1532 port->iotype = UPIO_MEM; 1533 port->flags = UPF_BOOT_AUTOCONF; 1534 port->ops = &stm32_uart_ops; 1535 port->dev = &pdev->dev; 1536 port->fifosize = stm32port->info->cfg.fifosize; 1537 port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE); 1538 port->irq = irq; 1539 port->rs485_config = stm32_usart_config_rs485; 1540 port->rs485_supported = stm32_rs485_supported; 1541 1542 ret = stm32_usart_init_rs485(port, pdev); 1543 if (ret) 1544 return ret; 1545 1546 stm32port->wakeup_src = stm32port->info->cfg.has_wakeup && 1547 of_property_read_bool(pdev->dev.of_node, "wakeup-source"); 1548 1549 stm32port->swap = stm32port->info->cfg.has_swap && 1550 of_property_read_bool(pdev->dev.of_node, "rx-tx-swap"); 1551 1552 stm32port->fifoen = stm32port->info->cfg.has_fifo; 1553 if (stm32port->fifoen) { 1554 stm32_usart_get_ftcfg(pdev, "rx-threshold", 1555 &stm32port->rxftcfg); 1556 stm32_usart_get_ftcfg(pdev, "tx-threshold", 1557 &stm32port->txftcfg); 1558 } 1559 1560 port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 1561 if (IS_ERR(port->membase)) 1562 return PTR_ERR(port->membase); 1563 port->mapbase = res->start; 1564 1565 spin_lock_init(&port->lock); 1566 1567 stm32port->clk = devm_clk_get(&pdev->dev, NULL); 1568 if (IS_ERR(stm32port->clk)) 1569 return PTR_ERR(stm32port->clk); 1570 1571 /* Ensure that clk rate is correct by enabling the clk */ 1572 ret = clk_prepare_enable(stm32port->clk); 1573 if (ret) 1574 return ret; 1575 1576 stm32port->port.uartclk = clk_get_rate(stm32port->clk); 1577 if (!stm32port->port.uartclk) { 1578 ret = -EINVAL; 1579 goto err_clk; 1580 } 1581 1582 stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0); 1583 if (IS_ERR(stm32port->gpios)) { 1584 ret = PTR_ERR(stm32port->gpios); 1585 goto err_clk; 1586 } 1587 1588 /* 1589 * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts" 1590 * properties should not be specified. 
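 * For example, a device tree node that sets "uart-has-rtscts" together with cts-gpios / rts-gpios is rejected below with -EINVAL.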
1591 */ 1592 if (stm32port->hw_flow_control) { 1593 if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) || 1594 mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) { 1595 dev_err(&pdev->dev, "Conflicting RTS/CTS config\n"); 1596 ret = -EINVAL; 1597 goto err_clk; 1598 } 1599 } 1600 1601 return ret; 1602 1603 err_clk: 1604 clk_disable_unprepare(stm32port->clk); 1605 1606 return ret; 1607 } 1608 1609 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev) 1610 { 1611 struct device_node *np = pdev->dev.of_node; 1612 int id; 1613 1614 if (!np) 1615 return NULL; 1616 1617 id = of_alias_get_id(np, "serial"); 1618 if (id < 0) { 1619 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id); 1620 return NULL; 1621 } 1622 1623 if (WARN_ON(id >= STM32_MAX_PORTS)) 1624 return NULL; 1625 1626 stm32_ports[id].hw_flow_control = 1627 of_property_read_bool (np, "st,hw-flow-ctrl") /*deprecated*/ || 1628 of_property_read_bool (np, "uart-has-rtscts"); 1629 stm32_ports[id].port.line = id; 1630 stm32_ports[id].cr1_irq = USART_CR1_RXNEIE; 1631 stm32_ports[id].cr3_irq = 0; 1632 stm32_ports[id].last_res = RX_BUF_L; 1633 return &stm32_ports[id]; 1634 } 1635 1636 #ifdef CONFIG_OF 1637 static const struct of_device_id stm32_match[] = { 1638 { .compatible = "st,stm32-uart", .data = &stm32f4_info}, 1639 { .compatible = "st,stm32f7-uart", .data = &stm32f7_info}, 1640 { .compatible = "st,stm32h7-uart", .data = &stm32h7_info}, 1641 {}, 1642 }; 1643 1644 MODULE_DEVICE_TABLE(of, stm32_match); 1645 #endif 1646 1647 static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port, 1648 struct platform_device *pdev) 1649 { 1650 if (stm32port->rx_buf) 1651 dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf, 1652 stm32port->rx_dma_buf); 1653 } 1654 1655 static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port, 1656 struct platform_device *pdev) 1657 { 1658 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1659 struct uart_port *port = &stm32port->port; 1660 struct device *dev = &pdev->dev; 1661 struct dma_slave_config config; 1662 int ret; 1663 1664 stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L, 1665 &stm32port->rx_dma_buf, 1666 GFP_KERNEL); 1667 if (!stm32port->rx_buf) 1668 return -ENOMEM; 1669 1670 /* Configure DMA channel */ 1671 memset(&config, 0, sizeof(config)); 1672 config.src_addr = port->mapbase + ofs->rdr; 1673 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1674 1675 ret = dmaengine_slave_config(stm32port->rx_ch, &config); 1676 if (ret < 0) { 1677 dev_err(dev, "rx dma channel config failed\n"); 1678 stm32_usart_of_dma_rx_remove(stm32port, pdev); 1679 return ret; 1680 } 1681 1682 return 0; 1683 } 1684 1685 static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port, 1686 struct platform_device *pdev) 1687 { 1688 if (stm32port->tx_buf) 1689 dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf, 1690 stm32port->tx_dma_buf); 1691 } 1692 1693 static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port, 1694 struct platform_device *pdev) 1695 { 1696 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1697 struct uart_port *port = &stm32port->port; 1698 struct device *dev = &pdev->dev; 1699 struct dma_slave_config config; 1700 int ret; 1701 1702 stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L, 1703 &stm32port->tx_dma_buf, 1704 GFP_KERNEL); 1705 if (!stm32port->tx_buf) 1706 return -ENOMEM; 1707 1708 /* Configure DMA channel */ 1709 memset(&config, 0, sizeof(config)); 1710 config.dst_addr = port->mapbase + 
ofs->tdr; 1711 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1712 1713 ret = dmaengine_slave_config(stm32port->tx_ch, &config); 1714 if (ret < 0) { 1715 dev_err(dev, "tx dma channel config failed\n"); 1716 stm32_usart_of_dma_tx_remove(stm32port, pdev); 1717 return ret; 1718 } 1719 1720 return 0; 1721 } 1722 1723 static int stm32_usart_serial_probe(struct platform_device *pdev) 1724 { 1725 struct stm32_port *stm32port; 1726 int ret; 1727 1728 stm32port = stm32_usart_of_get_port(pdev); 1729 if (!stm32port) 1730 return -ENODEV; 1731 1732 stm32port->info = of_device_get_match_data(&pdev->dev); 1733 if (!stm32port->info) 1734 return -EINVAL; 1735 1736 stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx"); 1737 if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) 1738 return -EPROBE_DEFER; 1739 1740 /* Fall back in interrupt mode for any non-deferral error */ 1741 if (IS_ERR(stm32port->rx_ch)) 1742 stm32port->rx_ch = NULL; 1743 1744 stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx"); 1745 if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) { 1746 ret = -EPROBE_DEFER; 1747 goto err_dma_rx; 1748 } 1749 /* Fall back in interrupt mode for any non-deferral error */ 1750 if (IS_ERR(stm32port->tx_ch)) 1751 stm32port->tx_ch = NULL; 1752 1753 ret = stm32_usart_init_port(stm32port, pdev); 1754 if (ret) 1755 goto err_dma_tx; 1756 1757 if (stm32port->wakeup_src) { 1758 device_set_wakeup_capable(&pdev->dev, true); 1759 ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); 1760 if (ret) 1761 goto err_deinit_port; 1762 } 1763 1764 if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) { 1765 /* Fall back in interrupt mode */ 1766 dma_release_channel(stm32port->rx_ch); 1767 stm32port->rx_ch = NULL; 1768 } 1769 1770 if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) { 1771 /* Fall back in interrupt mode */ 1772 dma_release_channel(stm32port->tx_ch); 1773 stm32port->tx_ch = NULL; 1774 } 1775 1776 if (!stm32port->rx_ch) 1777 dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n"); 1778 if (!stm32port->tx_ch) 1779 dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n"); 1780 1781 platform_set_drvdata(pdev, &stm32port->port); 1782 1783 pm_runtime_get_noresume(&pdev->dev); 1784 pm_runtime_set_active(&pdev->dev); 1785 pm_runtime_enable(&pdev->dev); 1786 1787 ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port); 1788 if (ret) 1789 goto err_port; 1790 1791 pm_runtime_put_sync(&pdev->dev); 1792 1793 return 0; 1794 1795 err_port: 1796 pm_runtime_disable(&pdev->dev); 1797 pm_runtime_set_suspended(&pdev->dev); 1798 pm_runtime_put_noidle(&pdev->dev); 1799 1800 if (stm32port->tx_ch) 1801 stm32_usart_of_dma_tx_remove(stm32port, pdev); 1802 if (stm32port->rx_ch) 1803 stm32_usart_of_dma_rx_remove(stm32port, pdev); 1804 1805 if (stm32port->wakeup_src) 1806 dev_pm_clear_wake_irq(&pdev->dev); 1807 1808 err_deinit_port: 1809 if (stm32port->wakeup_src) 1810 device_set_wakeup_capable(&pdev->dev, false); 1811 1812 stm32_usart_deinit_port(stm32port); 1813 1814 err_dma_tx: 1815 if (stm32port->tx_ch) 1816 dma_release_channel(stm32port->tx_ch); 1817 1818 err_dma_rx: 1819 if (stm32port->rx_ch) 1820 dma_release_channel(stm32port->rx_ch); 1821 1822 return ret; 1823 } 1824 1825 static int stm32_usart_serial_remove(struct platform_device *pdev) 1826 { 1827 struct uart_port *port = platform_get_drvdata(pdev); 1828 struct stm32_port *stm32_port = to_stm32_port(port); 1829 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1830 u32 cr3; 1831 1832 pm_runtime_get_sync(&pdev->dev); 1833 
uart_remove_one_port(&stm32_usart_driver, port); 1834 1835 pm_runtime_disable(&pdev->dev); 1836 pm_runtime_set_suspended(&pdev->dev); 1837 pm_runtime_put_noidle(&pdev->dev); 1838 1839 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE); 1840 1841 if (stm32_port->tx_ch) { 1842 stm32_usart_of_dma_tx_remove(stm32_port, pdev); 1843 dma_release_channel(stm32_port->tx_ch); 1844 } 1845 1846 if (stm32_port->rx_ch) { 1847 stm32_usart_of_dma_rx_remove(stm32_port, pdev); 1848 dma_release_channel(stm32_port->rx_ch); 1849 } 1850 1851 cr3 = readl_relaxed(port->membase + ofs->cr3); 1852 cr3 &= ~USART_CR3_EIE; 1853 cr3 &= ~USART_CR3_DMAR; 1854 cr3 &= ~USART_CR3_DMAT; 1855 cr3 &= ~USART_CR3_DDRE; 1856 writel_relaxed(cr3, port->membase + ofs->cr3); 1857 1858 if (stm32_port->wakeup_src) { 1859 dev_pm_clear_wake_irq(&pdev->dev); 1860 device_init_wakeup(&pdev->dev, false); 1861 } 1862 1863 stm32_usart_deinit_port(stm32_port); 1864 1865 return 0; 1866 } 1867 1868 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch) 1869 { 1870 struct stm32_port *stm32_port = to_stm32_port(port); 1871 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1872 u32 isr; 1873 int ret; 1874 1875 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr, 1876 (isr & USART_SR_TXE), 100, 1877 STM32_USART_TIMEOUT_USEC); 1878 if (ret != 0) { 1879 dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret); 1880 return; 1881 } 1882 writel_relaxed(ch, port->membase + ofs->tdr); 1883 } 1884 1885 #ifdef CONFIG_SERIAL_STM32_CONSOLE 1886 static void stm32_usart_console_write(struct console *co, const char *s, 1887 unsigned int cnt) 1888 { 1889 struct uart_port *port = &stm32_ports[co->index].port; 1890 struct stm32_port *stm32_port = to_stm32_port(port); 1891 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1892 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1893 unsigned long flags; 1894 u32 old_cr1, new_cr1; 1895 int locked = 1; 1896 1897 if (oops_in_progress) 1898 locked = uart_port_trylock_irqsave(port, &flags); 1899 else 1900 uart_port_lock_irqsave(port, &flags); 1901 1902 /* Save and disable interrupts, enable the transmitter */ 1903 old_cr1 = readl_relaxed(port->membase + ofs->cr1); 1904 new_cr1 = old_cr1 & ~USART_CR1_IE_MASK; 1905 new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit); 1906 writel_relaxed(new_cr1, port->membase + ofs->cr1); 1907 1908 uart_console_write(port, s, cnt, stm32_usart_console_putchar); 1909 1910 /* Restore interrupt state */ 1911 writel_relaxed(old_cr1, port->membase + ofs->cr1); 1912 1913 if (locked) 1914 uart_port_unlock_irqrestore(port, flags); 1915 } 1916 1917 static int stm32_usart_console_setup(struct console *co, char *options) 1918 { 1919 struct stm32_port *stm32port; 1920 int baud = 9600; 1921 int bits = 8; 1922 int parity = 'n'; 1923 int flow = 'n'; 1924 1925 if (co->index >= STM32_MAX_PORTS) 1926 return -ENODEV; 1927 1928 stm32port = &stm32_ports[co->index]; 1929 1930 /* 1931 * This driver does not support early console initialization 1932 * (use ARM early printk support instead), so we only expect 1933 * this to be called during the uart port registration when the 1934 * driver gets probed and the port should be mapped at that point. 
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}

static struct console stm32_console = {
	.name		= STM32_SERIAL_NAME,
	.device		= uart_console_device,
	.write		= stm32_usart_console_write,
	.setup		= stm32_usart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &stm32_usart_driver,
};

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */

#ifdef CONFIG_SERIAL_EARLYCON
static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_usart_info *info = port->private_data;

	while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
		cpu_relax();

	writel_relaxed(ch, port->membase + info->ofs.tdr);
}

static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
{
	struct earlycon_device *device = console->data;
	struct uart_port *port = &device->port;

	uart_console_write(port, s, count, early_stm32_usart_console_putchar);
}

static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32h7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f4_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
#endif /* CONFIG_SERIAL_EARLYCON */

static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};

static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
							bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size = 0;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable low-power wake-up and wake-up irq if argument is set to
	 * "enable", disable low-power wake-up and wake-up irq otherwise
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
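		/* Enable the wake-up from Stop mode interrupt (WUFIE) */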
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
		mctrl_gpio_enable_irq_wake(stm32_port->gpios);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			uart_port_lock_irqsave(port, &flags);
			/* Poll data from DMA RX buffer if any */
			if (!stm32_usart_rx_dma_pause(stm32_port))
				size += stm32_usart_receive_chars(port, true);
			stm32_usart_rx_dma_terminate(stm32_port);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		if (stm32_port->rx_ch) {
			ret = stm32_usart_rx_dma_start_or_resume(port);
			if (ret)
				return ret;
		}
		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default state
	 * and rely on the bootloader to restore it upon resume. Otherwise,
	 * apply the idle or sleep state depending on wakeup capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, false);
		if (ret)
			return ret;
	}

	return uart_resume_port(&stm32_usart_driver, port);
}

static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}

static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	return clk_prepare_enable(stm32port->clk);
}

static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};

static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_usart_serial_probe,
	.remove		= stm32_usart_serial_remove,
	.driver = {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};

static int __init stm32_usart_init(void)
{
	static char banner[] __initdata = "STM32 USART driver initialized";
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&stm32_usart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&stm32_serial_driver);
	if (ret)
		uart_unregister_driver(&stm32_usart_driver);

	return ret;
}

static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}

module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");