// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Maxime Coquelin 2015
 * Copyright (C) STMicroelectronics SA 2017
 * Authors:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
 *	     Gerald Baeza <gerald.baeza@foss.st.com>
 *	     Erwan Le Ray <erwan.leray@foss.st.com>
 *
 * Inspired by st-asc.c from STMicroelectronics (c)
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/tty.h>

#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"


/* Register offsets */
static struct stm32_usart_info __maybe_unused stm32f4_info = {
	.ofs = {
		.isr = 0x00,
		.rdr = 0x04,
		.tdr = 0x04,
		.brr = 0x08,
		.cr1 = 0x0c,
		.cr2 = 0x10,
		.cr3 = 0x14,
		.gtpr = 0x18,
		.rtor = UNDEF_REG,
		.rqr = UNDEF_REG,
		.icr = UNDEF_REG,
		.presc = UNDEF_REG,
		.hwcfgr1 = UNDEF_REG,
	},
	.cfg = {
		.uart_enable_bit = 13,
		.has_7bits_data = false,
	}
};

static struct stm32_usart_info __maybe_unused stm32f7_info = {
	.ofs = {
		.cr1 = 0x00,
		.cr2 = 0x04,
		.cr3 = 0x08,
		.brr = 0x0c,
		.gtpr = 0x10,
		.rtor = 0x14,
		.rqr = 0x18,
		.isr = 0x1c,
		.icr = 0x20,
		.rdr = 0x24,
		.tdr = 0x28,
		.presc = UNDEF_REG,
		.hwcfgr1 = UNDEF_REG,
	},
	.cfg = {
		.uart_enable_bit = 0,
		.has_7bits_data = true,
		.has_swap = true,
	}
};

static struct stm32_usart_info __maybe_unused stm32h7_info = {
	.ofs = {
		.cr1 = 0x00,
		.cr2 = 0x04,
		.cr3 = 0x08,
		.brr = 0x0c,
		.gtpr = 0x10,
		.rtor = 0x14,
		.rqr = 0x18,
		.isr = 0x1c,
		.icr = 0x20,
		.rdr = 0x24,
		.tdr = 0x28,
		.presc = 0x2c,
		.hwcfgr1 = 0x3f0,
	},
	.cfg = {
		.uart_enable_bit = 0,
		.has_7bits_data = true,
		.has_swap = true,
		.has_wakeup = true,
		.has_fifo = true,
	}
};

static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);

static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
	return container_of(port, struct stm32_port, port);
}

static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val |= bits;
	writel_relaxed(val, port->membase + reg);
}

static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val &= ~bits;
	writel_relaxed(val, port->membase + reg);
}

static unsigned int stm32_usart_tx_empty(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
		return TIOCSER_TEMT;

	return 0;
}

static void stm32_usart_rs485_rts_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;

	if (stm32_port->hw_flow_control ||
	    !(rs485conf->flags & SER_RS485_ENABLED))
		return;

	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl | TIOCM_RTS);
	} else {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl & ~TIOCM_RTS);
	}
}

static void stm32_usart_rs485_rts_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;

	if (stm32_port->hw_flow_control ||
	    !(rs485conf->flags & SER_RS485_ENABLED))
		return;

	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl & ~TIOCM_RTS);
	} else {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl | TIOCM_RTS);
	}
}

static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
					 u32 delay_DDE, u32 baud)
{
	u32 rs485_deat_dedt;
	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
	bool over8;

	*cr3 |= USART_CR3_DEM;
	over8 = *cr1 & USART_CR1_OVER8;

	*cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);

	if (over8)
		rs485_deat_dedt = delay_ADE * baud * 8;
	else
		rs485_deat_dedt = delay_ADE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
			  USART_CR1_DEAT_MASK;
	*cr1 |= rs485_deat_dedt;

	if (over8)
		rs485_deat_dedt = delay_DDE * baud * 8;
	else
		rs485_deat_dedt = delay_DDE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
			  USART_CR1_DEDT_MASK;
	*cr1 |= rs485_deat_dedt;
}
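
/*
 * Worked example for the DEAT/DEDT conversion above (illustrative figures):
 * delay_rts_before_send = 1 ms at 1200 baud with 16x oversampling gives
 * 1 * 1200 * 16 / 1000 = 19.2, rounded to 19 sample periods (roughly 1.2 bit
 * times, i.e. about 1 ms). At 115200 baud the same 1 ms request computes to
 * 1843 and is clamped to rs485_deat_dedt_max (31 with the 5-bit DEAT/DEDT
 * fields), so delays that long cannot be honoured by the hardware.
 */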

static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
				    struct serial_rs485 *rs485conf)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 usartdiv, baud, cr1, cr3;
	bool over8;

	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	if (rs485conf->flags & SER_RS485_ENABLED) {
		cr1 = readl_relaxed(port->membase + ofs->cr1);
		cr3 = readl_relaxed(port->membase + ofs->cr3);
		usartdiv = readl_relaxed(port->membase + ofs->brr);
		usartdiv = usartdiv & GENMASK(15, 0);
		over8 = cr1 & USART_CR1_OVER8;

		if (over8)
			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
				   << USART_BRR_04_R_SHIFT;

		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);

		if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
			cr3 &= ~USART_CR3_DEP;
		else
			cr3 |= USART_CR3_DEP;

		writel_relaxed(cr3, port->membase + ofs->cr3);
		writel_relaxed(cr1, port->membase + ofs->cr1);

		if (!port->rs485_rx_during_tx_gpio)
			rs485conf->flags |= SER_RS485_RX_DURING_TX;

	} else {
		stm32_usart_clr_bits(port, ofs->cr3,
				     USART_CR3_DEM | USART_CR3_DEP);
		stm32_usart_clr_bits(port, ofs->cr1,
				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	/* Adjust RTS polarity in case it's driven in software */
	if (stm32_usart_tx_empty(port))
		stm32_usart_rs485_rts_disable(port);
	else
		stm32_usart_rs485_rts_enable(port);

	return 0;
}

static int stm32_usart_init_rs485(struct uart_port *port,
				  struct platform_device *pdev)
{
	struct serial_rs485 *rs485conf = &port->rs485;

	rs485conf->flags = 0;
	rs485conf->delay_rts_before_send = 0;
	rs485conf->delay_rts_after_send = 0;

	if (!pdev->dev.of_node)
		return -ENODEV;

	return uart_get_rs485_mode(port);
}

static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port)
{
	return stm32_port->rx_ch ? stm32_port->rx_dma_busy : false;
}

static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port)
{
	dmaengine_terminate_async(stm32_port->rx_ch);
	stm32_port->rx_dma_busy = false;
}

static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
					struct dma_chan *chan,
					enum dma_status expected_status,
					int dmaengine_pause_or_resume(struct dma_chan *),
					bool stm32_usart_xx_dma_started(struct stm32_port *),
					void stm32_usart_xx_dma_terminate(struct stm32_port *))
{
	struct uart_port *port = &stm32_port->port;
	enum dma_status dma_status;
	int ret;

	if (!stm32_usart_xx_dma_started(stm32_port))
		return -EPERM;

	dma_status = dmaengine_tx_status(chan, chan->cookie, NULL);
	if (dma_status != expected_status)
		return -EAGAIN;

	ret = dmaengine_pause_or_resume(chan);
	if (ret) {
		dev_err(port->dev, "DMA failed with error code: %d\n", ret);
		stm32_usart_xx_dma_terminate(stm32_port);
	}
	return ret;
}

static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port)
{
	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
					    DMA_IN_PROGRESS, dmaengine_pause,
					    stm32_usart_rx_dma_started,
					    stm32_usart_rx_dma_terminate);
}

static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port)
{
	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
					    DMA_PAUSED, dmaengine_resume,
					    stm32_usart_rx_dma_started,
					    stm32_usart_rx_dma_terminate);
}

/* Return true when data is pending (in pio mode), and false when no data is pending. */
static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	*sr = readl_relaxed(port->membase + ofs->isr);
	/* Get pending characters in RDR or FIFO */
	if (*sr & USART_SR_RXNE) {
		/* Get all pending characters from the RDR or the FIFO when using interrupts */
		if (!stm32_usart_rx_dma_started(stm32_port))
			return true;

		/* Handle only RX data errors when using DMA */
		if (*sr & USART_SR_ERR_MASK)
			return true;
	}

	return false;
}

static u8 stm32_usart_get_char_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;

	c = readl_relaxed(port->membase + ofs->rdr);
	/* Apply RDR data mask */
	c &= stm32_port->rdr_mask;

	return c;
}

static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned int size = 0;
	u32 sr;
	u8 c, flag;

	while (stm32_usart_pending_rx_pio(port, &sr)) {
		sr |= USART_SR_DUMMY_RX;
		flag = TTY_NORMAL;

		/*
		 * Status bits have to be cleared before reading the RDR:
		 * In FIFO mode, reading the RDR will pop the next data
		 * (if any) along with its status bits into the SR.
		 * Not doing so leads to misalignment between RDR and SR,
		 * and clears the status bits of the next rx data.
		 *
		 * Clear error flags for stm32f7 and stm32h7 compatible
		 * devices. On stm32f4 compatible devices, the error bit is
		 * cleared by the sequence [read SR - read DR].
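		 *
		 * (The error flags in the ISR and the corresponding clear bits
		 * in the ICR sit at the same bit positions, which is why the
		 * masked SR value can be written to the ICR as-is below.)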
		 */
		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
			writel_relaxed(sr & USART_SR_ERR_MASK,
				       port->membase + ofs->icr);

		c = stm32_usart_get_char_pio(port);
		port->icount.rx++;
		size++;
		if (sr & USART_SR_ERR_MASK) {
			if (sr & USART_SR_ORE) {
				port->icount.overrun++;
			} else if (sr & USART_SR_PE) {
				port->icount.parity++;
			} else if (sr & USART_SR_FE) {
				/* Break detection if character is null */
				if (!c) {
					port->icount.brk++;
					if (uart_handle_break(port))
						continue;
				} else {
					port->icount.frame++;
				}
			}

			sr &= port->read_status_mask;

			if (sr & USART_SR_PE) {
				flag = TTY_PARITY;
			} else if (sr & USART_SR_FE) {
				if (!c)
					flag = TTY_BREAK;
				else
					flag = TTY_FRAME;
			}
		}

		if (uart_prepare_sysrq_char(port, c))
			continue;
		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
	}

	return size;
}

static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct tty_port *ttyport = &stm32_port->port.state->port;
	unsigned char *dma_start;
	int dma_count, i;

	dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);

	/*
	 * Apply rdr_mask on buffer in order to mask parity bit.
	 * This loop is useless in cs8 mode because DMA copies only
	 * 8 bits and already ignores parity bit.
	 */
	if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
		for (i = 0; i < dma_size; i++)
			*(dma_start + i) &= stm32_port->rdr_mask;

	dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
	port->icount.rx += dma_count;
	if (dma_count != dma_size)
		port->icount.buf_overrun++;
	stm32_port->last_res -= dma_count;
	if (stm32_port->last_res == 0)
		stm32_port->last_res = RX_BUF_L;
}

static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int dma_size, size = 0;

	/*
	 * DMA buffer is configured in cyclic mode and handles the rollback of the buffer.
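	 *
	 * Worked example (illustrative values): with RX_BUF_L = 4096, last_res = 1000
	 * (read position 3096) and a current residue of 3500 (write position 596), the
	 * DMA has wrapped: the first push below copies last_res = 1000 bytes up to the
	 * end of the buffer, last_res is reset to RX_BUF_L, and the second push copies
	 * 4096 - 3500 = 596 bytes from the start of the buffer.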
	 */
	if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
		/* Conditional first part: from last_res to end of DMA buffer */
		dma_size = stm32_port->last_res;
		stm32_usart_push_buffer_dma(port, dma_size);
		size = dma_size;
	}

	dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
	stm32_usart_push_buffer_dma(port, dma_size);
	size += dma_size;

	return size;
}

static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	enum dma_status rx_dma_status;
	u32 sr;
	unsigned int size = 0;

	if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) {
		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
						    stm32_port->rx_ch->cookie,
						    &stm32_port->rx_dma_state);
		if (rx_dma_status == DMA_IN_PROGRESS ||
		    rx_dma_status == DMA_PAUSED) {
			/* Empty DMA buffer */
			size = stm32_usart_receive_chars_dma(port);
			sr = readl_relaxed(port->membase + ofs->isr);
			if (sr & USART_SR_ERR_MASK) {
				/* Disable DMA request line */
				stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

				/* Switch to PIO mode to handle the errors */
				size += stm32_usart_receive_chars_pio(port);

				/* Switch back to DMA mode */
				stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
			}
		} else {
			/* Disable RX DMA */
			stm32_usart_rx_dma_terminate(stm32_port);
			/* Fall back to interrupt mode */
			dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
			size = stm32_usart_receive_chars_pio(port);
		}
	} else {
		size = stm32_usart_receive_chars_pio(port);
	}

	return size;
}

static void stm32_usart_rx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct tty_port *tport = &port->state->port;
	unsigned int size;
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);
	size = stm32_usart_receive_chars(port, false);
	uart_unlock_and_check_sysrq_irqrestore(port, flags);
	if (size)
		tty_flip_buffer_push(tport);
}

static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct dma_async_tx_descriptor *desc;
	enum dma_status rx_dma_status;
	int ret;

	if (stm32_port->throttled)
		return 0;

	if (stm32_port->rx_dma_busy) {
		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
						    stm32_port->rx_ch->cookie,
						    NULL);
		if (rx_dma_status == DMA_IN_PROGRESS)
			return 0;

		if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port))
			return 0;

		dev_err(port->dev, "DMA failed: status error.\n");
		stm32_usart_rx_dma_terminate(stm32_port);
	}

	stm32_port->rx_dma_busy = true;

	stm32_port->last_res = RX_BUF_L;
	/* Prepare a DMA cyclic transaction */
	desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
					 stm32_port->rx_dma_buf,
					 RX_BUF_L, RX_BUF_P,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(port->dev, "rx dma prep cyclic failed\n");
		stm32_port->rx_dma_busy = false;
		return -ENODEV;
	}

	desc->callback = stm32_usart_rx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA transaction in the pending queue */
	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dmaengine_terminate_sync(stm32_port->rx_ch);
		stm32_port->rx_dma_busy = false;
		return ret;
	}

	/* Issue pending DMA requests */
	dma_async_issue_pending(stm32_port->rx_ch);

	return 0;
}

static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
{
	dmaengine_terminate_async(stm32_port->tx_ch);
	stm32_port->tx_dma_busy = false;
}

static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
{
	/*
	 * We cannot use the function "dmaengine_tx_status" to know the
	 * status of DMA. This function does not show whether the "dma complete"
	 * callback of the DMA transaction has been called. So we prefer
	 * to use the "tx_dma_busy" flag to prevent dual DMA transactions at
	 * the same time.
	 */
	return stm32_port->tx_dma_busy;
}

static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port)
{
	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
					    DMA_IN_PROGRESS, dmaengine_pause,
					    stm32_usart_tx_dma_started,
					    stm32_usart_tx_dma_terminate);
}

static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port)
{
	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
					    DMA_PAUSED, dmaengine_resume,
					    stm32_usart_tx_dma_started,
					    stm32_usart_tx_dma_terminate);
}

static void stm32_usart_tx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct stm32_port *stm32port = to_stm32_port(port);
	unsigned long flags;

	stm32_usart_tx_dma_terminate(stm32port);

	/* Let's see if we have pending data to send */
	uart_port_lock_irqsave(port, &flags);
	stm32_usart_transmit_chars(port);
	uart_port_unlock_irqrestore(port, flags);
}

static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/*
	 * Enables TX FIFO threshold irq when FIFO is enabled,
	 * or TX empty irq when FIFO is disabled
	 */
	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
}

static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
}

static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	while (!uart_circ_empty(xmit)) {
		/* Check that TDR is empty before filling FIFO */
		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
			break;
		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
		uart_xmit_advance(port, 1);
	}

	/* rely on TXE irq (mask or unmask) for sending remaining data */
	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
	else
		stm32_usart_tx_interrupt_enable(port);
}

static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	unsigned int count;
	int ret;

	if (stm32_usart_tx_dma_started(stm32port)) {
		ret = stm32_usart_tx_dma_resume(stm32port);
		if (ret < 0 && ret != -EAGAIN)
			goto fallback_err;
		return;
	}

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	if (xmit->tail < xmit->head) {
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}

	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc)
		goto fallback_err;

	/*
	 * Set the "tx_dma_busy" flag. This flag will be released when
	 * dmaengine_terminate_async is called. This flag helps
	 * transmit_chars_dma not to start another DMA transaction
	 * if the callback of the previous one has not been called yet.
	 */
	stm32port->tx_dma_busy = true;

	desc->callback = stm32_usart_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	/* DMA not yet started, safe to free resources */
	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dev_err(port->dev, "DMA failed with error code: %d\n", ret);
		stm32_usart_tx_dma_terminate(stm32port);
		goto fallback_err;
	}

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	uart_xmit_advance(port, count);

	return;

fallback_err:
	stm32_usart_transmit_chars_pio(port);
}

static void stm32_usart_transmit_chars(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	u32 isr;
	int ret;

	if (!stm32_port->hw_flow_control &&
	    port->rs485.flags & SER_RS485_ENABLED &&
	    (port->x_char ||
	     !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
		stm32_usart_tc_interrupt_disable(port);
		stm32_usart_rs485_rts_enable(port);
	}

	if (port->x_char) {
		/* dma terminate may have been called in case of dma pause failure */
		stm32_usart_tx_dma_pause(stm32_port);

		/* Check that TDR is empty before filling FIFO */
		ret =
		readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						  isr,
						  (isr & USART_SR_TXE),
						  10, 1000);
		if (ret)
			dev_warn(port->dev, "1 character may be erased\n");

		writel_relaxed(port->x_char, port->membase + ofs->tdr);
		port->x_char = 0;
		port->icount.tx++;

		/* dma terminate may have been called in case of dma resume failure */
		stm32_usart_tx_dma_resume(stm32_port);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		stm32_usart_tx_interrupt_disable(port);
		return;
	}

	if (ofs->icr == UNDEF_REG)
		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
	else
		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);

	if (stm32_port->tx_ch)
		stm32_usart_transmit_chars_dma(port);
	else
		stm32_usart_transmit_chars_pio(port);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit)) {
		stm32_usart_tx_interrupt_disable(port);
		if (!stm32_port->hw_flow_control &&
		    port->rs485.flags & SER_RS485_ENABLED) {
			stm32_usart_tc_interrupt_enable(port);
		}
	}
}

static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 sr;
	unsigned int size;
	irqreturn_t ret = IRQ_NONE;

	sr = readl_relaxed(port->membase + ofs->isr);

	if (!stm32_port->hw_flow_control &&
	    port->rs485.flags & SER_RS485_ENABLED &&
	    (sr & USART_SR_TC)) {
		stm32_usart_tc_interrupt_disable(port);
		stm32_usart_rs485_rts_disable(port);
		ret = IRQ_HANDLED;
	}

	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
		writel_relaxed(USART_ICR_RTOCF,
			       port->membase + ofs->icr);
		ret = IRQ_HANDLED;
	}

	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
		/* Clear wake up flag and disable wake up interrupt */
		writel_relaxed(USART_ICR_WUCF,
			       port->membase + ofs->icr);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
			pm_wakeup_event(tport->tty->dev, 0);
		ret = IRQ_HANDLED;
	}

	/*
	 * rx errors in dma mode have to be handled ASAP to avoid overrun as the DMA request
	 * line has been masked by HW and rx data are stacking up in the FIFO.
	 */
	if (!stm32_port->throttled) {
		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
			uart_port_lock(port);
			size = stm32_usart_receive_chars(port, false);
			uart_unlock_and_check_sysrq(port);
			if (size)
				tty_flip_buffer_push(tport);
			ret = IRQ_HANDLED;
		}
	}

	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
		uart_port_lock(port);
		stm32_usart_transmit_chars(port);
		uart_port_unlock(port);
		ret = IRQ_HANDLED;
	}

	/* Receiver timeout irq for DMA RX */
	if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
		uart_port_lock(port);
		size = stm32_usart_receive_chars(port, false);
		uart_unlock_and_check_sysrq(port);
		if (size)
			tty_flip_buffer_push(tport);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
	else
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);

	mctrl_gpio_set(stm32_port->gpios, mctrl);
}

static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int ret;

	/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
	ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;

	return mctrl_gpio_get(stm32_port->gpios, &ret);
}

static void stm32_usart_enable_ms(struct uart_port *port)
{
	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
}

static void stm32_usart_disable_ms(struct uart_port *port)
{
	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
}

/* Transmit stop */
static void stm32_usart_stop_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);

	stm32_usart_tx_interrupt_disable(port);

	/* dma terminate may have been called in case of dma pause failure */
	stm32_usart_tx_dma_pause(stm32_port);

	stm32_usart_rs485_rts_disable(port);
}

/* There are probably characters waiting to be transmitted. */
static void stm32_usart_start_tx(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;

	if (uart_circ_empty(xmit) && !port->x_char) {
		stm32_usart_rs485_rts_disable(port);
		return;
	}

	stm32_usart_rs485_rts_enable(port);

	stm32_usart_transmit_chars(port);
}

/* Flush the transmit buffer. */
static void stm32_usart_flush_buffer(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);

	if (stm32_port->tx_ch)
		stm32_usart_tx_dma_terminate(stm32_port);
}

/* Throttle the remote when input buffer is about to overflow. */
static void stm32_usart_throttle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);

	/*
	 * Pause DMA transfer, so the RX data gets queued into the FIFO.
	 * Hardware flow control is triggered when RX FIFO is full.
	 */
	stm32_usart_rx_dma_pause(stm32_port);

	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);

	stm32_port->throttled = true;
	uart_port_unlock_irqrestore(port, flags);
}

/* Unthrottle the remote, the input buffer can now accept data. */
static void stm32_usart_unthrottle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);
	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);

	stm32_port->throttled = false;

	/*
	 * Switch back to DMA mode (resume DMA).
	 * Hardware flow control is stopped when FIFO is not full any more.
	 */
	if (stm32_port->rx_ch)
		stm32_usart_rx_dma_start_or_resume(port);

	uart_port_unlock_irqrestore(port, flags);
}

/* Receive stop */
static void stm32_usart_stop_rx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* Disable DMA request line. */
	stm32_usart_rx_dma_pause(stm32_port);

	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}

static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	if (break_state)
		stm32_usart_set_bits(port, ofs->rqr, USART_RQR_SBKRQ);
	else
		stm32_usart_clr_bits(port, ofs->rqr, USART_RQR_SBKRQ);

	spin_unlock_irqrestore(&port->lock, flags);
}

static int stm32_usart_startup(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	const char *name = to_platform_device(port->dev)->name;
	u32 val;
	int ret;

	ret = request_irq(port->irq, stm32_usart_interrupt,
			  IRQF_NO_SUSPEND, name, port);
	if (ret)
		return ret;

	if (stm32_port->swap) {
		val = readl_relaxed(port->membase + ofs->cr2);
		val |= USART_CR2_SWAP;
		writel_relaxed(val, port->membase + ofs->cr2);
	}
	stm32_port->throttled = false;

	/* RX FIFO Flush */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);

	if (stm32_port->rx_ch) {
		ret = stm32_usart_rx_dma_start_or_resume(port);
		if (ret) {
			free_irq(port->irq, port);
			return ret;
		}
	}

	/* RX enabling */
	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
	stm32_usart_set_bits(port, ofs->cr1, val);

	return 0;
}

static void stm32_usart_shutdown(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 val, isr;
	int ret;

	if (stm32_usart_tx_dma_started(stm32_port))
		stm32_usart_tx_dma_terminate(stm32_port);

	if (stm32_port->tx_ch)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	/* Disable modem control interrupts */
	stm32_usart_disable_ms(port);

	val = USART_CR1_TXEIE | USART_CR1_TE;
	val |= stm32_port->cr1_irq | USART_CR1_RE;
	val |= BIT(cfg->uart_enable_bit);
	if (stm32_port->fifoen)
		val |= USART_CR1_FIFOEN;

	ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
					 isr, (isr & USART_SR_TC),
					 10, 100000);

	/* Send the TC error message only when ISR_TC is not set */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Disable RX DMA. */
	if (stm32_port->rx_ch) {
		stm32_usart_rx_dma_terminate(stm32_port);
		dmaengine_synchronize(stm32_port->rx_ch);
	}

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	stm32_usart_clr_bits(port, ofs->cr1, val);

	free_irq(port->irq, port);
}

static const unsigned int stm32_usart_presc_val[] = {1, 2, 4, 6, 8, 10, 12, 16, 32, 64, 128, 256};

static void stm32_usart_set_termios(struct uart_port *port,
				    struct ktermios *termios,
				    const struct ktermios *old)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	struct serial_rs485 *rs485conf = &port->rs485;
	unsigned int baud, bits, uart_clk, uart_clk_pres;
	u32 usartdiv, mantissa, fraction, oversampling;
	tcflag_t cflag = termios->c_cflag;
	u32 cr1, cr2, cr3, isr, brr, presc;
	unsigned long flags;
	int ret;

	if (!stm32_port->hw_flow_control)
		cflag &= ~CRTSCTS;

	uart_clk = clk_get_rate(stm32_port->clk);

	baud = uart_get_baud_rate(port, termios, old, 0, uart_clk / 8);

	uart_port_lock_irqsave(port, &flags);

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						isr,
						(isr & USART_SR_TC),
						10, 100000);

	/* Send the TC error message only when ISR_TC is not set. */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Stop serial port and reset value */
	writel_relaxed(0, port->membase + ofs->cr1);

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	cr1 = USART_CR1_TE | USART_CR1_RE;
	if (stm32_port->fifoen)
		cr1 |= USART_CR1_FIFOEN;
	cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;

	/* Tx and RX FIFO configuration */
	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
	if (stm32_port->fifoen) {
		if (stm32_port->txftcfg >= 0)
			cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
		if (stm32_port->rxftcfg >= 0)
			cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
	}

	if (cflag & CSTOPB)
		cr2 |= USART_CR2_STOP_2B;

	bits = tty_get_char_size(cflag);
	stm32_port->rdr_mask = (BIT(bits) - 1);

	if (cflag & PARENB) {
		bits++;
		cr1 |= USART_CR1_PCE;
	}

	/*
	 * Word length configuration:
	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
	 * M0 and M1 already cleared by cr1 initialization.
	 */
	if (bits == 9) {
		cr1 |= USART_CR1_M0;
	} else if ((bits == 7) && cfg->has_7bits_data) {
		cr1 |= USART_CR1_M1;
	} else if (bits != 8) {
		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n",
			bits);
		cflag &= ~CSIZE;
		cflag |= CS8;
		termios->c_cflag = cflag;
		bits = 8;
		if (cflag & PARENB) {
			bits++;
			cr1 |= USART_CR1_M0;
		}
	}

	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
				       (stm32_port->fifoen &&
					stm32_port->rxftcfg >= 0))) {
		if (cflag & CSTOPB)
			bits = bits + 3; /* 1 start bit + 2 stop bits */
		else
			bits = bits + 2; /* 1 start bit + 1 stop bit */

		/* RX timeout irq to occur after last stop bit + bits */
		stm32_port->cr1_irq = USART_CR1_RTOIE;
		writel_relaxed(bits, port->membase + ofs->rtor);
		cr2 |= USART_CR2_RTOEN;
		/*
		 * Enable the FIFO threshold irq in two cases: either when there is
		 * no DMA, or when waking up over USART from low power, until the
		 * DMA gets re-enabled by resume.
		 */
		stm32_port->cr3_irq = USART_CR3_RXFTIE;
	}

	cr1 |= stm32_port->cr1_irq;
	cr3 |= stm32_port->cr3_irq;

	if (cflag & PARODD)
		cr1 |= USART_CR1_PS;

	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	if (cflag & CRTSCTS) {
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
	}

	for (presc = 0; presc <= USART_PRESC_MAX; presc++) {
		uart_clk_pres = DIV_ROUND_CLOSEST(uart_clk, stm32_usart_presc_val[presc]);
		usartdiv = DIV_ROUND_CLOSEST(uart_clk_pres, baud);

		/*
		 * The USART supports 16 or 8 times oversampling.
		 * By default we prefer 16 times oversampling, so that the receiver
		 * has a better tolerance to clock deviations.
		 * 8 times oversampling is only used to achieve higher speeds.
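		 *
		 * Worked example (illustrative values): with uart_clk_pres = 64 MHz
		 * and baud = 115200, usartdiv = 556, so 16x oversampling is kept and
		 * mantissa = 556 / 16 = 34, fraction = 556 % 16 = 12, giving
		 * BRR = (34 << 4) | 12 = 0x22c.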
		 */
		if (usartdiv < 16) {
			oversampling = 8;
			cr1 |= USART_CR1_OVER8;
			stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
		} else {
			oversampling = 16;
			cr1 &= ~USART_CR1_OVER8;
			stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
		}

		mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
		fraction = usartdiv % oversampling;
		brr = mantissa | fraction;

		if (FIELD_FIT(USART_BRR_MASK, brr)) {
			if (ofs->presc != UNDEF_REG) {
				port->uartclk = uart_clk_pres;
				writel_relaxed(presc, port->membase + ofs->presc);
			} else if (presc) {
				/* We need a prescaler but we don't have it (STM32F4, STM32F7) */
				dev_err(port->dev,
					"unable to set baudrate, input clock is too high");
			}
			break;
		} else if (presc == USART_PRESC_MAX) {
			/* Even with prescaler and brr at max value we can't set baudrate */
			dev_err(port->dev, "unable to set baudrate, input clock is too high");
			break;
		}
	}

	writel_relaxed(brr, port->membase + ofs->brr);

	uart_update_timeout(port, cflag, baud);

	port->read_status_mask = USART_SR_ORE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= USART_SR_FE;

	/* Characters to ignore */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= USART_SR_FE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= USART_SR_ORE;
	}

	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= USART_SR_DUMMY_RX;

	if (stm32_port->rx_ch) {
		/*
		 * Setup DMA to collect only valid data and enable error irqs.
		 * This also enables break reception when using DMA.
		 */
		cr1 |= USART_CR1_PEIE;
		cr3 |= USART_CR3_EIE;
		cr3 |= USART_CR3_DMAR;
		cr3 |= USART_CR3_DDRE;
	}

	if (stm32_port->tx_ch)
		cr3 |= USART_CR3_DMAT;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}

	} else {
		cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	/* Configure wake up from low power on start bit detection */
	if (stm32_port->wakeup_src) {
		cr3 &= ~USART_CR3_WUS_MASK;
		cr3 |= USART_CR3_WUS_START_BIT;
	}

	writel_relaxed(cr3, port->membase + ofs->cr3);
	writel_relaxed(cr2, port->membase + ofs->cr2);
	writel_relaxed(cr1, port->membase + ofs->cr1);

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
	uart_port_unlock_irqrestore(port, flags);

	/* Handle modem control interrupts */
	if (UART_ENABLE_MS(port, termios->c_cflag))
		stm32_usart_enable_ms(port);
	else
		stm32_usart_disable_ms(port);
}

static const char *stm32_usart_type(struct uart_port *port)
{
	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
}

static void stm32_usart_release_port(struct uart_port *port)
{
}

static int stm32_usart_request_port(struct uart_port *port)
{
	return 0;
}

static void stm32_usart_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE)
		port->type = PORT_STM32;
}

static int
stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	/* No user changeable parameters */
	return -EINVAL;
}

static void stm32_usart_pm(struct uart_port *port, unsigned int state,
			   unsigned int oldstate)
{
	struct stm32_port *stm32port = container_of(port,
						    struct stm32_port, port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32port->info->cfg;
	unsigned long flags;

	switch (state) {
	case UART_PM_STATE_ON:
		pm_runtime_get_sync(port->dev);
		break;
	case UART_PM_STATE_OFF:
		uart_port_lock_irqsave(port, &flags);
		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
		uart_port_unlock_irqrestore(port, flags);
		pm_runtime_put_sync(port->dev);
		break;
	}
}

#if defined(CONFIG_CONSOLE_POLL)

/* Callbacks for characters polling in debug context (i.e. KGDB). */
static int stm32_usart_poll_init(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);

	return clk_prepare_enable(stm32_port->clk);
}

static int stm32_usart_poll_get_char(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
		return NO_POLL_CHAR;

	return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
}

static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
{
	stm32_usart_console_putchar(port, ch);
}
#endif /* CONFIG_CONSOLE_POLL */

static const struct uart_ops stm32_uart_ops = {
	.tx_empty = stm32_usart_tx_empty,
	.set_mctrl = stm32_usart_set_mctrl,
	.get_mctrl = stm32_usart_get_mctrl,
	.stop_tx = stm32_usart_stop_tx,
	.start_tx = stm32_usart_start_tx,
	.throttle = stm32_usart_throttle,
	.unthrottle = stm32_usart_unthrottle,
	.stop_rx = stm32_usart_stop_rx,
	.enable_ms = stm32_usart_enable_ms,
	.break_ctl = stm32_usart_break_ctl,
	.startup = stm32_usart_startup,
	.shutdown = stm32_usart_shutdown,
	.flush_buffer = stm32_usart_flush_buffer,
	.set_termios = stm32_usart_set_termios,
	.pm = stm32_usart_pm,
	.type = stm32_usart_type,
	.release_port = stm32_usart_release_port,
	.request_port = stm32_usart_request_port,
	.config_port = stm32_usart_config_port,
	.verify_port = stm32_usart_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
	.poll_init = stm32_usart_poll_init,
	.poll_get_char = stm32_usart_poll_get_char,
	.poll_put_char = stm32_usart_poll_put_char,
#endif /* CONFIG_CONSOLE_POLL */
};

struct stm32_usart_thresh_ratio {
	int mul;
	int div;
};

static const struct stm32_usart_thresh_ratio stm32h7_usart_fifo_thresh_cfg[] = {
	{1, 8}, {1, 4}, {1, 2}, {3, 4}, {7, 8}, {1, 1} };

static int stm32_usart_get_thresh_value(u32 fifo_size, int index)
{
	return fifo_size * stm32h7_usart_fifo_thresh_cfg[index].mul /
		stm32h7_usart_fifo_thresh_cfg[index].div;
}

static int stm32_usart_get_ftcfg(struct platform_device *pdev, struct stm32_port *stm32port,
				 const char *p, int *ftcfg)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	u32 bytes, i, cfg8;
	int fifo_size;

	if (WARN_ON(ofs->hwcfgr1 == UNDEF_REG))
		return 1;

	cfg8 = FIELD_GET(USART_HWCFGR1_CFG8,
			 readl_relaxed(stm32port->port.membase + ofs->hwcfgr1));

	/* On STM32H7, hwcfgr is not present, so returned value will be 0 */
	fifo_size = cfg8 ? 1 << cfg8 : STM32H7_USART_FIFO_SIZE;
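
	/*
	 * Illustrative example of the threshold selection below (assuming a
	 * 16-byte FIFO): the ratio table maps to thresholds of 2, 4, 8, 12, 14
	 * and 16 bytes, so a DT request of 12 bytes matches the 3/4 entry and
	 * *ftcfg is set to index 3.
	 */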

	/* DT option to get RX & TX FIFO threshold (default to half fifo size) */
	if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
		bytes = fifo_size / 2;

	if (bytes < stm32_usart_get_thresh_value(fifo_size, 0)) {
		*ftcfg = -EINVAL;
		return fifo_size;
	}

	for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) {
		if (stm32_usart_get_thresh_value(fifo_size, i) >= bytes)
			break;
	}
	if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
		i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;

	dev_dbg(&pdev->dev, "%s set to %d/%d bytes\n", p,
		stm32_usart_get_thresh_value(fifo_size, i), fifo_size);

	*ftcfg = i;
	return fifo_size;
}

static void stm32_usart_deinit_port(struct stm32_port *stm32port)
{
	clk_disable_unprepare(stm32port->clk);
}

static const struct serial_rs485 stm32_rs485_supported = {
	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
		 SER_RS485_RX_DURING_TX,
	.delay_rts_before_send = 1,
	.delay_rts_after_send = 1,
};

static int stm32_usart_init_port(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct uart_port *port = &stm32port->port;
	struct resource *res;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	port->iotype = UPIO_MEM;
	port->flags = UPF_BOOT_AUTOCONF;
	port->ops = &stm32_uart_ops;
	port->dev = &pdev->dev;
	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
	port->irq = irq;
	port->rs485_config = stm32_usart_config_rs485;
	port->rs485_supported = stm32_rs485_supported;

	ret = stm32_usart_init_rs485(port, pdev);
	if (ret)
		return ret;

	stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
		of_property_read_bool(pdev->dev.of_node, "wakeup-source");

	stm32port->swap = stm32port->info->cfg.has_swap &&
		of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");

	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);
	port->mapbase = res->start;

	spin_lock_init(&port->lock);

	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32port->clk))
		return PTR_ERR(stm32port->clk);

	/* Ensure that clk rate is correct by enabling the clk */
	ret = clk_prepare_enable(stm32port->clk);
	if (ret)
		return ret;

	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
	if (!stm32port->port.uartclk) {
		ret = -EINVAL;
		goto err_clk;
	}

	stm32port->fifoen = stm32port->info->cfg.has_fifo;
	if (stm32port->fifoen) {
		stm32_usart_get_ftcfg(pdev, stm32port, "rx-threshold", &stm32port->rxftcfg);
		port->fifosize = stm32_usart_get_ftcfg(pdev, stm32port, "tx-threshold",
						       &stm32port->txftcfg);
	} else {
		port->fifosize = 1;
	}

	stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
	if (IS_ERR(stm32port->gpios)) {
		ret = PTR_ERR(stm32port->gpios);
		goto err_clk;
	}

	/*
	 * Both CTS/RTS gpios and the "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
	 * properties must not be specified at the same time.
	 */
	if (stm32port->hw_flow_control) {
		if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
		    mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
			dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
			ret = -EINVAL;
			goto err_clk;
		}
	}

	return ret;

err_clk:
	clk_disable_unprepare(stm32port->clk);

	return ret;
}

static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int id;

	if (!np)
		return NULL;

	id = of_alias_get_id(np, "serial");
	if (id < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
		return NULL;
	}

	if (WARN_ON(id >= STM32_MAX_PORTS))
		return NULL;

	stm32_ports[id].hw_flow_control =
		of_property_read_bool(np, "st,hw-flow-ctrl") /*deprecated*/ ||
		of_property_read_bool(np, "uart-has-rtscts");
	stm32_ports[id].port.line = id;
	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
	stm32_ports[id].cr3_irq = 0;
	stm32_ports[id].last_res = RX_BUF_L;
	return &stm32_ports[id];
}

#ifdef CONFIG_OF
static const struct of_device_id stm32_match[] = {
	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_match);
#endif

static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->rx_buf)
		dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
				  stm32port->rx_dma_buf);
}

static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
					       &stm32port->rx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->rx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.src_addr = port->mapbase + ofs->rdr;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "rx dma channel config failed\n");
		stm32_usart_of_dma_rx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}

static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->tx_buf)
		dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
				  stm32port->tx_dma_buf);
}

static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
					       &stm32port->tx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->tx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.dst_addr = port->mapbase + ofs->tdr;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "tx dma channel config failed\n");
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}

static int stm32_usart_serial_probe(struct platform_device *pdev)
{
	struct stm32_port *stm32port;
	int ret;

	stm32port = stm32_usart_of_get_port(pdev);
	if (!stm32port)
		return -ENODEV;

	stm32port->info = of_device_get_match_data(&pdev->dev);
	if (!stm32port->info)
		return -EINVAL;

	stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* Fall back in interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->rx_ch))
		stm32port->rx_ch = NULL;

	stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
	if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_dma_rx;
	}
	/* Fall back in interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->tx_ch))
		stm32port->tx_ch = NULL;

	ret = stm32_usart_init_port(stm32port, pdev);
	if (ret)
		goto err_dma_tx;

	if (stm32port->wakeup_src) {
		device_set_wakeup_capable(&pdev->dev, true);
		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
		if (ret)
			goto err_deinit_port;
	}

	if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
		/* Fall back in interrupt mode */
		dma_release_channel(stm32port->rx_ch);
		stm32port->rx_ch = NULL;
	}

	if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
		/* Fall back in interrupt mode */
		dma_release_channel(stm32port->tx_ch);
		stm32port->tx_ch = NULL;
	}

	if (!stm32port->rx_ch)
		dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
	if (!stm32port->tx_ch)
		dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");

	platform_set_drvdata(pdev, &stm32port->port);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
	if (ret)
		goto err_port;

	pm_runtime_put_sync(&pdev->dev);

	return 0;

err_port:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	if (stm32port->tx_ch)
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
	if (stm32port->rx_ch)
		stm32_usart_of_dma_rx_remove(stm32port, pdev);

	if (stm32port->wakeup_src)
		dev_pm_clear_wake_irq(&pdev->dev);

err_deinit_port:
	if (stm32port->wakeup_src)
		device_set_wakeup_capable(&pdev->dev, false);

	stm32_usart_deinit_port(stm32port);

err_dma_tx:
	if (stm32port->tx_ch)
		dma_release_channel(stm32port->tx_ch);

err_dma_rx:
	if (stm32port->rx_ch)
		dma_release_channel(stm32port->rx_ch);

	return ret;
}

static void stm32_usart_serial_remove(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 cr3;

	pm_runtime_get_sync(&pdev->dev);
	uart_remove_one_port(&stm32_usart_driver, port);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);

	if (stm32_port->tx_ch) {
		stm32_usart_of_dma_tx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->tx_ch);
	}

	if (stm32_port->rx_ch) {
		stm32_usart_of_dma_rx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->rx_ch);
	}

	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= ~USART_CR3_EIE;
	cr3 &= ~USART_CR3_DMAR;
	cr3 &= ~USART_CR3_DMAT;
	cr3 &= ~USART_CR3_DDRE;
	writel_relaxed(cr3, port->membase + ofs->cr3);

	if (stm32_port->wakeup_src) {
		dev_pm_clear_wake_irq(&pdev->dev);
		device_init_wakeup(&pdev->dev, false);
	}

	stm32_usart_deinit_port(stm32_port);
}

static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 isr;
	int ret;

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
						(isr & USART_SR_TXE), 100,
						STM32_USART_TIMEOUT_USEC);
	if (ret != 0) {
		dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret);
		return;
	}
	writel_relaxed(ch, port->membase + ofs->tdr);
}

#ifdef CONFIG_SERIAL_STM32_CONSOLE
static void stm32_usart_console_write(struct console *co, const char *s,
				      unsigned int cnt)
{
	struct uart_port *port = &stm32_ports[co->index].port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	unsigned long flags;
	u32 old_cr1, new_cr1;
	int locked = 1;

	if (oops_in_progress)
		locked = uart_port_trylock_irqsave(port, &flags);
	else
		uart_port_lock_irqsave(port, &flags);

	/* Save and disable interrupts, enable the transmitter */
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
	new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
	writel_relaxed(new_cr1, port->membase + ofs->cr1);

	uart_console_write(port, s, cnt, stm32_usart_console_putchar);

	/* Restore interrupt state */
	writel_relaxed(old_cr1, port->membase + ofs->cr1);

	if (locked)
		uart_port_unlock_irqrestore(port, flags);
}

static int stm32_usart_console_setup(struct console *co, char *options)
{
	struct stm32_port *stm32port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index >= STM32_MAX_PORTS)
		return -ENODEV;

	stm32port = &stm32_ports[co->index];

	/*
	 * This driver does not support early console initialization
	 * (use ARM early printk support instead), so we only expect
	 * this to be called during the uart port registration when the
	 * driver gets probed and the port should be mapped at that point.
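	 * (Separately, when CONFIG_SERIAL_EARLYCON is enabled, the
	 * OF_EARLYCON_DECLARE() entries further below register an early
	 * console that does not go through this setup path.)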
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}

static struct console stm32_console = {
	.name		= STM32_SERIAL_NAME,
	.device		= uart_console_device,
	.write		= stm32_usart_console_write,
	.setup		= stm32_usart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &stm32_usart_driver,
};

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */

#ifdef CONFIG_SERIAL_EARLYCON
static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_usart_info *info = port->private_data;

	while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
		cpu_relax();

	writel_relaxed(ch, port->membase + info->ofs.tdr);
}

static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
{
	struct earlycon_device *device = console->data;
	struct uart_port *port = &device->port;

	uart_console_write(port, s, count, early_stm32_usart_console_putchar);
}

static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32h7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f4_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
#endif /* CONFIG_SERIAL_EARLYCON */

static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};

static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
							bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size = 0;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable low-power wake-up and wake-up irq if argument is set to
	 * "enable", disable low-power wake-up and wake-up irq otherwise
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
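		/*
		 * USART_CR1_UESM (set above) lets the USART wake the system
		 * from low-power (Stop) mode; USART_CR3_WUFIE (set below)
		 * enables the corresponding wake-up flag interrupt.
		 */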
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
		mctrl_gpio_enable_irq_wake(stm32_port->gpios);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			uart_port_lock_irqsave(port, &flags);
			/* Poll data from DMA RX buffer if any */
			if (!stm32_usart_rx_dma_pause(stm32_port))
				size += stm32_usart_receive_chars(port, true);
			stm32_usart_rx_dma_terminate(stm32_port);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		if (stm32_port->rx_ch) {
			ret = stm32_usart_rx_dma_start_or_resume(port);
			if (ret)
				return ret;
		}
		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default state
	 * and rely on the bootloader stage to restore this state upon resume.
	 * Otherwise, apply the idle or sleep states depending on wakeup
	 * capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, false);
		if (ret)
			return ret;
	}

	return uart_resume_port(&stm32_usart_driver, port);
}

static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}

static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	return clk_prepare_enable(stm32port->clk);
}

static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};

static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_usart_serial_probe,
	.remove_new	= stm32_usart_serial_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};
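/*
 * Module init/exit: register the uart core driver first, then the platform
 * driver, and tear them down in the reverse order; the init error path below
 * also unregisters the uart driver if platform registration fails.
 */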
static int __init stm32_usart_init(void)
{
	static char banner[] __initdata = "STM32 USART driver initialized";
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&stm32_usart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&stm32_serial_driver);
	if (ret)
		uart_unregister_driver(&stm32_usart_driver);

	return ret;
}

static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}

module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");