1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) Maxime Coquelin 2015 4 * Copyright (C) STMicroelectronics SA 2017 5 * Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com> 6 * Gerald Baeza <gerald.baeza@foss.st.com> 7 * Erwan Le Ray <erwan.leray@foss.st.com> 8 * 9 * Inspired by st-asc.c from STMicroelectronics (c) 10 */ 11 12 #include <linux/clk.h> 13 #include <linux/console.h> 14 #include <linux/delay.h> 15 #include <linux/dma-direction.h> 16 #include <linux/dmaengine.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/io.h> 19 #include <linux/iopoll.h> 20 #include <linux/irq.h> 21 #include <linux/module.h> 22 #include <linux/of.h> 23 #include <linux/of_platform.h> 24 #include <linux/pinctrl/consumer.h> 25 #include <linux/platform_device.h> 26 #include <linux/pm_runtime.h> 27 #include <linux/pm_wakeirq.h> 28 #include <linux/serial_core.h> 29 #include <linux/serial.h> 30 #include <linux/spinlock.h> 31 #include <linux/sysrq.h> 32 #include <linux/tty_flip.h> 33 #include <linux/tty.h> 34 35 #include "serial_mctrl_gpio.h" 36 #include "stm32-usart.h" 37 38 static void stm32_usart_stop_tx(struct uart_port *port); 39 static void stm32_usart_transmit_chars(struct uart_port *port); 40 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch); 41 42 static inline struct stm32_port *to_stm32_port(struct uart_port *port) 43 { 44 return container_of(port, struct stm32_port, port); 45 } 46 47 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits) 48 { 49 u32 val; 50 51 val = readl_relaxed(port->membase + reg); 52 val |= bits; 53 writel_relaxed(val, port->membase + reg); 54 } 55 56 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits) 57 { 58 u32 val; 59 60 val = readl_relaxed(port->membase + reg); 61 val &= ~bits; 62 writel_relaxed(val, port->membase + reg); 63 } 64 65 static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE, 66 u32 delay_DDE, u32 baud) 67 { 68 u32 rs485_deat_dedt; 69 u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT); 70 bool over8; 71 72 *cr3 |= USART_CR3_DEM; 73 over8 = *cr1 & USART_CR1_OVER8; 74 75 *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 76 77 if (over8) 78 rs485_deat_dedt = delay_ADE * baud * 8; 79 else 80 rs485_deat_dedt = delay_ADE * baud * 16; 81 82 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); 83 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? 84 rs485_deat_dedt_max : rs485_deat_dedt; 85 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) & 86 USART_CR1_DEAT_MASK; 87 *cr1 |= rs485_deat_dedt; 88 89 if (over8) 90 rs485_deat_dedt = delay_DDE * baud * 8; 91 else 92 rs485_deat_dedt = delay_DDE * baud * 16; 93 94 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); 95 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? 
96 rs485_deat_dedt_max : rs485_deat_dedt; 97 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) & 98 USART_CR1_DEDT_MASK; 99 *cr1 |= rs485_deat_dedt; 100 } 101 102 static int stm32_usart_config_rs485(struct uart_port *port, 103 struct serial_rs485 *rs485conf) 104 { 105 struct stm32_port *stm32_port = to_stm32_port(port); 106 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 107 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 108 u32 usartdiv, baud, cr1, cr3; 109 bool over8; 110 111 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 112 113 rs485conf->flags |= SER_RS485_RX_DURING_TX; 114 115 if (rs485conf->flags & SER_RS485_ENABLED) { 116 cr1 = readl_relaxed(port->membase + ofs->cr1); 117 cr3 = readl_relaxed(port->membase + ofs->cr3); 118 usartdiv = readl_relaxed(port->membase + ofs->brr); 119 usartdiv = usartdiv & GENMASK(15, 0); 120 over8 = cr1 & USART_CR1_OVER8; 121 122 if (over8) 123 usartdiv = usartdiv | (usartdiv & GENMASK(4, 0)) 124 << USART_BRR_04_R_SHIFT; 125 126 baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv); 127 stm32_usart_config_reg_rs485(&cr1, &cr3, 128 rs485conf->delay_rts_before_send, 129 rs485conf->delay_rts_after_send, 130 baud); 131 132 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) 133 cr3 &= ~USART_CR3_DEP; 134 else 135 cr3 |= USART_CR3_DEP; 136 137 writel_relaxed(cr3, port->membase + ofs->cr3); 138 writel_relaxed(cr1, port->membase + ofs->cr1); 139 } else { 140 stm32_usart_clr_bits(port, ofs->cr3, 141 USART_CR3_DEM | USART_CR3_DEP); 142 stm32_usart_clr_bits(port, ofs->cr1, 143 USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 144 } 145 146 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 147 148 return 0; 149 } 150 151 static int stm32_usart_init_rs485(struct uart_port *port, 152 struct platform_device *pdev) 153 { 154 struct serial_rs485 *rs485conf = &port->rs485; 155 156 rs485conf->flags = 0; 157 rs485conf->delay_rts_before_send = 0; 158 rs485conf->delay_rts_after_send = 0; 159 160 if (!pdev->dev.of_node) 161 return -ENODEV; 162 163 return uart_get_rs485_mode(port); 164 } 165 166 static bool stm32_usart_rx_dma_enabled(struct uart_port *port) 167 { 168 struct stm32_port *stm32_port = to_stm32_port(port); 169 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 170 171 if (!stm32_port->rx_ch) 172 return false; 173 174 return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR); 175 } 176 177 /* Return true when data is pending (in pio mode), and false when no data is pending. 
*/ 178 static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr) 179 { 180 struct stm32_port *stm32_port = to_stm32_port(port); 181 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 182 183 *sr = readl_relaxed(port->membase + ofs->isr); 184 /* Get pending characters in RDR or FIFO */ 185 if (*sr & USART_SR_RXNE) { 186 /* Get all pending characters from the RDR or the FIFO when using interrupts */ 187 if (!stm32_usart_rx_dma_enabled(port)) 188 return true; 189 190 /* Handle only RX data errors when using DMA */ 191 if (*sr & USART_SR_ERR_MASK) 192 return true; 193 } 194 195 return false; 196 } 197 198 static unsigned long stm32_usart_get_char_pio(struct uart_port *port) 199 { 200 struct stm32_port *stm32_port = to_stm32_port(port); 201 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 202 unsigned long c; 203 204 c = readl_relaxed(port->membase + ofs->rdr); 205 /* Apply RDR data mask */ 206 c &= stm32_port->rdr_mask; 207 208 return c; 209 } 210 211 static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port) 212 { 213 struct stm32_port *stm32_port = to_stm32_port(port); 214 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 215 unsigned long c; 216 unsigned int size = 0; 217 u32 sr; 218 char flag; 219 220 while (stm32_usart_pending_rx_pio(port, &sr)) { 221 sr |= USART_SR_DUMMY_RX; 222 flag = TTY_NORMAL; 223 224 /* 225 * Status bits have to be cleared before reading the RDR: 226 * In FIFO mode, reading the RDR will pop the next data 227 * (if any) along with its status bits into the SR. 228 * Not doing so leads to misalignment between RDR and SR, 229 * and clears the status bits of the next RX data. 230 * 231 * Clear error flags for stm32f7 and stm32h7 compatible 232 * devices. On stm32f4 compatible devices, the error bit is 233 * cleared by the sequence [read SR - read DR]. 234 */ 235 if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG) 236 writel_relaxed(sr & USART_SR_ERR_MASK, 237 port->membase + ofs->icr); 238 239 c = stm32_usart_get_char_pio(port); 240 port->icount.rx++; 241 size++; 242 if (sr & USART_SR_ERR_MASK) { 243 if (sr & USART_SR_ORE) { 244 port->icount.overrun++; 245 } else if (sr & USART_SR_PE) { 246 port->icount.parity++; 247 } else if (sr & USART_SR_FE) { 248 /* Break detection if character is null */ 249 if (!c) { 250 port->icount.brk++; 251 if (uart_handle_break(port)) 252 continue; 253 } else { 254 port->icount.frame++; 255 } 256 } 257 258 sr &= port->read_status_mask; 259 260 if (sr & USART_SR_PE) { 261 flag = TTY_PARITY; 262 } else if (sr & USART_SR_FE) { 263 if (!c) 264 flag = TTY_BREAK; 265 else 266 flag = TTY_FRAME; 267 } 268 } 269 270 if (uart_prepare_sysrq_char(port, c)) 271 continue; 272 uart_insert_char(port, sr, USART_SR_ORE, c, flag); 273 } 274 275 return size; 276 } 277 278 static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size) 279 { 280 struct stm32_port *stm32_port = to_stm32_port(port); 281 struct tty_port *ttyport = &stm32_port->port.state->port; 282 unsigned char *dma_start; 283 int dma_count, i; 284 285 dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res); 286 287 /* 288 * Apply rdr_mask on buffer in order to mask parity bit. 289 * This loop is useless in cs8 mode because DMA copies only 290 * 8 bits and already ignores parity bit.
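* For example, with CS7 plus parity, rdr_mask is 0x7f (BIT(7) - 1) and the loop below strips the parity bit received in bit 7 of each byte.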
291 */ 292 if (!(stm32_port->rdr_mask == (BIT(8) - 1))) 293 for (i = 0; i < dma_size; i++) 294 *(dma_start + i) &= stm32_port->rdr_mask; 295 296 dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size); 297 port->icount.rx += dma_count; 298 if (dma_count != dma_size) 299 port->icount.buf_overrun++; 300 stm32_port->last_res -= dma_count; 301 if (stm32_port->last_res == 0) 302 stm32_port->last_res = RX_BUF_L; 303 } 304 305 static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port) 306 { 307 struct stm32_port *stm32_port = to_stm32_port(port); 308 unsigned int dma_size, size = 0; 309 310 /* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */ 311 if (stm32_port->rx_dma_state.residue > stm32_port->last_res) { 312 /* Conditional first part: from last_res to end of DMA buffer */ 313 dma_size = stm32_port->last_res; 314 stm32_usart_push_buffer_dma(port, dma_size); 315 size = dma_size; 316 } 317 318 dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue; 319 stm32_usart_push_buffer_dma(port, dma_size); 320 size += dma_size; 321 322 return size; 323 } 324 325 static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush) 326 { 327 struct stm32_port *stm32_port = to_stm32_port(port); 328 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 329 enum dma_status rx_dma_status; 330 u32 sr; 331 unsigned int size = 0; 332 333 if (stm32_usart_rx_dma_enabled(port) || force_dma_flush) { 334 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch, 335 stm32_port->rx_ch->cookie, 336 &stm32_port->rx_dma_state); 337 if (rx_dma_status == DMA_IN_PROGRESS) { 338 /* Empty DMA buffer */ 339 size = stm32_usart_receive_chars_dma(port); 340 sr = readl_relaxed(port->membase + ofs->isr); 341 if (sr & USART_SR_ERR_MASK) { 342 /* Disable DMA request line */ 343 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 344 345 /* Switch to PIO mode to handle the errors */ 346 size += stm32_usart_receive_chars_pio(port); 347 348 /* Switch back to DMA mode */ 349 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); 350 } 351 } else { 352 /* Disable RX DMA */ 353 dmaengine_terminate_async(stm32_port->rx_ch); 354 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 355 /* Fall back to interrupt mode */ 356 dev_dbg(port->dev, "DMA error, fallback to irq mode\n"); 357 size = stm32_usart_receive_chars_pio(port); 358 } 359 } else { 360 size = stm32_usart_receive_chars_pio(port); 361 } 362 363 return size; 364 } 365 366 static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port) 367 { 368 dmaengine_terminate_async(stm32_port->tx_ch); 369 stm32_port->tx_dma_busy = false; 370 } 371 372 static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port) 373 { 374 /* 375 * We cannot use the function "dmaengine_tx_status" to know the 376 * status of DMA. This function does not show if the "dma complete" 377 * callback of the DMA transaction has been called. So we prefer 378 * to use "tx_dma_busy" flag to prevent dual DMA transaction at the 379 * same time. 
380 */ 381 return stm32_port->tx_dma_busy; 382 } 383 384 static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port) 385 { 386 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 387 388 return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) & USART_CR3_DMAT); 389 } 390 391 static void stm32_usart_tx_dma_complete(void *arg) 392 { 393 struct uart_port *port = arg; 394 struct stm32_port *stm32port = to_stm32_port(port); 395 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 396 unsigned long flags; 397 398 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 399 stm32_usart_tx_dma_terminate(stm32port); 400 401 /* Let's see if we have pending data to send */ 402 spin_lock_irqsave(&port->lock, flags); 403 stm32_usart_transmit_chars(port); 404 spin_unlock_irqrestore(&port->lock, flags); 405 } 406 407 static void stm32_usart_tx_interrupt_enable(struct uart_port *port) 408 { 409 struct stm32_port *stm32_port = to_stm32_port(port); 410 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 411 412 /* 413 * Enables TX FIFO threashold irq when FIFO is enabled, 414 * or TX empty irq when FIFO is disabled 415 */ 416 if (stm32_port->fifoen && stm32_port->txftcfg >= 0) 417 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE); 418 else 419 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE); 420 } 421 422 static void stm32_usart_tc_interrupt_enable(struct uart_port *port) 423 { 424 struct stm32_port *stm32_port = to_stm32_port(port); 425 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 426 427 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE); 428 } 429 430 static void stm32_usart_rx_dma_complete(void *arg) 431 { 432 struct uart_port *port = arg; 433 struct tty_port *tport = &port->state->port; 434 unsigned int size; 435 unsigned long flags; 436 437 spin_lock_irqsave(&port->lock, flags); 438 size = stm32_usart_receive_chars(port, false); 439 uart_unlock_and_check_sysrq_irqrestore(port, flags); 440 if (size) 441 tty_flip_buffer_push(tport); 442 } 443 444 static void stm32_usart_tx_interrupt_disable(struct uart_port *port) 445 { 446 struct stm32_port *stm32_port = to_stm32_port(port); 447 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 448 449 if (stm32_port->fifoen && stm32_port->txftcfg >= 0) 450 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE); 451 else 452 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE); 453 } 454 455 static void stm32_usart_tc_interrupt_disable(struct uart_port *port) 456 { 457 struct stm32_port *stm32_port = to_stm32_port(port); 458 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 459 460 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE); 461 } 462 463 static void stm32_usart_rs485_rts_enable(struct uart_port *port) 464 { 465 struct stm32_port *stm32_port = to_stm32_port(port); 466 struct serial_rs485 *rs485conf = &port->rs485; 467 468 if (stm32_port->hw_flow_control || 469 !(rs485conf->flags & SER_RS485_ENABLED)) 470 return; 471 472 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 473 mctrl_gpio_set(stm32_port->gpios, 474 stm32_port->port.mctrl | TIOCM_RTS); 475 } else { 476 mctrl_gpio_set(stm32_port->gpios, 477 stm32_port->port.mctrl & ~TIOCM_RTS); 478 } 479 } 480 481 static void stm32_usart_rs485_rts_disable(struct uart_port *port) 482 { 483 struct stm32_port *stm32_port = to_stm32_port(port); 484 struct serial_rs485 *rs485conf = &port->rs485; 485 486 if (stm32_port->hw_flow_control || 487 !(rs485conf->flags & SER_RS485_ENABLED)) 488 return; 489 490 if 
(rs485conf->flags & SER_RS485_RTS_ON_SEND) { 491 mctrl_gpio_set(stm32_port->gpios, 492 stm32_port->port.mctrl & ~TIOCM_RTS); 493 } else { 494 mctrl_gpio_set(stm32_port->gpios, 495 stm32_port->port.mctrl | TIOCM_RTS); 496 } 497 } 498 499 static void stm32_usart_transmit_chars_pio(struct uart_port *port) 500 { 501 struct stm32_port *stm32_port = to_stm32_port(port); 502 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 503 struct circ_buf *xmit = &port->state->xmit; 504 505 if (stm32_usart_tx_dma_enabled(stm32_port)) 506 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 507 508 while (!uart_circ_empty(xmit)) { 509 /* Check that TDR is empty before filling FIFO */ 510 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE)) 511 break; 512 writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr); 513 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 514 port->icount.tx++; 515 } 516 517 /* rely on TXE irq (mask or unmask) for sending remaining data */ 518 if (uart_circ_empty(xmit)) 519 stm32_usart_tx_interrupt_disable(port); 520 else 521 stm32_usart_tx_interrupt_enable(port); 522 } 523 524 static void stm32_usart_transmit_chars_dma(struct uart_port *port) 525 { 526 struct stm32_port *stm32port = to_stm32_port(port); 527 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 528 struct circ_buf *xmit = &port->state->xmit; 529 struct dma_async_tx_descriptor *desc = NULL; 530 unsigned int count; 531 532 if (stm32_usart_tx_dma_started(stm32port)) { 533 if (!stm32_usart_tx_dma_enabled(stm32port)) 534 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); 535 return; 536 } 537 538 count = uart_circ_chars_pending(xmit); 539 540 if (count > TX_BUF_L) 541 count = TX_BUF_L; 542 543 if (xmit->tail < xmit->head) { 544 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count); 545 } else { 546 size_t one = UART_XMIT_SIZE - xmit->tail; 547 size_t two; 548 549 if (one > count) 550 one = count; 551 two = count - one; 552 553 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one); 554 if (two) 555 memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two); 556 } 557 558 desc = dmaengine_prep_slave_single(stm32port->tx_ch, 559 stm32port->tx_dma_buf, 560 count, 561 DMA_MEM_TO_DEV, 562 DMA_PREP_INTERRUPT); 563 564 if (!desc) 565 goto fallback_err; 566 567 /* 568 * Set "tx_dma_busy" flag. This flag will be released when 569 * dmaengine_terminate_async will be called. This flag helps 570 * transmit_chars_dma not to start another DMA transaction 571 * if the callback of the previous is not yet called. 
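* The flag is set by stm32_usart_transmit_chars_dma() just before submitting the descriptor and cleared by stm32_usart_tx_dma_terminate(), e.g. from the stm32_usart_tx_dma_complete() callback once the transfer has finished.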
572 */ 573 stm32port->tx_dma_busy = true; 574 575 desc->callback = stm32_usart_tx_dma_complete; 576 desc->callback_param = port; 577 578 /* Push current DMA TX transaction in the pending queue */ 579 if (dma_submit_error(dmaengine_submit(desc))) { 580 /* dma no yet started, safe to free resources */ 581 stm32_usart_tx_dma_terminate(stm32port); 582 goto fallback_err; 583 } 584 585 /* Issue pending DMA TX requests */ 586 dma_async_issue_pending(stm32port->tx_ch); 587 588 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); 589 590 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); 591 port->icount.tx += count; 592 return; 593 594 fallback_err: 595 stm32_usart_transmit_chars_pio(port); 596 } 597 598 static void stm32_usart_transmit_chars(struct uart_port *port) 599 { 600 struct stm32_port *stm32_port = to_stm32_port(port); 601 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 602 struct circ_buf *xmit = &port->state->xmit; 603 u32 isr; 604 int ret; 605 606 if (!stm32_port->hw_flow_control && 607 port->rs485.flags & SER_RS485_ENABLED) { 608 stm32_port->txdone = false; 609 stm32_usart_tc_interrupt_disable(port); 610 stm32_usart_rs485_rts_enable(port); 611 } 612 613 if (port->x_char) { 614 if (stm32_usart_tx_dma_started(stm32_port) && 615 stm32_usart_tx_dma_enabled(stm32_port)) 616 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 617 618 /* Check that TDR is empty before filling FIFO */ 619 ret = 620 readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, 621 isr, 622 (isr & USART_SR_TXE), 623 10, 1000); 624 if (ret) 625 dev_warn(port->dev, "1 character may be erased\n"); 626 627 writel_relaxed(port->x_char, port->membase + ofs->tdr); 628 port->x_char = 0; 629 port->icount.tx++; 630 if (stm32_usart_tx_dma_started(stm32_port)) 631 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); 632 return; 633 } 634 635 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 636 stm32_usart_tx_interrupt_disable(port); 637 return; 638 } 639 640 if (ofs->icr == UNDEF_REG) 641 stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC); 642 else 643 writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr); 644 645 if (stm32_port->tx_ch) 646 stm32_usart_transmit_chars_dma(port); 647 else 648 stm32_usart_transmit_chars_pio(port); 649 650 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 651 uart_write_wakeup(port); 652 653 if (uart_circ_empty(xmit)) { 654 stm32_usart_tx_interrupt_disable(port); 655 if (!stm32_port->hw_flow_control && 656 port->rs485.flags & SER_RS485_ENABLED) { 657 stm32_port->txdone = true; 658 stm32_usart_tc_interrupt_enable(port); 659 } 660 } 661 } 662 663 static irqreturn_t stm32_usart_interrupt(int irq, void *ptr) 664 { 665 struct uart_port *port = ptr; 666 struct tty_port *tport = &port->state->port; 667 struct stm32_port *stm32_port = to_stm32_port(port); 668 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 669 u32 sr; 670 unsigned int size; 671 672 sr = readl_relaxed(port->membase + ofs->isr); 673 674 if (!stm32_port->hw_flow_control && 675 port->rs485.flags & SER_RS485_ENABLED && 676 (sr & USART_SR_TC)) { 677 stm32_usart_tc_interrupt_disable(port); 678 stm32_usart_rs485_rts_disable(port); 679 } 680 681 if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) 682 writel_relaxed(USART_ICR_RTOCF, 683 port->membase + ofs->icr); 684 685 if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) { 686 /* Clear wake up flag and disable wake up interrupt */ 687 writel_relaxed(USART_ICR_WUCF, 688 port->membase + ofs->icr); 689 stm32_usart_clr_bits(port, ofs->cr3, 
USART_CR3_WUFIE); 690 if (irqd_is_wakeup_set(irq_get_irq_data(port->irq))) 691 pm_wakeup_event(tport->tty->dev, 0); 692 } 693 694 /* 695 * RX errors in DMA mode have to be handled ASAP to avoid an overrun, as the DMA request 696 * line has been masked by HW and RX data are stacking up in the FIFO. 697 */ 698 if (!stm32_port->throttled) { 699 if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) || 700 ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port))) { 701 spin_lock(&port->lock); 702 size = stm32_usart_receive_chars(port, false); 703 uart_unlock_and_check_sysrq(port); 704 if (size) 705 tty_flip_buffer_push(tport); 706 } 707 } 708 709 if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) { 710 spin_lock(&port->lock); 711 stm32_usart_transmit_chars(port); 712 spin_unlock(&port->lock); 713 } 714 715 if (stm32_usart_rx_dma_enabled(port)) 716 return IRQ_WAKE_THREAD; 717 else 718 return IRQ_HANDLED; 719 } 720 721 static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr) 722 { 723 struct uart_port *port = ptr; 724 struct tty_port *tport = &port->state->port; 725 struct stm32_port *stm32_port = to_stm32_port(port); 726 unsigned int size; 727 unsigned long flags; 728 729 /* Receiver timeout irq for DMA RX */ 730 if (!stm32_port->throttled) { 731 spin_lock_irqsave(&port->lock, flags); 732 size = stm32_usart_receive_chars(port, false); 733 uart_unlock_and_check_sysrq_irqrestore(port, flags); 734 if (size) 735 tty_flip_buffer_push(tport); 736 } 737 738 return IRQ_HANDLED; 739 } 740 741 static unsigned int stm32_usart_tx_empty(struct uart_port *port) 742 { 743 struct stm32_port *stm32_port = to_stm32_port(port); 744 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 745 746 if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC) 747 return TIOCSER_TEMT; 748 749 return 0; 750 } 751 752 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl) 753 { 754 struct stm32_port *stm32_port = to_stm32_port(port); 755 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 756 757 if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) 758 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE); 759 else 760 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE); 761 762 mctrl_gpio_set(stm32_port->gpios, mctrl); 763 } 764 765 static unsigned int stm32_usart_get_mctrl(struct uart_port *port) 766 { 767 struct stm32_port *stm32_port = to_stm32_port(port); 768 unsigned int ret; 769 770 /* This routine is used to get signals of: DCD, DSR, RI, and CTS */ 771 ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; 772 773 return mctrl_gpio_get(stm32_port->gpios, &ret); 774 } 775 776 static void stm32_usart_enable_ms(struct uart_port *port) 777 { 778 mctrl_gpio_enable_ms(to_stm32_port(port)->gpios); 779 } 780 781 static void stm32_usart_disable_ms(struct uart_port *port) 782 { 783 mctrl_gpio_disable_ms(to_stm32_port(port)->gpios); 784 } 785 786 /* Transmit stop */ 787 static void stm32_usart_stop_tx(struct uart_port *port) 788 { 789 struct stm32_port *stm32_port = to_stm32_port(port); 790 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 791 792 stm32_usart_tx_interrupt_disable(port); 793 if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port)) 794 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 795 796 stm32_usart_rs485_rts_disable(port); 797 } 798 799 /* There are probably characters waiting to be transmitted.
*/ 800 static void stm32_usart_start_tx(struct uart_port *port) 801 { 802 struct circ_buf *xmit = &port->state->xmit; 803 804 if (uart_circ_empty(xmit) && !port->x_char) { 805 stm32_usart_rs485_rts_disable(port); 806 return; 807 } 808 809 stm32_usart_rs485_rts_enable(port); 810 811 stm32_usart_transmit_chars(port); 812 } 813 814 /* Flush the transmit buffer. */ 815 static void stm32_usart_flush_buffer(struct uart_port *port) 816 { 817 struct stm32_port *stm32_port = to_stm32_port(port); 818 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 819 820 if (stm32_port->tx_ch) { 821 stm32_usart_tx_dma_terminate(stm32_port); 822 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 823 } 824 } 825 826 /* Throttle the remote when input buffer is about to overflow. */ 827 static void stm32_usart_throttle(struct uart_port *port) 828 { 829 struct stm32_port *stm32_port = to_stm32_port(port); 830 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 831 unsigned long flags; 832 833 spin_lock_irqsave(&port->lock, flags); 834 835 /* 836 * Disable DMA request line if enabled, so the RX data gets queued into the FIFO. 837 * Hardware flow control is triggered when RX FIFO is full. 838 */ 839 if (stm32_usart_rx_dma_enabled(port)) 840 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 841 842 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); 843 if (stm32_port->cr3_irq) 844 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); 845 846 stm32_port->throttled = true; 847 spin_unlock_irqrestore(&port->lock, flags); 848 } 849 850 /* Unthrottle the remote, the input buffer can now accept data. */ 851 static void stm32_usart_unthrottle(struct uart_port *port) 852 { 853 struct stm32_port *stm32_port = to_stm32_port(port); 854 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 855 unsigned long flags; 856 857 spin_lock_irqsave(&port->lock, flags); 858 stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq); 859 if (stm32_port->cr3_irq) 860 stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq); 861 862 /* 863 * Switch back to DMA mode (re-enable DMA request line). 864 * Hardware flow control is stopped when FIFO is not full any more. 865 */ 866 if (stm32_port->rx_ch) 867 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); 868 869 stm32_port->throttled = false; 870 spin_unlock_irqrestore(&port->lock, flags); 871 } 872 873 /* Receive stop */ 874 static void stm32_usart_stop_rx(struct uart_port *port) 875 { 876 struct stm32_port *stm32_port = to_stm32_port(port); 877 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 878 879 /* Disable DMA request line. 
*/ 880 if (stm32_port->rx_ch) 881 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 882 883 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); 884 if (stm32_port->cr3_irq) 885 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); 886 } 887 888 /* Handle breaks - ignored by us */ 889 static void stm32_usart_break_ctl(struct uart_port *port, int break_state) 890 { 891 } 892 893 static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port) 894 { 895 struct stm32_port *stm32_port = to_stm32_port(port); 896 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 897 struct dma_async_tx_descriptor *desc; 898 int ret; 899 900 stm32_port->last_res = RX_BUF_L; 901 /* Prepare a DMA cyclic transaction */ 902 desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch, 903 stm32_port->rx_dma_buf, 904 RX_BUF_L, RX_BUF_P, 905 DMA_DEV_TO_MEM, 906 DMA_PREP_INTERRUPT); 907 if (!desc) { 908 dev_err(port->dev, "rx dma prep cyclic failed\n"); 909 return -ENODEV; 910 } 911 912 desc->callback = stm32_usart_rx_dma_complete; 913 desc->callback_param = port; 914 915 /* Push current DMA transaction in the pending queue */ 916 ret = dma_submit_error(dmaengine_submit(desc)); 917 if (ret) { 918 dmaengine_terminate_sync(stm32_port->rx_ch); 919 return ret; 920 } 921 922 /* Issue pending DMA requests */ 923 dma_async_issue_pending(stm32_port->rx_ch); 924 925 /* 926 * DMA request line not re-enabled at resume when port is throttled. 927 * It will be re-enabled by unthrottle ops. 928 */ 929 if (!stm32_port->throttled) 930 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); 931 932 return 0; 933 } 934 935 static int stm32_usart_startup(struct uart_port *port) 936 { 937 struct stm32_port *stm32_port = to_stm32_port(port); 938 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 939 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 940 const char *name = to_platform_device(port->dev)->name; 941 u32 val; 942 int ret; 943 944 ret = request_threaded_irq(port->irq, stm32_usart_interrupt, 945 stm32_usart_threaded_interrupt, 946 IRQF_ONESHOT | IRQF_NO_SUSPEND, 947 name, port); 948 if (ret) 949 return ret; 950 951 if (stm32_port->swap) { 952 val = readl_relaxed(port->membase + ofs->cr2); 953 val |= USART_CR2_SWAP; 954 writel_relaxed(val, port->membase + ofs->cr2); 955 } 956 957 /* RX FIFO Flush */ 958 if (ofs->rqr != UNDEF_REG) 959 writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr); 960 961 if (stm32_port->rx_ch) { 962 ret = stm32_usart_start_rx_dma_cyclic(port); 963 if (ret) { 964 free_irq(port->irq, port); 965 return ret; 966 } 967 } 968 969 /* RX enabling */ 970 val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit); 971 stm32_usart_set_bits(port, ofs->cr1, val); 972 973 return 0; 974 } 975 976 static void stm32_usart_shutdown(struct uart_port *port) 977 { 978 struct stm32_port *stm32_port = to_stm32_port(port); 979 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 980 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 981 u32 val, isr; 982 int ret; 983 984 if (stm32_usart_tx_dma_enabled(stm32_port)) 985 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 986 987 if (stm32_usart_tx_dma_started(stm32_port)) 988 stm32_usart_tx_dma_terminate(stm32_port); 989 990 /* Disable modem control interrupts */ 991 stm32_usart_disable_ms(port); 992 993 val = USART_CR1_TXEIE | USART_CR1_TE; 994 val |= stm32_port->cr1_irq | USART_CR1_RE; 995 val |= BIT(cfg->uart_enable_bit); 996 if (stm32_port->fifoen) 997 val |= USART_CR1_FIFOEN; 998 999 ret = 
readl_relaxed_poll_timeout(port->membase + ofs->isr, 1000 isr, (isr & USART_SR_TC), 1001 10, 100000); 1002 1003 /* Send the TC error message only when ISR_TC is not set */ 1004 if (ret) 1005 dev_err(port->dev, "Transmission is not complete\n"); 1006 1007 /* Disable RX DMA. */ 1008 if (stm32_port->rx_ch) 1009 dmaengine_terminate_async(stm32_port->rx_ch); 1010 1011 /* flush RX & TX FIFO */ 1012 if (ofs->rqr != UNDEF_REG) 1013 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ, 1014 port->membase + ofs->rqr); 1015 1016 stm32_usart_clr_bits(port, ofs->cr1, val); 1017 1018 free_irq(port->irq, port); 1019 } 1020 1021 static void stm32_usart_set_termios(struct uart_port *port, 1022 struct ktermios *termios, 1023 struct ktermios *old) 1024 { 1025 struct stm32_port *stm32_port = to_stm32_port(port); 1026 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1027 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1028 struct serial_rs485 *rs485conf = &port->rs485; 1029 unsigned int baud, bits; 1030 u32 usartdiv, mantissa, fraction, oversampling; 1031 tcflag_t cflag = termios->c_cflag; 1032 u32 cr1, cr2, cr3, isr; 1033 unsigned long flags; 1034 int ret; 1035 1036 if (!stm32_port->hw_flow_control) 1037 cflag &= ~CRTSCTS; 1038 1039 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8); 1040 1041 spin_lock_irqsave(&port->lock, flags); 1042 1043 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, 1044 isr, 1045 (isr & USART_SR_TC), 1046 10, 100000); 1047 1048 /* Send the TC error message only when ISR_TC is not set. */ 1049 if (ret) 1050 dev_err(port->dev, "Transmission is not complete\n"); 1051 1052 /* Stop serial port and reset value */ 1053 writel_relaxed(0, port->membase + ofs->cr1); 1054 1055 /* flush RX & TX FIFO */ 1056 if (ofs->rqr != UNDEF_REG) 1057 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ, 1058 port->membase + ofs->rqr); 1059 1060 cr1 = USART_CR1_TE | USART_CR1_RE; 1061 if (stm32_port->fifoen) 1062 cr1 |= USART_CR1_FIFOEN; 1063 cr2 = stm32_port->swap ? USART_CR2_SWAP : 0; 1064 1065 /* Tx and RX FIFO configuration */ 1066 cr3 = readl_relaxed(port->membase + ofs->cr3); 1067 cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE; 1068 if (stm32_port->fifoen) { 1069 if (stm32_port->txftcfg >= 0) 1070 cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT; 1071 if (stm32_port->rxftcfg >= 0) 1072 cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT; 1073 } 1074 1075 if (cflag & CSTOPB) 1076 cr2 |= USART_CR2_STOP_2B; 1077 1078 bits = tty_get_char_size(cflag); 1079 stm32_port->rdr_mask = (BIT(bits) - 1); 1080 1081 if (cflag & PARENB) { 1082 bits++; 1083 cr1 |= USART_CR1_PCE; 1084 } 1085 1086 /* 1087 * Word length configuration: 1088 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01 1089 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10 1090 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00 1091 * M0 and M1 already cleared by cr1 initialization. 
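* For example, CS8 with parity enabled gives bits = 9, so only M0 is set below.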
1092 */ 1093 if (bits == 9) { 1094 cr1 |= USART_CR1_M0; 1095 } else if ((bits == 7) && cfg->has_7bits_data) { 1096 cr1 |= USART_CR1_M1; 1097 } else if (bits != 8) { 1098 dev_dbg(port->dev, "Unsupported data bits config: %u bits\n" 1099 , bits); 1100 cflag &= ~CSIZE; 1101 cflag |= CS8; 1102 termios->c_cflag = cflag; 1103 bits = 8; 1104 if (cflag & PARENB) { 1105 bits++; 1106 cr1 |= USART_CR1_M0; 1107 } 1108 } 1109 1110 if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch || 1111 (stm32_port->fifoen && 1112 stm32_port->rxftcfg >= 0))) { 1113 if (cflag & CSTOPB) 1114 bits = bits + 3; /* 1 start bit + 2 stop bits */ 1115 else 1116 bits = bits + 2; /* 1 start bit + 1 stop bit */ 1117 1118 /* RX timeout irq to occur after last stop bit + bits */ 1119 stm32_port->cr1_irq = USART_CR1_RTOIE; 1120 writel_relaxed(bits, port->membase + ofs->rtor); 1121 cr2 |= USART_CR2_RTOEN; 1122 /* 1123 * Enable fifo threshold irq in two cases, either when there is no DMA, or when 1124 * wake up over usart, from low power until the DMA gets re-enabled by resume. 1125 */ 1126 stm32_port->cr3_irq = USART_CR3_RXFTIE; 1127 } 1128 1129 cr1 |= stm32_port->cr1_irq; 1130 cr3 |= stm32_port->cr3_irq; 1131 1132 if (cflag & PARODD) 1133 cr1 |= USART_CR1_PS; 1134 1135 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); 1136 if (cflag & CRTSCTS) { 1137 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; 1138 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE; 1139 } 1140 1141 usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud); 1142 1143 /* 1144 * The USART supports 16 or 8 times oversampling. 1145 * By default we prefer 16 times oversampling, so that the receiver 1146 * has a better tolerance to clock deviations. 1147 * 8 times oversampling is only used to achieve higher speeds. 1148 */ 1149 if (usartdiv < 16) { 1150 oversampling = 8; 1151 cr1 |= USART_CR1_OVER8; 1152 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8); 1153 } else { 1154 oversampling = 16; 1155 cr1 &= ~USART_CR1_OVER8; 1156 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8); 1157 } 1158 1159 mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT; 1160 fraction = usartdiv % oversampling; 1161 writel_relaxed(mantissa | fraction, port->membase + ofs->brr); 1162 1163 uart_update_timeout(port, cflag, baud); 1164 1165 port->read_status_mask = USART_SR_ORE; 1166 if (termios->c_iflag & INPCK) 1167 port->read_status_mask |= USART_SR_PE | USART_SR_FE; 1168 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 1169 port->read_status_mask |= USART_SR_FE; 1170 1171 /* Characters to ignore */ 1172 port->ignore_status_mask = 0; 1173 if (termios->c_iflag & IGNPAR) 1174 port->ignore_status_mask = USART_SR_PE | USART_SR_FE; 1175 if (termios->c_iflag & IGNBRK) { 1176 port->ignore_status_mask |= USART_SR_FE; 1177 /* 1178 * If we're ignoring parity and break indicators, 1179 * ignore overruns too (for real raw support). 1180 */ 1181 if (termios->c_iflag & IGNPAR) 1182 port->ignore_status_mask |= USART_SR_ORE; 1183 } 1184 1185 /* Ignore all characters if CREAD is not set */ 1186 if ((termios->c_cflag & CREAD) == 0) 1187 port->ignore_status_mask |= USART_SR_DUMMY_RX; 1188 1189 if (stm32_port->rx_ch) { 1190 /* 1191 * Setup DMA to collect only valid data and enable error irqs. 1192 * This also enables break reception when using DMA. 
1193 */ 1194 cr1 |= USART_CR1_PEIE; 1195 cr3 |= USART_CR3_EIE; 1196 cr3 |= USART_CR3_DMAR; 1197 cr3 |= USART_CR3_DDRE; 1198 } 1199 1200 if (rs485conf->flags & SER_RS485_ENABLED) { 1201 stm32_usart_config_reg_rs485(&cr1, &cr3, 1202 rs485conf->delay_rts_before_send, 1203 rs485conf->delay_rts_after_send, 1204 baud); 1205 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 1206 cr3 &= ~USART_CR3_DEP; 1207 rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND; 1208 } else { 1209 cr3 |= USART_CR3_DEP; 1210 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND; 1211 } 1212 1213 } else { 1214 cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP); 1215 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 1216 } 1217 1218 /* Configure wake up from low power on start bit detection */ 1219 if (stm32_port->wakeup_src) { 1220 cr3 &= ~USART_CR3_WUS_MASK; 1221 cr3 |= USART_CR3_WUS_START_BIT; 1222 } 1223 1224 writel_relaxed(cr3, port->membase + ofs->cr3); 1225 writel_relaxed(cr2, port->membase + ofs->cr2); 1226 writel_relaxed(cr1, port->membase + ofs->cr1); 1227 1228 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 1229 spin_unlock_irqrestore(&port->lock, flags); 1230 1231 /* Handle modem control interrupts */ 1232 if (UART_ENABLE_MS(port, termios->c_cflag)) 1233 stm32_usart_enable_ms(port); 1234 else 1235 stm32_usart_disable_ms(port); 1236 } 1237 1238 static const char *stm32_usart_type(struct uart_port *port) 1239 { 1240 return (port->type == PORT_STM32) ? DRIVER_NAME : NULL; 1241 } 1242 1243 static void stm32_usart_release_port(struct uart_port *port) 1244 { 1245 } 1246 1247 static int stm32_usart_request_port(struct uart_port *port) 1248 { 1249 return 0; 1250 } 1251 1252 static void stm32_usart_config_port(struct uart_port *port, int flags) 1253 { 1254 if (flags & UART_CONFIG_TYPE) 1255 port->type = PORT_STM32; 1256 } 1257 1258 static int 1259 stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser) 1260 { 1261 /* No user changeable parameters */ 1262 return -EINVAL; 1263 } 1264 1265 static void stm32_usart_pm(struct uart_port *port, unsigned int state, 1266 unsigned int oldstate) 1267 { 1268 struct stm32_port *stm32port = container_of(port, 1269 struct stm32_port, port); 1270 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1271 const struct stm32_usart_config *cfg = &stm32port->info->cfg; 1272 unsigned long flags; 1273 1274 switch (state) { 1275 case UART_PM_STATE_ON: 1276 pm_runtime_get_sync(port->dev); 1277 break; 1278 case UART_PM_STATE_OFF: 1279 spin_lock_irqsave(&port->lock, flags); 1280 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 1281 spin_unlock_irqrestore(&port->lock, flags); 1282 pm_runtime_put_sync(port->dev); 1283 break; 1284 } 1285 } 1286 1287 #if defined(CONFIG_CONSOLE_POLL) 1288 1289 /* Callbacks for characters polling in debug context (i.e. KGDB). 
*/ 1290 static int stm32_usart_poll_init(struct uart_port *port) 1291 { 1292 struct stm32_port *stm32_port = to_stm32_port(port); 1293 1294 return clk_prepare_enable(stm32_port->clk); 1295 } 1296 1297 static int stm32_usart_poll_get_char(struct uart_port *port) 1298 { 1299 struct stm32_port *stm32_port = to_stm32_port(port); 1300 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1301 1302 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE)) 1303 return NO_POLL_CHAR; 1304 1305 return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask; 1306 } 1307 1308 static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch) 1309 { 1310 stm32_usart_console_putchar(port, ch); 1311 } 1312 #endif /* CONFIG_CONSOLE_POLL */ 1313 1314 static const struct uart_ops stm32_uart_ops = { 1315 .tx_empty = stm32_usart_tx_empty, 1316 .set_mctrl = stm32_usart_set_mctrl, 1317 .get_mctrl = stm32_usart_get_mctrl, 1318 .stop_tx = stm32_usart_stop_tx, 1319 .start_tx = stm32_usart_start_tx, 1320 .throttle = stm32_usart_throttle, 1321 .unthrottle = stm32_usart_unthrottle, 1322 .stop_rx = stm32_usart_stop_rx, 1323 .enable_ms = stm32_usart_enable_ms, 1324 .break_ctl = stm32_usart_break_ctl, 1325 .startup = stm32_usart_startup, 1326 .shutdown = stm32_usart_shutdown, 1327 .flush_buffer = stm32_usart_flush_buffer, 1328 .set_termios = stm32_usart_set_termios, 1329 .pm = stm32_usart_pm, 1330 .type = stm32_usart_type, 1331 .release_port = stm32_usart_release_port, 1332 .request_port = stm32_usart_request_port, 1333 .config_port = stm32_usart_config_port, 1334 .verify_port = stm32_usart_verify_port, 1335 #if defined(CONFIG_CONSOLE_POLL) 1336 .poll_init = stm32_usart_poll_init, 1337 .poll_get_char = stm32_usart_poll_get_char, 1338 .poll_put_char = stm32_usart_poll_put_char, 1339 #endif /* CONFIG_CONSOLE_POLL */ 1340 }; 1341 1342 /* 1343 * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG) 1344 * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case, 1345 * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE. 1346 * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1. 
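* For example, the default threshold of 8 bytes matches index 3 in the table below, so stm32_usart_get_ftcfg() programs a TXFTCFG / RXFTCFG bitfield value of 2.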
1347 */ 1348 static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 }; 1349 1350 static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p, 1351 int *ftcfg) 1352 { 1353 u32 bytes, i; 1354 1355 /* DT option to get RX & TX FIFO threshold (default to 8 bytes) */ 1356 if (of_property_read_u32(pdev->dev.of_node, p, &bytes)) 1357 bytes = 8; 1358 1359 for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) 1360 if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes) 1361 break; 1362 if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg)) 1363 i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1; 1364 1365 dev_dbg(&pdev->dev, "%s set to %d bytes\n", p, 1366 stm32h7_usart_fifo_thresh_cfg[i]); 1367 1368 /* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */ 1369 if (i) 1370 *ftcfg = i - 1; 1371 else 1372 *ftcfg = -EINVAL; 1373 } 1374 1375 static void stm32_usart_deinit_port(struct stm32_port *stm32port) 1376 { 1377 clk_disable_unprepare(stm32port->clk); 1378 } 1379 1380 static int stm32_usart_init_port(struct stm32_port *stm32port, 1381 struct platform_device *pdev) 1382 { 1383 struct uart_port *port = &stm32port->port; 1384 struct resource *res; 1385 int ret, irq; 1386 1387 irq = platform_get_irq(pdev, 0); 1388 if (irq < 0) 1389 return irq; 1390 1391 port->iotype = UPIO_MEM; 1392 port->flags = UPF_BOOT_AUTOCONF; 1393 port->ops = &stm32_uart_ops; 1394 port->dev = &pdev->dev; 1395 port->fifosize = stm32port->info->cfg.fifosize; 1396 port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE); 1397 port->irq = irq; 1398 port->rs485_config = stm32_usart_config_rs485; 1399 1400 ret = stm32_usart_init_rs485(port, pdev); 1401 if (ret) 1402 return ret; 1403 1404 stm32port->wakeup_src = stm32port->info->cfg.has_wakeup && 1405 of_property_read_bool(pdev->dev.of_node, "wakeup-source"); 1406 1407 stm32port->swap = stm32port->info->cfg.has_swap && 1408 of_property_read_bool(pdev->dev.of_node, "rx-tx-swap"); 1409 1410 stm32port->fifoen = stm32port->info->cfg.has_fifo; 1411 if (stm32port->fifoen) { 1412 stm32_usart_get_ftcfg(pdev, "rx-threshold", 1413 &stm32port->rxftcfg); 1414 stm32_usart_get_ftcfg(pdev, "tx-threshold", 1415 &stm32port->txftcfg); 1416 } 1417 1418 port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 1419 if (IS_ERR(port->membase)) 1420 return PTR_ERR(port->membase); 1421 port->mapbase = res->start; 1422 1423 spin_lock_init(&port->lock); 1424 1425 stm32port->clk = devm_clk_get(&pdev->dev, NULL); 1426 if (IS_ERR(stm32port->clk)) 1427 return PTR_ERR(stm32port->clk); 1428 1429 /* Ensure that clk rate is correct by enabling the clk */ 1430 ret = clk_prepare_enable(stm32port->clk); 1431 if (ret) 1432 return ret; 1433 1434 stm32port->port.uartclk = clk_get_rate(stm32port->clk); 1435 if (!stm32port->port.uartclk) { 1436 ret = -EINVAL; 1437 goto err_clk; 1438 } 1439 1440 stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0); 1441 if (IS_ERR(stm32port->gpios)) { 1442 ret = PTR_ERR(stm32port->gpios); 1443 goto err_clk; 1444 } 1445 1446 /* 1447 * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts" 1448 * properties should not be specified. 
1449 */ 1450 if (stm32port->hw_flow_control) { 1451 if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) || 1452 mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) { 1453 dev_err(&pdev->dev, "Conflicting RTS/CTS config\n"); 1454 ret = -EINVAL; 1455 goto err_clk; 1456 } 1457 } 1458 1459 return ret; 1460 1461 err_clk: 1462 clk_disable_unprepare(stm32port->clk); 1463 1464 return ret; 1465 } 1466 1467 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev) 1468 { 1469 struct device_node *np = pdev->dev.of_node; 1470 int id; 1471 1472 if (!np) 1473 return NULL; 1474 1475 id = of_alias_get_id(np, "serial"); 1476 if (id < 0) { 1477 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id); 1478 return NULL; 1479 } 1480 1481 if (WARN_ON(id >= STM32_MAX_PORTS)) 1482 return NULL; 1483 1484 stm32_ports[id].hw_flow_control = 1485 of_property_read_bool (np, "st,hw-flow-ctrl") /*deprecated*/ || 1486 of_property_read_bool (np, "uart-has-rtscts"); 1487 stm32_ports[id].port.line = id; 1488 stm32_ports[id].cr1_irq = USART_CR1_RXNEIE; 1489 stm32_ports[id].cr3_irq = 0; 1490 stm32_ports[id].last_res = RX_BUF_L; 1491 return &stm32_ports[id]; 1492 } 1493 1494 #ifdef CONFIG_OF 1495 static const struct of_device_id stm32_match[] = { 1496 { .compatible = "st,stm32-uart", .data = &stm32f4_info}, 1497 { .compatible = "st,stm32f7-uart", .data = &stm32f7_info}, 1498 { .compatible = "st,stm32h7-uart", .data = &stm32h7_info}, 1499 {}, 1500 }; 1501 1502 MODULE_DEVICE_TABLE(of, stm32_match); 1503 #endif 1504 1505 static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port, 1506 struct platform_device *pdev) 1507 { 1508 if (stm32port->rx_buf) 1509 dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf, 1510 stm32port->rx_dma_buf); 1511 } 1512 1513 static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port, 1514 struct platform_device *pdev) 1515 { 1516 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1517 struct uart_port *port = &stm32port->port; 1518 struct device *dev = &pdev->dev; 1519 struct dma_slave_config config; 1520 int ret; 1521 1522 /* 1523 * Using DMA and threaded handler for the console could lead to 1524 * deadlocks. 
1525 */ 1526 if (uart_console(port)) 1527 return -ENODEV; 1528 1529 stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L, 1530 &stm32port->rx_dma_buf, 1531 GFP_KERNEL); 1532 if (!stm32port->rx_buf) 1533 return -ENOMEM; 1534 1535 /* Configure DMA channel */ 1536 memset(&config, 0, sizeof(config)); 1537 config.src_addr = port->mapbase + ofs->rdr; 1538 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1539 1540 ret = dmaengine_slave_config(stm32port->rx_ch, &config); 1541 if (ret < 0) { 1542 dev_err(dev, "rx dma channel config failed\n"); 1543 stm32_usart_of_dma_rx_remove(stm32port, pdev); 1544 return ret; 1545 } 1546 1547 return 0; 1548 } 1549 1550 static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port, 1551 struct platform_device *pdev) 1552 { 1553 if (stm32port->tx_buf) 1554 dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf, 1555 stm32port->tx_dma_buf); 1556 } 1557 1558 static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port, 1559 struct platform_device *pdev) 1560 { 1561 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1562 struct uart_port *port = &stm32port->port; 1563 struct device *dev = &pdev->dev; 1564 struct dma_slave_config config; 1565 int ret; 1566 1567 stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L, 1568 &stm32port->tx_dma_buf, 1569 GFP_KERNEL); 1570 if (!stm32port->tx_buf) 1571 return -ENOMEM; 1572 1573 /* Configure DMA channel */ 1574 memset(&config, 0, sizeof(config)); 1575 config.dst_addr = port->mapbase + ofs->tdr; 1576 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1577 1578 ret = dmaengine_slave_config(stm32port->tx_ch, &config); 1579 if (ret < 0) { 1580 dev_err(dev, "tx dma channel config failed\n"); 1581 stm32_usart_of_dma_tx_remove(stm32port, pdev); 1582 return ret; 1583 } 1584 1585 return 0; 1586 } 1587 1588 static int stm32_usart_serial_probe(struct platform_device *pdev) 1589 { 1590 struct stm32_port *stm32port; 1591 int ret; 1592 1593 stm32port = stm32_usart_of_get_port(pdev); 1594 if (!stm32port) 1595 return -ENODEV; 1596 1597 stm32port->info = of_device_get_match_data(&pdev->dev); 1598 if (!stm32port->info) 1599 return -EINVAL; 1600 1601 ret = stm32_usart_init_port(stm32port, pdev); 1602 if (ret) 1603 return ret; 1604 1605 if (stm32port->wakeup_src) { 1606 device_set_wakeup_capable(&pdev->dev, true); 1607 ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); 1608 if (ret) 1609 goto err_deinit_port; 1610 } 1611 1612 stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx"); 1613 if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) { 1614 ret = -EPROBE_DEFER; 1615 goto err_wakeirq; 1616 } 1617 /* Fall back in interrupt mode for any non-deferral error */ 1618 if (IS_ERR(stm32port->rx_ch)) 1619 stm32port->rx_ch = NULL; 1620 1621 stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx"); 1622 if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) { 1623 ret = -EPROBE_DEFER; 1624 goto err_dma_rx; 1625 } 1626 /* Fall back in interrupt mode for any non-deferral error */ 1627 if (IS_ERR(stm32port->tx_ch)) 1628 stm32port->tx_ch = NULL; 1629 1630 if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) { 1631 /* Fall back in interrupt mode */ 1632 dma_release_channel(stm32port->rx_ch); 1633 stm32port->rx_ch = NULL; 1634 } 1635 1636 if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) { 1637 /* Fall back in interrupt mode */ 1638 dma_release_channel(stm32port->tx_ch); 1639 stm32port->tx_ch = NULL; 1640 } 1641 1642 if (!stm32port->rx_ch) 1643 dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n"); 
1644 if (!stm32port->tx_ch) 1645 dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n"); 1646 1647 platform_set_drvdata(pdev, &stm32port->port); 1648 1649 pm_runtime_get_noresume(&pdev->dev); 1650 pm_runtime_set_active(&pdev->dev); 1651 pm_runtime_enable(&pdev->dev); 1652 1653 ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port); 1654 if (ret) 1655 goto err_port; 1656 1657 pm_runtime_put_sync(&pdev->dev); 1658 1659 return 0; 1660 1661 err_port: 1662 pm_runtime_disable(&pdev->dev); 1663 pm_runtime_set_suspended(&pdev->dev); 1664 pm_runtime_put_noidle(&pdev->dev); 1665 1666 if (stm32port->tx_ch) { 1667 stm32_usart_of_dma_tx_remove(stm32port, pdev); 1668 dma_release_channel(stm32port->tx_ch); 1669 } 1670 1671 if (stm32port->rx_ch) 1672 stm32_usart_of_dma_rx_remove(stm32port, pdev); 1673 1674 err_dma_rx: 1675 if (stm32port->rx_ch) 1676 dma_release_channel(stm32port->rx_ch); 1677 1678 err_wakeirq: 1679 if (stm32port->wakeup_src) 1680 dev_pm_clear_wake_irq(&pdev->dev); 1681 1682 err_deinit_port: 1683 if (stm32port->wakeup_src) 1684 device_set_wakeup_capable(&pdev->dev, false); 1685 1686 stm32_usart_deinit_port(stm32port); 1687 1688 return ret; 1689 } 1690 1691 static int stm32_usart_serial_remove(struct platform_device *pdev) 1692 { 1693 struct uart_port *port = platform_get_drvdata(pdev); 1694 struct stm32_port *stm32_port = to_stm32_port(port); 1695 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1696 int err; 1697 u32 cr3; 1698 1699 pm_runtime_get_sync(&pdev->dev); 1700 err = uart_remove_one_port(&stm32_usart_driver, port); 1701 if (err) 1702 return(err); 1703 1704 pm_runtime_disable(&pdev->dev); 1705 pm_runtime_set_suspended(&pdev->dev); 1706 pm_runtime_put_noidle(&pdev->dev); 1707 1708 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE); 1709 cr3 = readl_relaxed(port->membase + ofs->cr3); 1710 cr3 &= ~USART_CR3_EIE; 1711 cr3 &= ~USART_CR3_DMAR; 1712 cr3 &= ~USART_CR3_DDRE; 1713 writel_relaxed(cr3, port->membase + ofs->cr3); 1714 1715 if (stm32_port->tx_ch) { 1716 stm32_usart_of_dma_tx_remove(stm32_port, pdev); 1717 dma_release_channel(stm32_port->tx_ch); 1718 } 1719 1720 if (stm32_port->rx_ch) { 1721 stm32_usart_of_dma_rx_remove(stm32_port, pdev); 1722 dma_release_channel(stm32_port->rx_ch); 1723 } 1724 1725 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 1726 1727 if (stm32_port->wakeup_src) { 1728 dev_pm_clear_wake_irq(&pdev->dev); 1729 device_init_wakeup(&pdev->dev, false); 1730 } 1731 1732 stm32_usart_deinit_port(stm32_port); 1733 1734 return 0; 1735 } 1736 1737 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch) 1738 { 1739 struct stm32_port *stm32_port = to_stm32_port(port); 1740 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1741 u32 isr; 1742 int ret; 1743 1744 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr, 1745 (isr & USART_SR_TXE), 100, 1746 STM32_USART_TIMEOUT_USEC); 1747 if (ret != 0) { 1748 dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret); 1749 return; 1750 } 1751 writel_relaxed(ch, port->membase + ofs->tdr); 1752 } 1753 1754 #ifdef CONFIG_SERIAL_STM32_CONSOLE 1755 static void stm32_usart_console_write(struct console *co, const char *s, 1756 unsigned int cnt) 1757 { 1758 struct uart_port *port = &stm32_ports[co->index].port; 1759 struct stm32_port *stm32_port = to_stm32_port(port); 1760 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1761 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1762 unsigned long 
flags; 1763 u32 old_cr1, new_cr1; 1764 int locked = 1; 1765 1766 if (oops_in_progress) 1767 locked = spin_trylock_irqsave(&port->lock, flags); 1768 else 1769 spin_lock_irqsave(&port->lock, flags); 1770 1771 /* Save and disable interrupts, enable the transmitter */ 1772 old_cr1 = readl_relaxed(port->membase + ofs->cr1); 1773 new_cr1 = old_cr1 & ~USART_CR1_IE_MASK; 1774 new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit); 1775 writel_relaxed(new_cr1, port->membase + ofs->cr1); 1776 1777 uart_console_write(port, s, cnt, stm32_usart_console_putchar); 1778 1779 /* Restore interrupt state */ 1780 writel_relaxed(old_cr1, port->membase + ofs->cr1); 1781 1782 if (locked) 1783 spin_unlock_irqrestore(&port->lock, flags); 1784 } 1785 1786 static int stm32_usart_console_setup(struct console *co, char *options) 1787 { 1788 struct stm32_port *stm32port; 1789 int baud = 9600; 1790 int bits = 8; 1791 int parity = 'n'; 1792 int flow = 'n'; 1793 1794 if (co->index >= STM32_MAX_PORTS) 1795 return -ENODEV; 1796 1797 stm32port = &stm32_ports[co->index]; 1798 1799 /* 1800 * This driver does not support early console initialization 1801 * (use ARM early printk support instead), so we only expect 1802 * this to be called during the uart port registration when the 1803 * driver gets probed and the port should be mapped at that point. 1804 */ 1805 if (stm32port->port.mapbase == 0 || !stm32port->port.membase) 1806 return -ENXIO; 1807 1808 if (options) 1809 uart_parse_options(options, &baud, &parity, &bits, &flow); 1810 1811 return uart_set_options(&stm32port->port, co, baud, parity, bits, flow); 1812 } 1813 1814 static struct console stm32_console = { 1815 .name = STM32_SERIAL_NAME, 1816 .device = uart_console_device, 1817 .write = stm32_usart_console_write, 1818 .setup = stm32_usart_console_setup, 1819 .flags = CON_PRINTBUFFER, 1820 .index = -1, 1821 .data = &stm32_usart_driver, 1822 }; 1823 1824 #define STM32_SERIAL_CONSOLE (&stm32_console) 1825 1826 #else 1827 #define STM32_SERIAL_CONSOLE NULL 1828 #endif /* CONFIG_SERIAL_STM32_CONSOLE */ 1829 1830 #ifdef CONFIG_SERIAL_EARLYCON 1831 static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch) 1832 { 1833 struct stm32_usart_info *info = port->private_data; 1834 1835 while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE)) 1836 cpu_relax(); 1837 1838 writel_relaxed(ch, port->membase + info->ofs.tdr); 1839 } 1840 1841 static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count) 1842 { 1843 struct earlycon_device *device = console->data; 1844 struct uart_port *port = &device->port; 1845 1846 uart_console_write(port, s, count, early_stm32_usart_console_putchar); 1847 } 1848 1849 static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options) 1850 { 1851 if (!(device->port.membase || device->port.iobase)) 1852 return -ENODEV; 1853 device->port.private_data = &stm32h7_info; 1854 device->con->write = early_stm32_serial_write; 1855 return 0; 1856 } 1857 1858 static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options) 1859 { 1860 if (!(device->port.membase || device->port.iobase)) 1861 return -ENODEV; 1862 device->port.private_data = &stm32f7_info; 1863 device->con->write = early_stm32_serial_write; 1864 return 0; 1865 } 1866 1867 static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options) 1868 { 1869 if (!(device->port.membase || device->port.iobase)) 1870 return -ENODEV; 1871 
static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};

static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
							bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable low-power wake-up and wake-up irq if argument is set to
	 * "enable", disable low-power wake-up and wake-up irq otherwise
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
		mctrl_gpio_enable_irq_wake(stm32_port->gpios);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			spin_lock_irqsave(&port->lock, flags);
			/* Avoid race with RX IRQ when DMAR is cleared */
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Poll data from DMA RX buffer if any */
			size = stm32_usart_receive_chars(port, true);
			dmaengine_terminate_async(stm32_port->rx_ch);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		if (stm32_port->rx_ch) {
			ret = stm32_usart_start_rx_dma_cyclic(port);
			if (ret)
				return ret;
		}
		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}
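
/*
 * Example (illustrative only): the wake-up path above is only exercised when
 * the port has been flagged as a wake-up source. On a typical system this is
 * advertised in the device tree (a "wakeup-source" boolean property, assuming
 * the usual ST USART binding) and can be toggled at run time through the
 * standard sysfs attribute, e.g.
 *
 *   echo enabled > /sys/devices/platform/soc/<usart>/power/wakeup
 *
 * which is what device_may_wakeup() evaluates in the suspend/resume callbacks
 * below. The sysfs path is a placeholder and depends on the platform.
 */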
static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default state
	 * and rely on bootloader stage to restore this state upon resume.
	 * Otherwise, apply the idle or sleep states depending on wakeup
	 * capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, false);
		if (ret)
			return ret;
	}

	return uart_resume_port(&stm32_usart_driver, port);
}

static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}

static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	return clk_prepare_enable(stm32port->clk);
}

static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};

static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_usart_serial_probe,
	.remove		= stm32_usart_serial_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};

static int __init stm32_usart_init(void)
{
	static char banner[] __initdata = "STM32 USART driver initialized";
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&stm32_usart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&stm32_serial_driver);
	if (ret)
		uart_unregister_driver(&stm32_usart_driver);

	return ret;
}

static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}

module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");
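
/*
 * Example (illustrative only): a device tree node binding against this
 * platform driver might look like the sketch below. The compatible string is
 * taken from the earlycon declarations above and is assumed to also appear in
 * stm32_match; the unit address, register size, interrupt and clock
 * specifiers are placeholders that depend on the SoC.
 *
 *   usart1: serial@40011000 {
 *           compatible = "st,stm32h7-uart";
 *           reg = <0x40011000 0x400>;
 *           interrupts = <37>;
 *           clocks = <&rcc USART1_K>;
 *   };
 */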