// SPDX-License-Identifier: GPL-2.0+
/*
 * ***************************************************************************
 * Marvell Armada-3700 Serial Driver
 * Author: Wilson Ding <dingwei@marvell.com>
 * Copyright (C) 2015 Marvell International Ltd.
 * ***************************************************************************
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Register Map */
#define UART_STD_RBR		0x00
#define UART_EXT_RBR		0x18

#define UART_STD_TSH		0x04
#define UART_EXT_TSH		0x1C

#define UART_STD_CTRL1		0x08
#define UART_EXT_CTRL1		0x04
#define  CTRL_SOFT_RST		BIT(31)
#define  CTRL_TXFIFO_RST	BIT(15)
#define  CTRL_RXFIFO_RST	BIT(14)
#define  CTRL_SND_BRK_SEQ	BIT(11)
#define  CTRL_BRK_DET_INT	BIT(3)
#define  CTRL_FRM_ERR_INT	BIT(2)
#define  CTRL_PAR_ERR_INT	BIT(1)
#define  CTRL_OVR_ERR_INT	BIT(0)
#define  CTRL_BRK_INT		(CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | \
				 CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)

#define UART_STD_CTRL2		UART_STD_CTRL1
#define UART_EXT_CTRL2		0x20
#define  CTRL_STD_TX_RDY_INT	BIT(5)
#define  CTRL_EXT_TX_RDY_INT	BIT(6)
#define  CTRL_STD_RX_RDY_INT	BIT(4)
#define  CTRL_EXT_RX_RDY_INT	BIT(5)

#define UART_STAT		0x0C
#define  STAT_TX_FIFO_EMP	BIT(13)
#define  STAT_TX_FIFO_FUL	BIT(11)
#define  STAT_TX_EMP		BIT(6)
#define  STAT_STD_TX_RDY	BIT(5)
#define  STAT_EXT_TX_RDY	BIT(15)
#define  STAT_STD_RX_RDY	BIT(4)
#define  STAT_EXT_RX_RDY	BIT(14)
#define  STAT_BRK_DET		BIT(3)
#define  STAT_FRM_ERR		BIT(2)
#define  STAT_PAR_ERR		BIT(1)
#define  STAT_OVR_ERR		BIT(0)
#define  STAT_BRK_ERR		(STAT_BRK_DET | STAT_FRM_ERR \
				 | STAT_PAR_ERR | STAT_OVR_ERR)

/*
 * Marvell Armada 3700 Functional Specifications describes that bit 21 of UART
 * Clock Control register controls UART1 and bit 20 controls UART2. But in
 * reality bit 21 controls UART2 and bit 20 controls UART1. This seems to be an
 * error in Marvell's documentation. Hence the following CLK_DIS macros are
 * swapped.
 */

#define UART_BRDV		0x10
/* These bits are located in UART1 address space and control UART2 */
#define  UART2_CLK_DIS		BIT(21)
/* These bits are located in UART1 address space and control UART1 */
#define  UART1_CLK_DIS		BIT(20)
/* These bits are located in UART1 address space and control both UARTs */
#define  CLK_NO_XTAL		BIT(19)
#define  CLK_TBG_DIV1_SHIFT	15
#define  CLK_TBG_DIV1_MASK	0x7
#define  CLK_TBG_DIV1_MAX	6
#define  CLK_TBG_DIV2_SHIFT	12
#define  CLK_TBG_DIV2_MASK	0x7
#define  CLK_TBG_DIV2_MAX	6
#define  CLK_TBG_SEL_SHIFT	10
#define  CLK_TBG_SEL_MASK	0x3
/* These bits are located in both UARTs address space */
#define  BRDV_BAUD_MASK		0x3FF
#define  BRDV_BAUD_MAX		BRDV_BAUD_MASK

#define UART_OSAMP		0x14
#define  OSAMP_DEFAULT_DIVISOR	16
#define  OSAMP_DIVISORS_MASK	0x3F3F3F3F
#define  OSAMP_MAX_DIVISOR	63

#define MVEBU_NR_UARTS		2

#define MVEBU_UART_TYPE		"mvebu-uart"
#define DRIVER_NAME		"mvebu_serial"

enum {
	/* Either there is only one summed IRQ... */
	UART_IRQ_SUM = 0,
	/* ...or there are two separate IRQ for RX and TX */
	UART_RX_IRQ = 0,
	UART_TX_IRQ,
	UART_IRQ_COUNT
};

/* Diverging register offsets */
struct uart_regs_layout {
	unsigned int rbr;
	unsigned int tsh;
	unsigned int ctrl;
	unsigned int intr;
};

/* Diverging flags */
struct uart_flags {
	unsigned int ctrl_tx_rdy_int;
	unsigned int ctrl_rx_rdy_int;
	unsigned int stat_tx_rdy;
	unsigned int stat_rx_rdy;
};

/* Driver data, a structure for each UART port */
struct mvebu_uart_driver_data {
	bool is_ext;
	struct uart_regs_layout regs;
	struct uart_flags flags;
};

/* Saved registers during suspend */
struct mvebu_uart_pm_regs {
	unsigned int rbr;
	unsigned int tsh;
	unsigned int ctrl;
	unsigned int intr;
	unsigned int stat;
	unsigned int brdv;
	unsigned int osamp;
};

/* MVEBU UART driver structure */
struct mvebu_uart {
	struct uart_port *port;
	struct clk *clk;
	int irq[UART_IRQ_COUNT];
	struct mvebu_uart_driver_data *data;
#if defined(CONFIG_PM)
	struct mvebu_uart_pm_regs pm_regs;
#endif /* CONFIG_PM */
};

static struct mvebu_uart *to_mvuart(struct uart_port *port)
{
	return (struct mvebu_uart *)port->private_data;
}

#define IS_EXTENDED(port) (to_mvuart(port)->data->is_ext)

#define UART_RBR(port) (to_mvuart(port)->data->regs.rbr)
#define UART_TSH(port) (to_mvuart(port)->data->regs.tsh)
#define UART_CTRL(port) (to_mvuart(port)->data->regs.ctrl)
#define UART_INTR(port) (to_mvuart(port)->data->regs.intr)

#define CTRL_TX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_tx_rdy_int)
#define CTRL_RX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_rx_rdy_int)
#define STAT_TX_RDY(port) (to_mvuart(port)->data->flags.stat_tx_rdy)
#define STAT_RX_RDY(port) (to_mvuart(port)->data->flags.stat_rx_rdy)

static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS];

static DEFINE_SPINLOCK(mvebu_uart_lock);

/* Core UART Driver Operations */
static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
{
	unsigned long flags;
	unsigned int st;

	spin_lock_irqsave(&port->lock, flags);
	st = readl(port->membase + UART_STAT);
	spin_unlock_irqrestore(&port->lock, flags);

	return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
}

static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
{
	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}

static void mvebu_uart_set_mctrl(struct uart_port *port,
				 unsigned int mctrl)
{
/*
 * Even if we do not support configuring the modem control lines, this
 * function must be provided to the serial core
 */
}

static void mvebu_uart_stop_tx(struct uart_port *port)
{
	unsigned int ctl = readl(port->membase + UART_INTR(port));

	ctl &= ~CTRL_TX_RDY_INT(port);
	writel(ctl, port->membase + UART_INTR(port));
}

static void mvebu_uart_start_tx(struct uart_port *port)
{
	unsigned int ctl;
	struct circ_buf *xmit = &port->state->xmit;

	if (IS_EXTENDED(port) && !uart_circ_empty(xmit)) {
		writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
		uart_xmit_advance(port, 1);
	}

	ctl = readl(port->membase + UART_INTR(port));
	ctl |= CTRL_TX_RDY_INT(port);
	writel(ctl, port->membase + UART_INTR(port));
}

static void mvebu_uart_stop_rx(struct uart_port *port)
{
	unsigned int ctl;

	ctl = readl(port->membase + UART_CTRL(port));
	ctl &= ~CTRL_BRK_INT;
	writel(ctl, port->membase + UART_CTRL(port));

	ctl = readl(port->membase + UART_INTR(port));
	ctl &= ~CTRL_RX_RDY_INT(port);
	writel(ctl, port->membase + UART_INTR(port));
}

static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
{
	unsigned int ctl;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	ctl = readl(port->membase + UART_CTRL(port));
	if (brk == -1)
		ctl |= CTRL_SND_BRK_SEQ;
	else
		ctl &= ~CTRL_SND_BRK_SEQ;
	writel(ctl, port->membase + UART_CTRL(port));
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
{
	struct tty_port *tport = &port->state->port;
	unsigned char ch = 0;
	char flag = 0;
	int ret;

	do {
		if (status & STAT_RX_RDY(port)) {
			ch = readl(port->membase + UART_RBR(port));
			ch &= 0xff;
			flag = TTY_NORMAL;
			port->icount.rx++;

			if (status & STAT_PAR_ERR)
				port->icount.parity++;
		}

		/*
		 * For UART2, error bits are not cleared on buffer read.
		 * This causes an interrupt loop and a system hang.
		 */
		if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
			ret = readl(port->membase + UART_STAT);
			ret |= STAT_BRK_ERR;
			writel(ret, port->membase + UART_STAT);
		}

		if (status & STAT_BRK_DET) {
			port->icount.brk++;
			status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
			if (uart_handle_break(port))
				goto ignore_char;
		}

		if (status & STAT_OVR_ERR)
			port->icount.overrun++;

		if (status & STAT_FRM_ERR)
			port->icount.frame++;

		if (uart_handle_sysrq_char(port, ch))
			goto ignore_char;

		if (status & port->ignore_status_mask & STAT_PAR_ERR)
			status &= ~STAT_RX_RDY(port);

		status &= port->read_status_mask;

		if (status & STAT_PAR_ERR)
			flag = TTY_PARITY;

		status &= ~port->ignore_status_mask;

		if (status & STAT_RX_RDY(port))
			tty_insert_flip_char(tport, ch, flag);

		if (status & STAT_BRK_DET)
			tty_insert_flip_char(tport, 0, TTY_BREAK);

		if (status & STAT_FRM_ERR)
			tty_insert_flip_char(tport, 0, TTY_FRAME);

		if (status & STAT_OVR_ERR)
			tty_insert_flip_char(tport, 0, TTY_OVERRUN);

ignore_char:
		status = readl(port->membase + UART_STAT);
	} while (status & (STAT_RX_RDY(port) | STAT_BRK_DET));

	tty_flip_buffer_push(tport);
}

static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status)
{
	u8 ch;

	uart_port_tx_limited(port, ch, port->fifosize,
		!(readl(port->membase + UART_STAT) & STAT_TX_FIFO_FUL),
		writel(ch, port->membase + UART_TSH(port)),
		({}));
}

static irqreturn_t mvebu_uart_isr(int irq, void *dev_id)
{
	struct uart_port *port = (struct uart_port *)dev_id;
	unsigned int st = readl(port->membase + UART_STAT);

	if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
		  STAT_BRK_DET))
		mvebu_uart_rx_chars(port, st);

	if (st & STAT_TX_RDY(port))
		mvebu_uart_tx_chars(port, st);

	return IRQ_HANDLED;
}

static irqreturn_t mvebu_uart_rx_isr(int irq, void *dev_id)
{
	struct uart_port *port = (struct uart_port *)dev_id;
	unsigned int st = readl(port->membase + UART_STAT);

	if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
		  STAT_BRK_DET))
		mvebu_uart_rx_chars(port, st);

	return IRQ_HANDLED;
}

static irqreturn_t mvebu_uart_tx_isr(int irq, void *dev_id)
{
	struct uart_port *port = (struct uart_port *)dev_id;
	unsigned int st = readl(port->membase + UART_STAT);

	if (st & STAT_TX_RDY(port))
		mvebu_uart_tx_chars(port, st);

	return IRQ_HANDLED;
}

static int mvebu_uart_startup(struct uart_port *port)
{
	struct mvebu_uart *mvuart = to_mvuart(port);
	unsigned int ctl;
	int ret;

	writel(CTRL_TXFIFO_RST | CTRL_RXFIFO_RST,
	       port->membase + UART_CTRL(port));
	udelay(1);

	/* Clear the error bits of state register before IRQ request */
	ret = readl(port->membase + UART_STAT);
	ret |= STAT_BRK_ERR;
	writel(ret, port->membase + UART_STAT);

	writel(CTRL_BRK_INT, port->membase + UART_CTRL(port));

	ctl = readl(port->membase + UART_INTR(port));
	ctl |= CTRL_RX_RDY_INT(port);
	writel(ctl, port->membase + UART_INTR(port));

	if (!mvuart->irq[UART_TX_IRQ]) {
		/* Old bindings with just one interrupt (UART0 only) */
		ret = devm_request_irq(port->dev, mvuart->irq[UART_IRQ_SUM],
				       mvebu_uart_isr, port->irqflags,
				       dev_name(port->dev), port);
		if (ret) {
			dev_err(port->dev, "unable to request IRQ %d\n",
				mvuart->irq[UART_IRQ_SUM]);
			return ret;
		}
	} else {
		/* New bindings with an IRQ for RX and TX (both UART) */
		ret = devm_request_irq(port->dev, mvuart->irq[UART_RX_IRQ],
				       mvebu_uart_rx_isr, port->irqflags,
				       dev_name(port->dev), port);
		if (ret) {
			dev_err(port->dev, "unable to request IRQ %d\n",
				mvuart->irq[UART_RX_IRQ]);
			return ret;
		}

		ret = devm_request_irq(port->dev, mvuart->irq[UART_TX_IRQ],
				       mvebu_uart_tx_isr, port->irqflags,
				       dev_name(port->dev),
				       port);
		if (ret) {
			dev_err(port->dev, "unable to request IRQ %d\n",
				mvuart->irq[UART_TX_IRQ]);
			devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ],
				      port);
			return ret;
		}
	}

	return 0;
}

static void mvebu_uart_shutdown(struct uart_port *port)
{
	struct mvebu_uart *mvuart = to_mvuart(port);

	writel(0, port->membase + UART_INTR(port));

	if (!mvuart->irq[UART_TX_IRQ]) {
		devm_free_irq(port->dev, mvuart->irq[UART_IRQ_SUM], port);
	} else {
		devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ], port);
		devm_free_irq(port->dev, mvuart->irq[UART_TX_IRQ], port);
	}
}

static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
{
	unsigned int d_divisor, m_divisor;
	unsigned long flags;
	u32 brdv, osamp;

	if (!port->uartclk)
		return 0;

	/*
	 * The baudrate is derived from the UART clock thanks to divisors:
	 *   > d1 * d2 ("TBG divisors"): can divide only TBG clock from 1 to 6
	 *   > D ("baud generator"): can divide the clock from 1 to 1023
	 *   > M ("fractional divisor"): allows a better accuracy (from 1 to 63)
	 *
	 * Exact formulas for calculating baudrate:
	 *
	 * with default x16 scheme:
	 *   baudrate = xtal / (d * 16)
	 *   baudrate = tbg / (d1 * d2 * d * 16)
	 *
	 * with fractional divisor:
	 *   baudrate = 10 * xtal / (d * (3 * (m1 + m2) + 2 * (m3 + m4)))
	 *   baudrate = 10 * tbg / (d1*d2 * d * (3 * (m1 + m2) + 2 * (m3 + m4)))
	 *
	 * Oversampling value:
	 *   osamp = (m1 << 0) | (m2 << 8) | (m3 << 16) | (m4 << 24);
	 *
	 * Where m1 controls number of clock cycles per bit for bits 1,2,3;
	 * m2 for bits 4,5,6; m3 for bits 7,8 and m4 for bits 9,10.
	 *
	 * To simplify baudrate setup set all the M prescalers to the same
	 * value. For baudrates 9600 Bd and higher, it is enough to use the
	 * default (x16) divisor or fractional divisor with M = 63, so there
	 * is no need to use real fractional support (where the M prescalers
	 * are not equal).
	 *
	 * When all the M prescalers are zeroed then default (x16) divisor is
	 * used. Default x16 scheme is more stable than M (fractional divisor),
	 * so use M only when D divisor is not enough to derive baudrate.
	 *
	 * Member port->uartclk is either xtal clock rate or TBG clock rate
	 * divided by (d1 * d2). So d1 and d2 are already set by the UART clock
	 * driver (and UART driver itself cannot change them). Moreover they are
	 * shared between both UARTs.
	 */

	m_divisor = OSAMP_DEFAULT_DIVISOR;
	d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);

	if (d_divisor > BRDV_BAUD_MAX) {
		/*
		 * Experiments show that small M divisors are unstable.
		 * Use maximal possible M = 63 and calculate D divisor.
		 */
		m_divisor = OSAMP_MAX_DIVISOR;
		d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
	}

	if (d_divisor < 1)
		d_divisor = 1;
	else if (d_divisor > BRDV_BAUD_MAX)
		d_divisor = BRDV_BAUD_MAX;

	spin_lock_irqsave(&mvebu_uart_lock, flags);
	brdv = readl(port->membase + UART_BRDV);
	brdv &= ~BRDV_BAUD_MASK;
	brdv |= d_divisor;
	writel(brdv, port->membase + UART_BRDV);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);

	osamp = readl(port->membase + UART_OSAMP);
	osamp &= ~OSAMP_DIVISORS_MASK;
	if (m_divisor != OSAMP_DEFAULT_DIVISOR)
		osamp |= (m_divisor << 0) | (m_divisor << 8) |
			 (m_divisor << 16) | (m_divisor << 24);
	writel(osamp, port->membase + UART_OSAMP);

	return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor);
}

static void mvebu_uart_set_termios(struct uart_port *port,
				   struct ktermios *termios,
				   const struct ktermios *old)
{
	unsigned long flags;
	unsigned int baud, min_baud, max_baud;

	spin_lock_irqsave(&port->lock, flags);

	port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
		STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;

	if (termios->c_iflag & INPCK)
		port->read_status_mask |= STAT_FRM_ERR | STAT_PAR_ERR;

	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |=
			STAT_FRM_ERR | STAT_PAR_ERR | STAT_OVR_ERR;

	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;

	/*
	 * Maximal divisor is 1023 and maximal fractional divisor is 63. And
	 * experiments show that baudrates above 1/80 of parent clock rate are
	 * not stable. So disallow baudrates above 1/80 of the parent clock
	 * rate. If port->uartclk is not available, then
	 * mvebu_uart_baud_rate_set() fails, so values min_baud and max_baud
	 * in this case do not matter.
	 */
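	/*
	 * Illustrative numbers only (an assumption, not from the datasheet):
	 * with the 25 MHz xtal reference commonly used as port->uartclk on
	 * Armada 3700 boards, the limits below evaluate to roughly
	 *   min_baud = DIV_ROUND_UP(25000000, 1023 * 63) = 388 Bd
	 *   max_baud = 25000000 / 80                     = 312500 Bd
	 * so the standard rates from 9600 Bd up to 230400 Bd fall well inside
	 * the allowed window.
	 */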
	min_baud = DIV_ROUND_UP(port->uartclk, BRDV_BAUD_MAX *
				OSAMP_MAX_DIVISOR);
	max_baud = port->uartclk / 80;

	baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
	baud = mvebu_uart_baud_rate_set(port, baud);

	/* In case baudrate cannot be changed, report previous old value */
	if (baud == 0 && old)
		baud = tty_termios_baud_rate(old);

	/* Only the following flag changes are supported */
	if (old) {
		termios->c_iflag &= INPCK | IGNPAR;
		termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
		termios->c_cflag &= CREAD | CBAUD;
		termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
		termios->c_cflag |= CS8;
	}

	if (baud != 0) {
		tty_termios_encode_baud_rate(termios, baud, baud);
		uart_update_timeout(port, termios->c_cflag, baud);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}

static const char *mvebu_uart_type(struct uart_port *port)
{
	return MVEBU_UART_TYPE;
}

static void mvebu_uart_release_port(struct uart_port *port)
{
	/* Nothing to do here */
}

static int mvebu_uart_request_port(struct uart_port *port)
{
	return 0;
}

#ifdef CONFIG_CONSOLE_POLL
static int mvebu_uart_get_poll_char(struct uart_port *port)
{
	unsigned int st = readl(port->membase + UART_STAT);

	if (!(st & STAT_RX_RDY(port)))
		return NO_POLL_CHAR;

	return readl(port->membase + UART_RBR(port));
}

static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c)
{
	unsigned int st;

	for (;;) {
		st = readl(port->membase + UART_STAT);

		if (!(st & STAT_TX_FIFO_FUL))
			break;

		udelay(1);
	}

	writel(c, port->membase + UART_TSH(port));
}
#endif

static const struct uart_ops mvebu_uart_ops = {
	.tx_empty	= mvebu_uart_tx_empty,
	.set_mctrl	= mvebu_uart_set_mctrl,
	.get_mctrl	= mvebu_uart_get_mctrl,
	.stop_tx	= mvebu_uart_stop_tx,
	.start_tx	= mvebu_uart_start_tx,
	.stop_rx	= mvebu_uart_stop_rx,
	.break_ctl	= mvebu_uart_break_ctl,
	.startup	= mvebu_uart_startup,
	.shutdown	= mvebu_uart_shutdown,
	.set_termios	= mvebu_uart_set_termios,
	.type		= mvebu_uart_type,
	.release_port	= mvebu_uart_release_port,
	.request_port	= mvebu_uart_request_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= mvebu_uart_get_poll_char,
	.poll_put_char	= mvebu_uart_put_poll_char,
#endif
};

/* Console Driver Operations */

#ifdef CONFIG_SERIAL_MVEBU_CONSOLE
/* Early Console */
static void mvebu_uart_putc(struct uart_port *port, unsigned char c)
{
	unsigned int st;

	for (;;) {
		st = readl(port->membase + UART_STAT);
		if (!(st & STAT_TX_FIFO_FUL))
			break;
	}

	/* At this early stage the DT is not parsed yet; only use UART0 */
	writel(c, port->membase + UART_STD_TSH);

	for (;;) {
		st = readl(port->membase + UART_STAT);
		if (st & STAT_TX_FIFO_EMP)
			break;
	}
}

static void mvebu_uart_putc_early_write(struct console *con,
					const char *s,
					unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, mvebu_uart_putc);
}

static int __init
mvebu_uart_early_console_setup(struct earlycon_device *device,
			       const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = mvebu_uart_putc_early_write;

	return 0;
}

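/*
 * Usage sketch (illustrative, not part of the original file): with the
 * OF_EARLYCON_DECLARE() below and a "stdout-path" property in the device
 * tree, passing just "earlycon" on the kernel command line is enough to get
 * early output. Without a device-tree hint, something like
 * "earlycon=ar3700_uart,<uart0-mmio-base>" can be used instead, where
 * <uart0-mmio-base> is a placeholder for the physical address of the
 * standard UART0 register block on the board in question.
 */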
EARLYCON_DECLARE(ar3700_uart, mvebu_uart_early_console_setup);
OF_EARLYCON_DECLARE(ar3700_uart, "marvell,armada-3700-uart",
		    mvebu_uart_early_console_setup);

static void wait_for_xmitr(struct uart_port *port)
{
	u32 val;

	readl_poll_timeout_atomic(port->membase + UART_STAT, val,
				  (val & STAT_TX_RDY(port)), 1, 10000);
}

static void wait_for_xmite(struct uart_port *port)
{
	u32 val;

	readl_poll_timeout_atomic(port->membase + UART_STAT, val,
				  (val & STAT_TX_EMP), 1, 10000);
}

static void mvebu_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
	wait_for_xmitr(port);
	writel(ch, port->membase + UART_TSH(port));
}

static void mvebu_uart_console_write(struct console *co, const char *s,
				     unsigned int count)
{
	struct uart_port *port = &mvebu_uart_ports[co->index];
	unsigned long flags;
	unsigned int ier, intr, ctl;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
	intr = readl(port->membase + UART_INTR(port)) &
		(CTRL_RX_RDY_INT(port) | CTRL_TX_RDY_INT(port));
	writel(0, port->membase + UART_CTRL(port));
	writel(0, port->membase + UART_INTR(port));

	uart_console_write(port, s, count, mvebu_uart_console_putchar);

	wait_for_xmite(port);

	if (ier)
		writel(ier, port->membase + UART_CTRL(port));

	if (intr) {
		ctl = intr | readl(port->membase + UART_INTR(port));
		writel(ctl, port->membase + UART_INTR(port));
	}

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}

static int mvebu_uart_console_setup(struct console *co, char *options)
{
	struct uart_port *port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index < 0 || co->index >= MVEBU_NR_UARTS)
		return -EINVAL;

	port = &mvebu_uart_ports[co->index];

	if (!port->mapbase || !port->membase) {
		pr_debug("console on ttyMV%i not present\n", co->index);
		return -ENODEV;
	}

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(port, co, baud, parity, bits, flow);
}

static struct uart_driver mvebu_uart_driver;

static struct console mvebu_uart_console = {
	.name	= "ttyMV",
	.write	= mvebu_uart_console_write,
	.device	= uart_console_device,
	.setup	= mvebu_uart_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &mvebu_uart_driver,
};

static int __init mvebu_uart_console_init(void)
{
	register_console(&mvebu_uart_console);
	return 0;
}

console_initcall(mvebu_uart_console_init);

#endif /* CONFIG_SERIAL_MVEBU_CONSOLE */

static struct uart_driver mvebu_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= DRIVER_NAME,
	.dev_name	= "ttyMV",
	.nr		= MVEBU_NR_UARTS,
#ifdef CONFIG_SERIAL_MVEBU_CONSOLE
	.cons		= &mvebu_uart_console,
#endif
};

#if defined(CONFIG_PM)
static int mvebu_uart_suspend(struct device *dev)
{
	struct mvebu_uart *mvuart = dev_get_drvdata(dev);
	struct uart_port *port = mvuart->port;
	unsigned long flags;

	uart_suspend_port(&mvebu_uart_driver, port);

	mvuart->pm_regs.rbr = readl(port->membase + UART_RBR(port));
	mvuart->pm_regs.tsh = readl(port->membase + UART_TSH(port));
	mvuart->pm_regs.ctrl = readl(port->membase + UART_CTRL(port));
	mvuart->pm_regs.intr = readl(port->membase + UART_INTR(port));
	mvuart->pm_regs.stat = readl(port->membase + UART_STAT);
	spin_lock_irqsave(&mvebu_uart_lock, flags);
	mvuart->pm_regs.brdv = readl(port->membase + UART_BRDV);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
	mvuart->pm_regs.osamp = readl(port->membase + UART_OSAMP);

	device_set_wakeup_enable(dev, true);

	return 0;
}

static int mvebu_uart_resume(struct device *dev)
{
	struct mvebu_uart *mvuart = dev_get_drvdata(dev);
	struct uart_port *port = mvuart->port;
	unsigned long flags;

	writel(mvuart->pm_regs.rbr, port->membase + UART_RBR(port));
	writel(mvuart->pm_regs.tsh, port->membase + UART_TSH(port));
	writel(mvuart->pm_regs.ctrl, port->membase + UART_CTRL(port));
	writel(mvuart->pm_regs.intr, port->membase + UART_INTR(port));
	writel(mvuart->pm_regs.stat, port->membase + UART_STAT);
	spin_lock_irqsave(&mvebu_uart_lock, flags);
	writel(mvuart->pm_regs.brdv, port->membase + UART_BRDV);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
	writel(mvuart->pm_regs.osamp, port->membase + UART_OSAMP);

	uart_resume_port(&mvebu_uart_driver, port);

	return 0;
}

static const struct dev_pm_ops mvebu_uart_pm_ops = {
	.suspend	= mvebu_uart_suspend,
	.resume		= mvebu_uart_resume,
};
#endif /* CONFIG_PM */

static const struct of_device_id mvebu_uart_of_match[];

/* Counter to keep track of each UART port id when not using CONFIG_OF */
static int uart_num_counter;

static int mvebu_uart_probe(struct platform_device *pdev)
{
	struct resource *reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	const struct of_device_id *match = of_match_device(mvebu_uart_of_match,
							   &pdev->dev);
	struct uart_port *port;
	struct mvebu_uart *mvuart;
	int id, irq;

	if (!reg) {
		dev_err(&pdev->dev, "no registers defined\n");
		return -EINVAL;
	}

	/* Assume that all UART ports have a DT alias or none has */
	id = of_alias_get_id(pdev->dev.of_node, "serial");
	if (!pdev->dev.of_node || id < 0)
		pdev->id = uart_num_counter++;
	else
		pdev->id = id;

	if (pdev->id >= MVEBU_NR_UARTS) {
		dev_err(&pdev->dev, "cannot have more than %d UART ports\n",
			MVEBU_NR_UARTS);
		return -EINVAL;
	}

	port = &mvebu_uart_ports[pdev->id];

	spin_lock_init(&port->lock);

	port->dev = &pdev->dev;
	port->type = PORT_MVEBU;
	port->ops = &mvebu_uart_ops;
	port->regshift = 0;

	port->fifosize = 32;
	port->iotype = UPIO_MEM32;
	port->flags = UPF_FIXED_PORT;
	port->line = pdev->id;

	/*
	 * The IRQ number is not stored in this structure because we may have
	 * two of them per port (RX and TX). Instead, use the ->irq[] array of
	 * the driver's own mvebu_uart structure.
	 */
	port->irq = 0;
	port->irqflags = 0;
	port->mapbase = reg->start;

	port->membase = devm_ioremap_resource(&pdev->dev, reg);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);

	mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
			      GFP_KERNEL);
	if (!mvuart)
		return -ENOMEM;

	/* Get controller data depending on the compatible string */
	mvuart->data = (struct mvebu_uart_driver_data *)match->data;
	mvuart->port = port;

	port->private_data = mvuart;
	platform_set_drvdata(pdev, mvuart);

	/* Get fixed clock frequency */
	mvuart->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mvuart->clk)) {
		if (PTR_ERR(mvuart->clk) == -EPROBE_DEFER)
			return PTR_ERR(mvuart->clk);

		if (IS_EXTENDED(port)) {
			dev_err(&pdev->dev, "unable to get UART clock\n");
			return PTR_ERR(mvuart->clk);
		}
	} else {
		if (!clk_prepare_enable(mvuart->clk))
			port->uartclk = clk_get_rate(mvuart->clk);
	}

	/* Manage interrupts */
	if (platform_irq_count(pdev) == 1) {
		/* Old bindings: just the single unnamed UART0 IRQ */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		mvuart->irq[UART_IRQ_SUM] = irq;
	} else {
		/*
		 * New bindings: named interrupts (RX, TX) for both UARTs;
		 * only make use of the uart-rx and uart-tx interrupts, do not
		 * use the uart-sum interrupt of the UART0 port.
		 */
		irq = platform_get_irq_byname(pdev, "uart-rx");
		if (irq < 0)
			return irq;

		mvuart->irq[UART_RX_IRQ] = irq;

		irq = platform_get_irq_byname(pdev, "uart-tx");
		if (irq < 0)
			return irq;

		mvuart->irq[UART_TX_IRQ] = irq;
	}

	/* UART Soft Reset */
	writel(CTRL_SOFT_RST, port->membase + UART_CTRL(port));
	udelay(1);
	writel(0, port->membase + UART_CTRL(port));

	return uart_add_one_port(&mvebu_uart_driver, port);
}

static struct mvebu_uart_driver_data uart_std_driver_data = {
	.is_ext = false,
	.regs.rbr  = UART_STD_RBR,
	.regs.tsh  = UART_STD_TSH,
	.regs.ctrl = UART_STD_CTRL1,
	.regs.intr = UART_STD_CTRL2,
	.flags.ctrl_tx_rdy_int = CTRL_STD_TX_RDY_INT,
	.flags.ctrl_rx_rdy_int = CTRL_STD_RX_RDY_INT,
	.flags.stat_tx_rdy = STAT_STD_TX_RDY,
	.flags.stat_rx_rdy = STAT_STD_RX_RDY,
};

static struct mvebu_uart_driver_data uart_ext_driver_data = {
	.is_ext = true,
	.regs.rbr  = UART_EXT_RBR,
	.regs.tsh  = UART_EXT_TSH,
	.regs.ctrl = UART_EXT_CTRL1,
	.regs.intr = UART_EXT_CTRL2,
	.flags.ctrl_tx_rdy_int = CTRL_EXT_TX_RDY_INT,
	.flags.ctrl_rx_rdy_int = CTRL_EXT_RX_RDY_INT,
	.flags.stat_tx_rdy = STAT_EXT_TX_RDY,
	.flags.stat_rx_rdy = STAT_EXT_RX_RDY,
};

/* Match table for of_platform binding */
static const struct of_device_id mvebu_uart_of_match[] = {
	{
		.compatible = "marvell,armada-3700-uart",
		.data = (void *)&uart_std_driver_data,
	},
	{
		.compatible = "marvell,armada-3700-uart-ext",
		.data = (void *)&uart_ext_driver_data,
	},
	{}
};

static struct platform_driver mvebu_uart_platform_driver = {
	.probe	= mvebu_uart_probe,
	.driver	= {
		.name		= "mvebu-uart",
		.of_match_table	= of_match_ptr(mvebu_uart_of_match),
		.suppress_bind_attrs = true,
#if defined(CONFIG_PM)
		.pm	= &mvebu_uart_pm_ops,
#endif /* CONFIG_PM */
	},
};

/* This code is based on the clk-fixed-factor.c driver and modified. */

struct mvebu_uart_clock {
	struct clk_hw clk_hw;
	int clock_idx;
	u32 pm_context_reg1;
	u32 pm_context_reg2;
};

struct mvebu_uart_clock_base {
	struct mvebu_uart_clock clocks[2];
	unsigned int parent_rates[5];
	int parent_idx;
	unsigned int div;
	void __iomem *reg1;
	void __iomem *reg2;
	bool configured;
};

#define PARENT_CLOCK_XTAL	4

#define to_uart_clock(hw) container_of(hw, struct mvebu_uart_clock, clk_hw)
#define to_uart_clock_base(uart_clock) container_of(uart_clock, \
	struct mvebu_uart_clock_base, clocks[uart_clock->clock_idx])

static int mvebu_uart_clock_prepare(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	unsigned int prev_clock_idx, prev_clock_rate, prev_d1d2;
	unsigned int parent_clock_idx, parent_clock_rate;
	unsigned long flags;
	unsigned int d1, d2;
	u64 divisor;
	u32 val;

	/*
	 * This function just reconfigures the UART Clock Control register
	 * (located in the UART1 address space, where it controls both UART1
	 * and UART2) to the selected UART base clock and recalculates the
	 * current UART1/UART2 divisors in their respective address spaces, so
	 * that the final baudrate is not changed by switching the UART parent
	 * clock. This is required because otherwise the kernel's boot log
	 * stops working: the UART baudrate must not change during this setup.
	 * It is a one-time operation: it sets `configured` to true and is
	 * skipped on subsequent calls. Because this UART Clock Control
	 * register (UART_BRDV) is shared between the UART1 baudrate function,
	 * the UART1 clock selector and the UART2 clock selector, every access
	 * to UART_BRDV (reg1) needs to be protected by a lock.
	 */

	spin_lock_irqsave(&mvebu_uart_lock, flags);

	if (uart_clock_base->configured) {
		spin_unlock_irqrestore(&mvebu_uart_lock, flags);
		return 0;
	}

	parent_clock_idx = uart_clock_base->parent_idx;
	parent_clock_rate = uart_clock_base->parent_rates[parent_clock_idx];

	val = readl(uart_clock_base->reg1);

	if (uart_clock_base->div > CLK_TBG_DIV1_MAX) {
		d1 = CLK_TBG_DIV1_MAX;
		d2 = uart_clock_base->div / CLK_TBG_DIV1_MAX;
	} else {
		d1 = uart_clock_base->div;
		d2 = 1;
	}

	if (val & CLK_NO_XTAL) {
		prev_clock_idx = (val >> CLK_TBG_SEL_SHIFT) & CLK_TBG_SEL_MASK;
		prev_d1d2 = ((val >> CLK_TBG_DIV1_SHIFT) & CLK_TBG_DIV1_MASK) *
			    ((val >> CLK_TBG_DIV2_SHIFT) & CLK_TBG_DIV2_MASK);
	} else {
		prev_clock_idx = PARENT_CLOCK_XTAL;
		prev_d1d2 = 1;
	}

	/* Note that uart_clock_base->parent_rates[i] may not be available */
	prev_clock_rate = uart_clock_base->parent_rates[prev_clock_idx];

	/* Recalculate UART1 divisor so UART1 baudrate does not change */
	if (prev_clock_rate) {
		divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
						parent_clock_rate * prev_d1d2,
						prev_clock_rate * d1 * d2);
		if (divisor < 1)
			divisor = 1;
		else if (divisor > BRDV_BAUD_MAX)
			divisor = BRDV_BAUD_MAX;
		val = (val & ~BRDV_BAUD_MASK) | divisor;
	}

	if (parent_clock_idx != PARENT_CLOCK_XTAL) {
		/* Do not use XTAL, select TBG clock and TBG d1 * d2 divisors */
		val |= CLK_NO_XTAL;
		val &= ~(CLK_TBG_DIV1_MASK << CLK_TBG_DIV1_SHIFT);
		val |= d1 << CLK_TBG_DIV1_SHIFT;
		val &= ~(CLK_TBG_DIV2_MASK << CLK_TBG_DIV2_SHIFT);
		val |= d2 << CLK_TBG_DIV2_SHIFT;
		val &= ~(CLK_TBG_SEL_MASK << CLK_TBG_SEL_SHIFT);
		val |= parent_clock_idx << CLK_TBG_SEL_SHIFT;
	} else {
		/* Use XTAL, TBG bits are then ignored */
		val &= ~CLK_NO_XTAL;
	}

	writel(val, uart_clock_base->reg1);

	/* Recalculate UART2 divisor so UART2 baudrate does not change */
	if (prev_clock_rate) {
		val = readl(uart_clock_base->reg2);
		divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
						parent_clock_rate * prev_d1d2,
						prev_clock_rate * d1 * d2);
		if (divisor < 1)
			divisor = 1;
		else if (divisor > BRDV_BAUD_MAX)
			divisor = BRDV_BAUD_MAX;
		val = (val & ~BRDV_BAUD_MASK) | divisor;
		writel(val, uart_clock_base->reg2);
	}

	uart_clock_base->configured = true;

	spin_unlock_irqrestore(&mvebu_uart_lock, flags);

	return 0;
}

static int mvebu_uart_clock_enable(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mvebu_uart_lock, flags);

	val = readl(uart_clock_base->reg1);

	if (uart_clock->clock_idx == 0)
		val &= ~UART1_CLK_DIS;
	else
		val &= ~UART2_CLK_DIS;

	writel(val, uart_clock_base->reg1);

	spin_unlock_irqrestore(&mvebu_uart_lock, flags);

	return 0;
}

static void mvebu_uart_clock_disable(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mvebu_uart_lock, flags);

	val = readl(uart_clock_base->reg1);

	if (uart_clock->clock_idx == 0)
		val |= UART1_CLK_DIS;
	else
		val |= UART2_CLK_DIS;

	writel(val, uart_clock_base->reg1);

	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
}

static int mvebu_uart_clock_is_enabled(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	u32 val;

	val = readl(uart_clock_base->reg1);

	if (uart_clock->clock_idx == 0)
		return !(val & UART1_CLK_DIS);
	else
		return !(val & UART2_CLK_DIS);
}

static int mvebu_uart_clock_save_context(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	unsigned long flags;

	spin_lock_irqsave(&mvebu_uart_lock, flags);
	uart_clock->pm_context_reg1 = readl(uart_clock_base->reg1);
	uart_clock->pm_context_reg2 = readl(uart_clock_base->reg2);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);

	return 0;
}

static void mvebu_uart_clock_restore_context(struct clk_hw *hw)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);
	unsigned long flags;

	spin_lock_irqsave(&mvebu_uart_lock, flags);
	writel(uart_clock->pm_context_reg1, uart_clock_base->reg1);
	writel(uart_clock->pm_context_reg2, uart_clock_base->reg2);
	spin_unlock_irqrestore(&mvebu_uart_lock, flags);
}

static unsigned long mvebu_uart_clock_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);

	return parent_rate / uart_clock_base->div;
}

static long mvebu_uart_clock_round_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *parent_rate)
{
	struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
	struct mvebu_uart_clock_base *uart_clock_base =
		to_uart_clock_base(uart_clock);

	return *parent_rate / uart_clock_base->div;
}

static int mvebu_uart_clock_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	/*
	 * We must report success but we can do so unconditionally because
	 * mvebu_uart_clock_round_rate returns values that ensure this call is
	 * a nop.
	 */

	return 0;
}

static const struct clk_ops mvebu_uart_clock_ops = {
	.prepare = mvebu_uart_clock_prepare,
	.enable = mvebu_uart_clock_enable,
	.disable = mvebu_uart_clock_disable,
	.is_enabled = mvebu_uart_clock_is_enabled,
	.save_context = mvebu_uart_clock_save_context,
	.restore_context = mvebu_uart_clock_restore_context,
	.round_rate = mvebu_uart_clock_round_rate,
	.set_rate = mvebu_uart_clock_set_rate,
	.recalc_rate = mvebu_uart_clock_recalc_rate,
};

static int mvebu_uart_clock_register(struct device *dev,
				     struct mvebu_uart_clock *uart_clock,
				     const char *name,
				     const char *parent_name)
{
	struct clk_init_data init = { };

	uart_clock->clk_hw.init = &init;

	init.name = name;
	init.ops = &mvebu_uart_clock_ops;
	init.flags = 0;
	init.num_parents = 1;
	init.parent_names = &parent_name;

	return devm_clk_hw_register(dev, &uart_clock->clk_hw);
}

static int mvebu_uart_clock_probe(struct platform_device *pdev)
{
	static const char *const uart_clk_names[] = { "uart_1", "uart_2" };
	static const char *const parent_clk_names[] = { "TBG-A-P", "TBG-B-P",
							"TBG-A-S", "TBG-B-S",
							"xtal" };
	struct clk *parent_clks[ARRAY_SIZE(parent_clk_names)];
	struct mvebu_uart_clock_base *uart_clock_base;
	struct clk_hw_onecell_data *hw_clk_data;
	struct device *dev = &pdev->dev;
	int i, parent_clk_idx, ret;
	unsigned long div, rate;
	struct resource *res;
	unsigned int d1, d2;

	BUILD_BUG_ON(ARRAY_SIZE(uart_clk_names) !=
		     ARRAY_SIZE(uart_clock_base->clocks));
	BUILD_BUG_ON(ARRAY_SIZE(parent_clk_names) !=
		     ARRAY_SIZE(uart_clock_base->parent_rates));

	uart_clock_base = devm_kzalloc(dev,
				       sizeof(*uart_clock_base),
				       GFP_KERNEL);
	if (!uart_clock_base)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Couldn't get first register\n");
		return -ENOENT;
	}

	/*
	 * The UART Clock Control register (reg1 / UART_BRDV) is in the address
	 * space of UART1 (standard UART variant), controls the parent clock
	 * and dividers for both UART1 and UART2, and is supplied via DT as the
	 * first resource. Therefore use devm_ioremap() rather than
	 * devm_ioremap_resource() to avoid resource conflicts with the UART1
	 * driver. Access to UART_BRDV is protected by a lock shared between
	 * the clock and UART drivers.
	 */
	uart_clock_base->reg1 = devm_ioremap(dev, res->start,
					     resource_size(res));
	if (!uart_clock_base->reg1)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(dev, "Couldn't get second register\n");
		return -ENOENT;
	}

	/*
	 * The UART 2 Baud Rate Divisor register (reg2 / UART_BRDV) is in the
	 * address space of UART2 (extended UART variant), controls only one
	 * UART2-specific divider and is supplied via DT as the second
	 * resource. Therefore use devm_ioremap() rather than
	 * devm_ioremap_resource() to avoid resource conflicts with the UART2
	 * driver. Access to UART_BRDV is protected by a lock shared between
	 * the clock and UART drivers.
	 */
	uart_clock_base->reg2 = devm_ioremap(dev, res->start,
					     resource_size(res));
	if (!uart_clock_base->reg2)
		return -ENOMEM;

	hw_clk_data = devm_kzalloc(dev,
				   struct_size(hw_clk_data, hws,
					       ARRAY_SIZE(uart_clk_names)),
				   GFP_KERNEL);
	if (!hw_clk_data)
		return -ENOMEM;

	hw_clk_data->num = ARRAY_SIZE(uart_clk_names);
	for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
		hw_clk_data->hws[i] = &uart_clock_base->clocks[i].clk_hw;
		uart_clock_base->clocks[i].clock_idx = i;
	}

	parent_clk_idx = -1;

	for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
		parent_clks[i] = devm_clk_get(dev, parent_clk_names[i]);
		if (IS_ERR(parent_clks[i])) {
			if (PTR_ERR(parent_clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			dev_warn(dev, "Couldn't get the parent clock %s: %ld\n",
				 parent_clk_names[i], PTR_ERR(parent_clks[i]));
			continue;
		}

		ret = clk_prepare_enable(parent_clks[i]);
		if (ret) {
			dev_warn(dev, "Couldn't enable parent clock %s: %d\n",
				 parent_clk_names[i], ret);
			continue;
		}
		rate = clk_get_rate(parent_clks[i]);
		uart_clock_base->parent_rates[i] = rate;

		if (i != PARENT_CLOCK_XTAL) {
			/*
			 * Calculate the smallest TBG d1 and d2 divisors that
			 * still can provide 9600 baudrate.
			 */
			d1 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
					  BRDV_BAUD_MAX);
			if (d1 < 1)
				d1 = 1;
			else if (d1 > CLK_TBG_DIV1_MAX)
				d1 = CLK_TBG_DIV1_MAX;

			d2 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
					  BRDV_BAUD_MAX * d1);
			if (d2 < 1)
				d2 = 1;
			else if (d2 > CLK_TBG_DIV2_MAX)
				d2 = CLK_TBG_DIV2_MAX;
		} else {
			/*
			 * When UART clock uses XTAL clock as a source then it
			 * is not possible to use d1 and d2 divisors.
			 */
			d1 = d2 = 1;
		}

		/* Skip clock source which cannot provide 9600 baudrate */
		if (rate > 9600 * OSAMP_MAX_DIVISOR * BRDV_BAUD_MAX * d1 * d2)
			continue;

		/*
		 * Choose TBG clock source with the smallest divisors. Use XTAL
		 * clock source only in case TBG is not available as XTAL
		 * cannot be used for baudrates higher than 230400.
		 */
		if (parent_clk_idx == -1 ||
		    (i != PARENT_CLOCK_XTAL && div > d1 * d2)) {
			parent_clk_idx = i;
			div = d1 * d2;
		}
	}

	for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
		if (i == parent_clk_idx || IS_ERR(parent_clks[i]))
			continue;
		clk_disable_unprepare(parent_clks[i]);
		devm_clk_put(dev, parent_clks[i]);
	}

	if (parent_clk_idx == -1) {
		dev_err(dev, "No usable parent clock\n");
		return -ENOENT;
	}

	uart_clock_base->parent_idx = parent_clk_idx;
	uart_clock_base->div = div;

	dev_notice(dev, "Using parent clock %s as base UART clock\n",
		   __clk_get_name(parent_clks[parent_clk_idx]));

	for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
		ret = mvebu_uart_clock_register(dev,
				&uart_clock_base->clocks[i],
				uart_clk_names[i],
				__clk_get_name(parent_clks[parent_clk_idx]));
		if (ret) {
			dev_err(dev, "Can't register UART clock %d: %d\n",
				i, ret);
			return ret;
		}
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   hw_clk_data);
}

static const struct of_device_id mvebu_uart_clock_of_match[] = {
	{ .compatible = "marvell,armada-3700-uart-clock", },
	{ }
};

static struct platform_driver mvebu_uart_clock_platform_driver = {
	.probe = mvebu_uart_clock_probe,
	.driver = {
		.name = "mvebu-uart-clock",
		.of_match_table = mvebu_uart_clock_of_match,
	},
};

static int __init mvebu_uart_init(void)
{
	int ret;

	ret = uart_register_driver(&mvebu_uart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&mvebu_uart_clock_platform_driver);
	if (ret) {
		uart_unregister_driver(&mvebu_uart_driver);
		return ret;
	}

	ret = platform_driver_register(&mvebu_uart_platform_driver);
	if (ret) {
		platform_driver_unregister(&mvebu_uart_clock_platform_driver);
		uart_unregister_driver(&mvebu_uart_driver);
		return ret;
	}

	return 0;
}
arch_initcall(mvebu_uart_init);
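
/*
 * Illustrative device-tree sketch (an assumption for documentation purposes,
 * not taken from this file; the reg and interrupts values are placeholders
 * that depend on the SoC/board description):
 *
 *	uartclk: clock-controller@... {
 *		compatible = "marvell,armada-3700-uart-clock";
 *		reg = <...>, <...>;	// UART1 then UART2 register windows
 *		clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, <&tbg 3>, <&xtalclk>;
 *		clock-names = "TBG-A-P", "TBG-B-P", "TBG-A-S", "TBG-B-S",
 *			      "xtal";
 *		#clock-cells = <1>;
 *	};
 *
 *	uart0: serial@... {
 *		compatible = "marvell,armada-3700-uart";
 *		reg = <...>;
 *		clocks = <&uartclk 0>;
 *		interrupts = <...>, <...>, <...>;
 *		interrupt-names = "uart-sum", "uart-rx", "uart-tx";
 *	};
 *
 * The clock-names correspond to parent_clk_names[] in
 * mvebu_uart_clock_probe(), and the interrupt-names correspond to what
 * mvebu_uart_probe() requests via platform_get_irq_byname().
 */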