1 /* 2 * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO) 3 * 4 * Copyright (C) 2002 - 2011 Paul Mundt 5 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007). 6 * 7 * based off of the old drivers/char/sh-sci.c by: 8 * 9 * Copyright (C) 1999, 2000 Niibe Yutaka 10 * Copyright (C) 2000 Sugioka Toshinobu 11 * Modified to support multiple serial ports. Stuart Menefy (May 2000). 12 * Modified to support SecureEdge. David McCullough (2002) 13 * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003). 14 * Removed SH7300 support (Jul 2007). 15 * 16 * This file is subject to the terms and conditions of the GNU General Public 17 * License. See the file "COPYING" in the main directory of this archive 18 * for more details. 19 */ 20 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 21 #define SUPPORT_SYSRQ 22 #endif 23 24 #undef DEBUG 25 26 #include <linux/module.h> 27 #include <linux/errno.h> 28 #include <linux/timer.h> 29 #include <linux/interrupt.h> 30 #include <linux/tty.h> 31 #include <linux/tty_flip.h> 32 #include <linux/serial.h> 33 #include <linux/major.h> 34 #include <linux/string.h> 35 #include <linux/sysrq.h> 36 #include <linux/ioport.h> 37 #include <linux/mm.h> 38 #include <linux/init.h> 39 #include <linux/delay.h> 40 #include <linux/console.h> 41 #include <linux/platform_device.h> 42 #include <linux/serial_sci.h> 43 #include <linux/notifier.h> 44 #include <linux/pm_runtime.h> 45 #include <linux/cpufreq.h> 46 #include <linux/clk.h> 47 #include <linux/ctype.h> 48 #include <linux/err.h> 49 #include <linux/dmaengine.h> 50 #include <linux/scatterlist.h> 51 #include <linux/slab.h> 52 53 #ifdef CONFIG_SUPERH 54 #include <asm/sh_bios.h> 55 #endif 56 57 #include "sh-sci.h" 58 59 struct sci_port { 60 struct uart_port port; 61 62 /* Platform configuration */ 63 struct plat_sci_port *cfg; 64 65 /* Port enable callback */ 66 void (*enable)(struct uart_port *port); 67 68 /* Port disable callback */ 69 void (*disable)(struct uart_port *port); 70 71 /* Break timer */ 72 struct timer_list break_timer; 73 int break_flag; 74 75 /* Interface clock */ 76 struct clk *iclk; 77 /* Function clock */ 78 struct clk *fclk; 79 80 struct dma_chan *chan_tx; 81 struct dma_chan *chan_rx; 82 83 #ifdef CONFIG_SERIAL_SH_SCI_DMA 84 struct dma_async_tx_descriptor *desc_tx; 85 struct dma_async_tx_descriptor *desc_rx[2]; 86 dma_cookie_t cookie_tx; 87 dma_cookie_t cookie_rx[2]; 88 dma_cookie_t active_rx; 89 struct scatterlist sg_tx; 90 unsigned int sg_len_tx; 91 struct scatterlist sg_rx[2]; 92 size_t buf_len_rx; 93 struct sh_dmae_slave param_tx; 94 struct sh_dmae_slave param_rx; 95 struct work_struct work_tx; 96 struct work_struct work_rx; 97 struct timer_list rx_timer; 98 unsigned int rx_timeout; 99 #endif 100 101 struct notifier_block freq_transition; 102 }; 103 104 /* Function prototypes */ 105 static void sci_start_tx(struct uart_port *port); 106 static void sci_stop_tx(struct uart_port *port); 107 static void sci_start_rx(struct uart_port *port); 108 109 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS 110 111 static struct sci_port sci_ports[SCI_NPORTS]; 112 static struct uart_driver sci_uart_driver; 113 114 static inline struct sci_port * 115 to_sci_port(struct uart_port *uart) 116 { 117 return container_of(uart, struct sci_port, port); 118 } 119 120 struct plat_sci_reg { 121 u8 offset, size; 122 }; 123 124 /* Helper for invalidating specific entries of an inherited map. */ 125 #define sci_reg_invalid { .offset = 0, .size = 0 } 126 127 static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { 128 [SCIx_PROBE_REGTYPE] = { 129 [0 ... SCIx_NR_REGS - 1] = sci_reg_invalid, 130 }, 131 132 /* 133 * Common SCI definitions, dependent on the port's regshift 134 * value. 135 */ 136 [SCIx_SCI_REGTYPE] = { 137 [SCSMR] = { 0x00, 8 }, 138 [SCBRR] = { 0x01, 8 }, 139 [SCSCR] = { 0x02, 8 }, 140 [SCxTDR] = { 0x03, 8 }, 141 [SCxSR] = { 0x04, 8 }, 142 [SCxRDR] = { 0x05, 8 }, 143 [SCFCR] = sci_reg_invalid, 144 [SCFDR] = sci_reg_invalid, 145 [SCTFDR] = sci_reg_invalid, 146 [SCRFDR] = sci_reg_invalid, 147 [SCSPTR] = sci_reg_invalid, 148 [SCLSR] = sci_reg_invalid, 149 }, 150 151 /* 152 * Common definitions for legacy IrDA ports, dependent on 153 * regshift value. 154 */ 155 [SCIx_IRDA_REGTYPE] = { 156 [SCSMR] = { 0x00, 8 }, 157 [SCBRR] = { 0x01, 8 }, 158 [SCSCR] = { 0x02, 8 }, 159 [SCxTDR] = { 0x03, 8 }, 160 [SCxSR] = { 0x04, 8 }, 161 [SCxRDR] = { 0x05, 8 }, 162 [SCFCR] = { 0x06, 8 }, 163 [SCFDR] = { 0x07, 16 }, 164 [SCTFDR] = sci_reg_invalid, 165 [SCRFDR] = sci_reg_invalid, 166 [SCSPTR] = sci_reg_invalid, 167 [SCLSR] = sci_reg_invalid, 168 }, 169 170 /* 171 * Common SCIFA definitions. 172 */ 173 [SCIx_SCIFA_REGTYPE] = { 174 [SCSMR] = { 0x00, 16 }, 175 [SCBRR] = { 0x04, 8 }, 176 [SCSCR] = { 0x08, 16 }, 177 [SCxTDR] = { 0x20, 8 }, 178 [SCxSR] = { 0x14, 16 }, 179 [SCxRDR] = { 0x24, 8 }, 180 [SCFCR] = { 0x18, 16 }, 181 [SCFDR] = { 0x1c, 16 }, 182 [SCTFDR] = sci_reg_invalid, 183 [SCRFDR] = sci_reg_invalid, 184 [SCSPTR] = sci_reg_invalid, 185 [SCLSR] = sci_reg_invalid, 186 }, 187 188 /* 189 * Common SCIFB definitions. 190 */ 191 [SCIx_SCIFB_REGTYPE] = { 192 [SCSMR] = { 0x00, 16 }, 193 [SCBRR] = { 0x04, 8 }, 194 [SCSCR] = { 0x08, 16 }, 195 [SCxTDR] = { 0x40, 8 }, 196 [SCxSR] = { 0x14, 16 }, 197 [SCxRDR] = { 0x60, 8 }, 198 [SCFCR] = { 0x18, 16 }, 199 [SCFDR] = { 0x1c, 16 }, 200 [SCTFDR] = sci_reg_invalid, 201 [SCRFDR] = sci_reg_invalid, 202 [SCSPTR] = sci_reg_invalid, 203 [SCLSR] = sci_reg_invalid, 204 }, 205 206 /* 207 * Common SH-3 SCIF definitions. 208 */ 209 [SCIx_SH3_SCIF_REGTYPE] = { 210 [SCSMR] = { 0x00, 8 }, 211 [SCBRR] = { 0x02, 8 }, 212 [SCSCR] = { 0x04, 8 }, 213 [SCxTDR] = { 0x06, 8 }, 214 [SCxSR] = { 0x08, 16 }, 215 [SCxRDR] = { 0x0a, 8 }, 216 [SCFCR] = { 0x0c, 8 }, 217 [SCFDR] = { 0x0e, 16 }, 218 [SCTFDR] = sci_reg_invalid, 219 [SCRFDR] = sci_reg_invalid, 220 [SCSPTR] = sci_reg_invalid, 221 [SCLSR] = sci_reg_invalid, 222 }, 223 224 /* 225 * Common SH-4(A) SCIF(B) definitions. 226 */ 227 [SCIx_SH4_SCIF_REGTYPE] = { 228 [SCSMR] = { 0x00, 16 }, 229 [SCBRR] = { 0x04, 8 }, 230 [SCSCR] = { 0x08, 16 }, 231 [SCxTDR] = { 0x0c, 8 }, 232 [SCxSR] = { 0x10, 16 }, 233 [SCxRDR] = { 0x14, 8 }, 234 [SCFCR] = { 0x18, 16 }, 235 [SCFDR] = { 0x1c, 16 }, 236 [SCTFDR] = sci_reg_invalid, 237 [SCRFDR] = sci_reg_invalid, 238 [SCSPTR] = { 0x20, 16 }, 239 [SCLSR] = { 0x24, 16 }, 240 }, 241 242 /* 243 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR 244 * register. 245 */ 246 [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = { 247 [SCSMR] = { 0x00, 16 }, 248 [SCBRR] = { 0x04, 8 }, 249 [SCSCR] = { 0x08, 16 }, 250 [SCxTDR] = { 0x0c, 8 }, 251 [SCxSR] = { 0x10, 16 }, 252 [SCxRDR] = { 0x14, 8 }, 253 [SCFCR] = { 0x18, 16 }, 254 [SCFDR] = { 0x1c, 16 }, 255 [SCTFDR] = sci_reg_invalid, 256 [SCRFDR] = sci_reg_invalid, 257 [SCSPTR] = sci_reg_invalid, 258 [SCLSR] = { 0x24, 16 }, 259 }, 260 261 /* 262 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data 263 * count registers. 264 */ 265 [SCIx_SH4_SCIF_FIFODATA_REGTYPE] = { 266 [SCSMR] = { 0x00, 16 }, 267 [SCBRR] = { 0x04, 8 }, 268 [SCSCR] = { 0x08, 16 }, 269 [SCxTDR] = { 0x0c, 8 }, 270 [SCxSR] = { 0x10, 16 }, 271 [SCxRDR] = { 0x14, 8 }, 272 [SCFCR] = { 0x18, 16 }, 273 [SCFDR] = { 0x1c, 16 }, 274 [SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */ 275 [SCRFDR] = { 0x20, 16 }, 276 [SCSPTR] = { 0x24, 16 }, 277 [SCLSR] = { 0x28, 16 }, 278 }, 279 280 /* 281 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR 282 * registers. 283 */ 284 [SCIx_SH7705_SCIF_REGTYPE] = { 285 [SCSMR] = { 0x00, 16 }, 286 [SCBRR] = { 0x04, 8 }, 287 [SCSCR] = { 0x08, 16 }, 288 [SCxTDR] = { 0x20, 8 }, 289 [SCxSR] = { 0x14, 16 }, 290 [SCxRDR] = { 0x24, 8 }, 291 [SCFCR] = { 0x18, 16 }, 292 [SCFDR] = { 0x1c, 16 }, 293 [SCTFDR] = sci_reg_invalid, 294 [SCRFDR] = sci_reg_invalid, 295 [SCSPTR] = sci_reg_invalid, 296 [SCLSR] = sci_reg_invalid, 297 }, 298 }; 299 300 /* 301 * The "offset" here is rather misleading, in that it refers to an enum 302 * value relative to the port mapping rather than the fixed offset 303 * itself, which needs to be manually retrieved from the platform's 304 * register map for the given port. 305 */ 306 static unsigned int sci_serial_in(struct uart_port *p, int offset) 307 { 308 struct sci_port *s = to_sci_port(p); 309 struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + offset; 310 311 if (reg->size == 8) 312 return ioread8(p->membase + (reg->offset << p->regshift)); 313 else if (reg->size == 16) 314 return ioread16(p->membase + (reg->offset << p->regshift)); 315 else 316 WARN(1, "Invalid register access\n"); 317 318 return 0; 319 } 320 321 static void sci_serial_out(struct uart_port *p, int offset, int value) 322 { 323 struct sci_port *s = to_sci_port(p); 324 struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + offset; 325 326 if (reg->size == 8) 327 iowrite8(value, p->membase + (reg->offset << p->regshift)); 328 else if (reg->size == 16) 329 iowrite16(value, p->membase + (reg->offset << p->regshift)); 330 else 331 WARN(1, "Invalid register access\n"); 332 } 333 334 #define sci_in(up, offset) (up->serial_in(up, offset)) 335 #define sci_out(up, offset, value) (up->serial_out(up, offset, value)) 336 337 static int sci_probe_regmap(struct plat_sci_port *cfg) 338 { 339 switch (cfg->type) { 340 case PORT_SCI: 341 cfg->regtype = SCIx_SCI_REGTYPE; 342 break; 343 case PORT_IRDA: 344 cfg->regtype = SCIx_IRDA_REGTYPE; 345 break; 346 case PORT_SCIFA: 347 cfg->regtype = SCIx_SCIFA_REGTYPE; 348 break; 349 case PORT_SCIFB: 350 cfg->regtype = SCIx_SCIFB_REGTYPE; 351 break; 352 case PORT_SCIF: 353 /* 354 * The SH-4 is a bit of a misnomer here, although that's 355 * where this particular port layout originated. This 356 * configuration (or some slight variation thereof) 357 * remains the dominant model for all SCIFs. 358 */ 359 cfg->regtype = SCIx_SH4_SCIF_REGTYPE; 360 break; 361 default: 362 printk(KERN_ERR "Can't probe register map for given port\n"); 363 return -EINVAL; 364 } 365 366 return 0; 367 } 368 369 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) 370 371 #ifdef CONFIG_CONSOLE_POLL 372 static int sci_poll_get_char(struct uart_port *port) 373 { 374 unsigned short status; 375 int c; 376 377 do { 378 status = sci_in(port, SCxSR); 379 if (status & SCxSR_ERRORS(port)) { 380 sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); 381 continue; 382 } 383 break; 384 } while (1); 385 386 if (!(status & SCxSR_RDxF(port))) 387 return NO_POLL_CHAR; 388 389 c = sci_in(port, SCxRDR); 390 391 /* Dummy read */ 392 sci_in(port, SCxSR); 393 sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); 394 395 return c; 396 } 397 #endif 398 399 static void sci_poll_put_char(struct uart_port *port, unsigned char c) 400 { 401 unsigned short status; 402 403 do { 404 status = sci_in(port, SCxSR); 405 } while (!(status & SCxSR_TDxE(port))); 406 407 sci_out(port, SCxTDR, c); 408 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port)); 409 } 410 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ 411 412 static void sci_init_pins(struct uart_port *port, unsigned int cflag) 413 { 414 struct sci_port *s = to_sci_port(port); 415 struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR; 416 417 /* 418 * Use port-specific handler if provided. 419 */ 420 if (s->cfg->ops && s->cfg->ops->init_pins) { 421 s->cfg->ops->init_pins(port, cflag); 422 return; 423 } 424 425 /* 426 * For the generic path SCSPTR is necessary. Bail out if that's 427 * unavailable, too. 428 */ 429 if (!reg->size) 430 return; 431 432 if (!(cflag & CRTSCTS)) 433 sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */ 434 } 435 436 #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ 437 defined(CONFIG_CPU_SUBTYPE_SH7780) || \ 438 defined(CONFIG_CPU_SUBTYPE_SH7785) || \ 439 defined(CONFIG_CPU_SUBTYPE_SH7786) 440 static int scif_txfill(struct uart_port *port) 441 { 442 return sci_in(port, SCTFDR) & 0xff; 443 } 444 445 static int scif_txroom(struct uart_port *port) 446 { 447 return SCIF_TXROOM_MAX - scif_txfill(port); 448 } 449 450 static int scif_rxfill(struct uart_port *port) 451 { 452 return sci_in(port, SCRFDR) & 0xff; 453 } 454 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) 455 static int scif_txfill(struct uart_port *port) 456 { 457 if (port->mapbase == 0xffe00000 || 458 port->mapbase == 0xffe08000) 459 /* SCIF0/1*/ 460 return sci_in(port, SCTFDR) & 0xff; 461 else 462 /* SCIF2 */ 463 return sci_in(port, SCFDR) >> 8; 464 } 465 466 static int scif_txroom(struct uart_port *port) 467 { 468 if (port->mapbase == 0xffe00000 || 469 port->mapbase == 0xffe08000) 470 /* SCIF0/1*/ 471 return SCIF_TXROOM_MAX - scif_txfill(port); 472 else 473 /* SCIF2 */ 474 return SCIF2_TXROOM_MAX - scif_txfill(port); 475 } 476 477 static int scif_rxfill(struct uart_port *port) 478 { 479 if ((port->mapbase == 0xffe00000) || 480 (port->mapbase == 0xffe08000)) { 481 /* SCIF0/1*/ 482 return sci_in(port, SCRFDR) & 0xff; 483 } else { 484 /* SCIF2 */ 485 return sci_in(port, SCFDR) & SCIF2_RFDC_MASK; 486 } 487 } 488 #elif defined(CONFIG_ARCH_SH7372) 489 static int scif_txfill(struct uart_port *port) 490 { 491 if (port->type == PORT_SCIFA) 492 return sci_in(port, SCFDR) >> 8; 493 else 494 return sci_in(port, SCTFDR); 495 } 496 497 static int scif_txroom(struct uart_port *port) 498 { 499 return port->fifosize - scif_txfill(port); 500 } 501 502 static int scif_rxfill(struct uart_port *port) 503 { 504 if (port->type == PORT_SCIFA) 505 return sci_in(port, SCFDR) & SCIF_RFDC_MASK; 506 else 507 return sci_in(port, SCRFDR); 508 } 509 #else 510 static int scif_txfill(struct uart_port *port) 511 { 512 return sci_in(port, SCFDR) >> 8; 513 } 514 515 static int scif_txroom(struct uart_port *port) 516 { 517 return SCIF_TXROOM_MAX - scif_txfill(port); 518 } 519 520 static int scif_rxfill(struct uart_port *port) 521 { 522 return sci_in(port, SCFDR) & SCIF_RFDC_MASK; 523 } 524 #endif 525 526 static int sci_txfill(struct uart_port *port) 527 { 528 return !(sci_in(port, SCxSR) & SCI_TDRE); 529 } 530 531 static int sci_txroom(struct uart_port *port) 532 { 533 return !sci_txfill(port); 534 } 535 536 static int sci_rxfill(struct uart_port *port) 537 { 538 return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; 539 } 540 541 /* 542 * SCI helper for checking the state of the muxed port/RXD pins. 543 */ 544 static inline int sci_rxd_in(struct uart_port *port) 545 { 546 struct sci_port *s = to_sci_port(port); 547 548 if (s->cfg->port_reg <= 0) 549 return 1; 550 551 return !!__raw_readb(s->cfg->port_reg); 552 } 553 554 /* ********************************************************************** * 555 * the interrupt related routines * 556 * ********************************************************************** */ 557 558 static void sci_transmit_chars(struct uart_port *port) 559 { 560 struct circ_buf *xmit = &port->state->xmit; 561 unsigned int stopped = uart_tx_stopped(port); 562 unsigned short status; 563 unsigned short ctrl; 564 int count; 565 566 status = sci_in(port, SCxSR); 567 if (!(status & SCxSR_TDxE(port))) { 568 ctrl = sci_in(port, SCSCR); 569 if (uart_circ_empty(xmit)) 570 ctrl &= ~SCSCR_TIE; 571 else 572 ctrl |= SCSCR_TIE; 573 sci_out(port, SCSCR, ctrl); 574 return; 575 } 576 577 if (port->type == PORT_SCI) 578 count = sci_txroom(port); 579 else 580 count = scif_txroom(port); 581 582 do { 583 unsigned char c; 584 585 if (port->x_char) { 586 c = port->x_char; 587 port->x_char = 0; 588 } else if (!uart_circ_empty(xmit) && !stopped) { 589 c = xmit->buf[xmit->tail]; 590 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 591 } else { 592 break; 593 } 594 595 sci_out(port, SCxTDR, c); 596 597 port->icount.tx++; 598 } while (--count > 0); 599 600 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); 601 602 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 603 uart_write_wakeup(port); 604 if (uart_circ_empty(xmit)) { 605 sci_stop_tx(port); 606 } else { 607 ctrl = sci_in(port, SCSCR); 608 609 if (port->type != PORT_SCI) { 610 sci_in(port, SCxSR); /* Dummy read */ 611 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); 612 } 613 614 ctrl |= SCSCR_TIE; 615 sci_out(port, SCSCR, ctrl); 616 } 617 } 618 619 /* On SH3, SCIF may read end-of-break as a space->mark char */ 620 #define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); }) 621 622 static void sci_receive_chars(struct uart_port *port) 623 { 624 struct sci_port *sci_port = to_sci_port(port); 625 struct tty_struct *tty = port->state->port.tty; 626 int i, count, copied = 0; 627 unsigned short status; 628 unsigned char flag; 629 630 status = sci_in(port, SCxSR); 631 if (!(status & SCxSR_RDxF(port))) 632 return; 633 634 while (1) { 635 if (port->type == PORT_SCI) 636 count = sci_rxfill(port); 637 else 638 count = scif_rxfill(port); 639 640 /* Don't copy more bytes than there is room for in the buffer */ 641 count = tty_buffer_request_room(tty, count); 642 643 /* If for any reason we can't copy more data, we're done! */ 644 if (count == 0) 645 break; 646 647 if (port->type == PORT_SCI) { 648 char c = sci_in(port, SCxRDR); 649 if (uart_handle_sysrq_char(port, c) || 650 sci_port->break_flag) 651 count = 0; 652 else 653 tty_insert_flip_char(tty, c, TTY_NORMAL); 654 } else { 655 for (i = 0; i < count; i++) { 656 char c = sci_in(port, SCxRDR); 657 status = sci_in(port, SCxSR); 658 #if defined(CONFIG_CPU_SH3) 659 /* Skip "chars" during break */ 660 if (sci_port->break_flag) { 661 if ((c == 0) && 662 (status & SCxSR_FER(port))) { 663 count--; i--; 664 continue; 665 } 666 667 /* Nonzero => end-of-break */ 668 dev_dbg(port->dev, "debounce<%02x>\n", c); 669 sci_port->break_flag = 0; 670 671 if (STEPFN(c)) { 672 count--; i--; 673 continue; 674 } 675 } 676 #endif /* CONFIG_CPU_SH3 */ 677 if (uart_handle_sysrq_char(port, c)) { 678 count--; i--; 679 continue; 680 } 681 682 /* Store data and status */ 683 if (status & SCxSR_FER(port)) { 684 flag = TTY_FRAME; 685 dev_notice(port->dev, "frame error\n"); 686 } else if (status & SCxSR_PER(port)) { 687 flag = TTY_PARITY; 688 dev_notice(port->dev, "parity error\n"); 689 } else 690 flag = TTY_NORMAL; 691 692 tty_insert_flip_char(tty, c, flag); 693 } 694 } 695 696 sci_in(port, SCxSR); /* dummy read */ 697 sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); 698 699 copied += count; 700 port->icount.rx += count; 701 } 702 703 if (copied) { 704 /* Tell the rest of the system the news. New characters! */ 705 tty_flip_buffer_push(tty); 706 } else { 707 sci_in(port, SCxSR); /* dummy read */ 708 sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); 709 } 710 } 711 712 #define SCI_BREAK_JIFFIES (HZ/20) 713 714 /* 715 * The sci generates interrupts during the break, 716 * 1 per millisecond or so during the break period, for 9600 baud. 717 * So dont bother disabling interrupts. 718 * But dont want more than 1 break event. 719 * Use a kernel timer to periodically poll the rx line until 720 * the break is finished. 721 */ 722 static inline void sci_schedule_break_timer(struct sci_port *port) 723 { 724 mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES); 725 } 726 727 /* Ensure that two consecutive samples find the break over. */ 728 static void sci_break_timer(unsigned long data) 729 { 730 struct sci_port *port = (struct sci_port *)data; 731 732 if (port->enable) 733 port->enable(&port->port); 734 735 if (sci_rxd_in(&port->port) == 0) { 736 port->break_flag = 1; 737 sci_schedule_break_timer(port); 738 } else if (port->break_flag == 1) { 739 /* break is over. */ 740 port->break_flag = 2; 741 sci_schedule_break_timer(port); 742 } else 743 port->break_flag = 0; 744 745 if (port->disable) 746 port->disable(&port->port); 747 } 748 749 static int sci_handle_errors(struct uart_port *port) 750 { 751 int copied = 0; 752 unsigned short status = sci_in(port, SCxSR); 753 struct tty_struct *tty = port->state->port.tty; 754 struct sci_port *s = to_sci_port(port); 755 756 /* 757 * Handle overruns, if supported. 758 */ 759 if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) { 760 if (status & (1 << s->cfg->overrun_bit)) { 761 /* overrun error */ 762 if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) 763 copied++; 764 765 dev_notice(port->dev, "overrun error"); 766 } 767 } 768 769 if (status & SCxSR_FER(port)) { 770 if (sci_rxd_in(port) == 0) { 771 /* Notify of BREAK */ 772 struct sci_port *sci_port = to_sci_port(port); 773 774 if (!sci_port->break_flag) { 775 sci_port->break_flag = 1; 776 sci_schedule_break_timer(sci_port); 777 778 /* Do sysrq handling. */ 779 if (uart_handle_break(port)) 780 return 0; 781 782 dev_dbg(port->dev, "BREAK detected\n"); 783 784 if (tty_insert_flip_char(tty, 0, TTY_BREAK)) 785 copied++; 786 } 787 788 } else { 789 /* frame error */ 790 if (tty_insert_flip_char(tty, 0, TTY_FRAME)) 791 copied++; 792 793 dev_notice(port->dev, "frame error\n"); 794 } 795 } 796 797 if (status & SCxSR_PER(port)) { 798 /* parity error */ 799 if (tty_insert_flip_char(tty, 0, TTY_PARITY)) 800 copied++; 801 802 dev_notice(port->dev, "parity error"); 803 } 804 805 if (copied) 806 tty_flip_buffer_push(tty); 807 808 return copied; 809 } 810 811 static int sci_handle_fifo_overrun(struct uart_port *port) 812 { 813 struct tty_struct *tty = port->state->port.tty; 814 struct sci_port *s = to_sci_port(port); 815 int copied = 0; 816 817 /* 818 * XXX: Technically not limited to non-SCIFs, it's simply the 819 * SCLSR check that is for the moment SCIF-specific. This 820 * probably wants to be revisited for SCIFA/B as well as for 821 * factoring in SCI overrun detection. 822 */ 823 if (port->type != PORT_SCIF) 824 return 0; 825 826 if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) { 827 sci_out(port, SCLSR, 0); 828 829 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 830 tty_flip_buffer_push(tty); 831 832 dev_notice(port->dev, "overrun error\n"); 833 copied++; 834 } 835 836 return copied; 837 } 838 839 static int sci_handle_breaks(struct uart_port *port) 840 { 841 int copied = 0; 842 unsigned short status = sci_in(port, SCxSR); 843 struct tty_struct *tty = port->state->port.tty; 844 struct sci_port *s = to_sci_port(port); 845 846 if (uart_handle_break(port)) 847 return 0; 848 849 if (!s->break_flag && status & SCxSR_BRK(port)) { 850 #if defined(CONFIG_CPU_SH3) 851 /* Debounce break */ 852 s->break_flag = 1; 853 #endif 854 /* Notify of BREAK */ 855 if (tty_insert_flip_char(tty, 0, TTY_BREAK)) 856 copied++; 857 858 dev_dbg(port->dev, "BREAK detected\n"); 859 } 860 861 if (copied) 862 tty_flip_buffer_push(tty); 863 864 copied += sci_handle_fifo_overrun(port); 865 866 return copied; 867 } 868 869 static irqreturn_t sci_rx_interrupt(int irq, void *ptr) 870 { 871 #ifdef CONFIG_SERIAL_SH_SCI_DMA 872 struct uart_port *port = ptr; 873 struct sci_port *s = to_sci_port(port); 874 875 if (s->chan_rx) { 876 u16 scr = sci_in(port, SCSCR); 877 u16 ssr = sci_in(port, SCxSR); 878 879 /* Disable future Rx interrupts */ 880 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 881 disable_irq_nosync(irq); 882 scr |= 0x4000; 883 } else { 884 scr &= ~SCSCR_RIE; 885 } 886 sci_out(port, SCSCR, scr); 887 /* Clear current interrupt */ 888 sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port))); 889 dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n", 890 jiffies, s->rx_timeout); 891 mod_timer(&s->rx_timer, jiffies + s->rx_timeout); 892 893 return IRQ_HANDLED; 894 } 895 #endif 896 897 /* I think sci_receive_chars has to be called irrespective 898 * of whether the I_IXOFF is set, otherwise, how is the interrupt 899 * to be disabled? 900 */ 901 sci_receive_chars(ptr); 902 903 return IRQ_HANDLED; 904 } 905 906 static irqreturn_t sci_tx_interrupt(int irq, void *ptr) 907 { 908 struct uart_port *port = ptr; 909 unsigned long flags; 910 911 spin_lock_irqsave(&port->lock, flags); 912 sci_transmit_chars(port); 913 spin_unlock_irqrestore(&port->lock, flags); 914 915 return IRQ_HANDLED; 916 } 917 918 static irqreturn_t sci_er_interrupt(int irq, void *ptr) 919 { 920 struct uart_port *port = ptr; 921 922 /* Handle errors */ 923 if (port->type == PORT_SCI) { 924 if (sci_handle_errors(port)) { 925 /* discard character in rx buffer */ 926 sci_in(port, SCxSR); 927 sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); 928 } 929 } else { 930 sci_handle_fifo_overrun(port); 931 sci_rx_interrupt(irq, ptr); 932 } 933 934 sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); 935 936 /* Kick the transmission */ 937 sci_tx_interrupt(irq, ptr); 938 939 return IRQ_HANDLED; 940 } 941 942 static irqreturn_t sci_br_interrupt(int irq, void *ptr) 943 { 944 struct uart_port *port = ptr; 945 946 /* Handle BREAKs */ 947 sci_handle_breaks(port); 948 sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port)); 949 950 return IRQ_HANDLED; 951 } 952 953 static inline unsigned long port_rx_irq_mask(struct uart_port *port) 954 { 955 /* 956 * Not all ports (such as SCIFA) will support REIE. Rather than 957 * special-casing the port type, we check the port initialization 958 * IRQ enable mask to see whether the IRQ is desired at all. If 959 * it's unset, it's logically inferred that there's no point in 960 * testing for it. 961 */ 962 return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE); 963 } 964 965 static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) 966 { 967 unsigned short ssr_status, scr_status, err_enabled; 968 struct uart_port *port = ptr; 969 struct sci_port *s = to_sci_port(port); 970 irqreturn_t ret = IRQ_NONE; 971 972 ssr_status = sci_in(port, SCxSR); 973 scr_status = sci_in(port, SCSCR); 974 err_enabled = scr_status & port_rx_irq_mask(port); 975 976 /* Tx Interrupt */ 977 if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) && 978 !s->chan_tx) 979 ret = sci_tx_interrupt(irq, ptr); 980 981 /* 982 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF / 983 * DR flags 984 */ 985 if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) && 986 (scr_status & SCSCR_RIE)) 987 ret = sci_rx_interrupt(irq, ptr); 988 989 /* Error Interrupt */ 990 if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) 991 ret = sci_er_interrupt(irq, ptr); 992 993 /* Break Interrupt */ 994 if ((ssr_status & SCxSR_BRK(port)) && err_enabled) 995 ret = sci_br_interrupt(irq, ptr); 996 997 return ret; 998 } 999 1000 /* 1001 * Here we define a transition notifier so that we can update all of our 1002 * ports' baud rate when the peripheral clock changes. 1003 */ 1004 static int sci_notifier(struct notifier_block *self, 1005 unsigned long phase, void *p) 1006 { 1007 struct sci_port *sci_port; 1008 unsigned long flags; 1009 1010 sci_port = container_of(self, struct sci_port, freq_transition); 1011 1012 if ((phase == CPUFREQ_POSTCHANGE) || 1013 (phase == CPUFREQ_RESUMECHANGE)) { 1014 struct uart_port *port = &sci_port->port; 1015 1016 spin_lock_irqsave(&port->lock, flags); 1017 port->uartclk = clk_get_rate(sci_port->iclk); 1018 spin_unlock_irqrestore(&port->lock, flags); 1019 } 1020 1021 return NOTIFY_OK; 1022 } 1023 1024 static void sci_clk_enable(struct uart_port *port) 1025 { 1026 struct sci_port *sci_port = to_sci_port(port); 1027 1028 pm_runtime_get_sync(port->dev); 1029 1030 clk_enable(sci_port->iclk); 1031 sci_port->port.uartclk = clk_get_rate(sci_port->iclk); 1032 clk_enable(sci_port->fclk); 1033 } 1034 1035 static void sci_clk_disable(struct uart_port *port) 1036 { 1037 struct sci_port *sci_port = to_sci_port(port); 1038 1039 clk_disable(sci_port->fclk); 1040 clk_disable(sci_port->iclk); 1041 1042 pm_runtime_put_sync(port->dev); 1043 } 1044 1045 static int sci_request_irq(struct sci_port *port) 1046 { 1047 int i; 1048 irqreturn_t (*handlers[4])(int irq, void *ptr) = { 1049 sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt, 1050 sci_br_interrupt, 1051 }; 1052 const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full", 1053 "SCI Transmit Data Empty", "SCI Break" }; 1054 1055 if (port->cfg->irqs[0] == port->cfg->irqs[1]) { 1056 if (unlikely(!port->cfg->irqs[0])) 1057 return -ENODEV; 1058 1059 if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt, 1060 IRQF_DISABLED, "sci", port)) { 1061 dev_err(port->port.dev, "Can't allocate IRQ\n"); 1062 return -ENODEV; 1063 } 1064 } else { 1065 for (i = 0; i < ARRAY_SIZE(handlers); i++) { 1066 if (unlikely(!port->cfg->irqs[i])) 1067 continue; 1068 1069 if (request_irq(port->cfg->irqs[i], handlers[i], 1070 IRQF_DISABLED, desc[i], port)) { 1071 dev_err(port->port.dev, "Can't allocate IRQ\n"); 1072 return -ENODEV; 1073 } 1074 } 1075 } 1076 1077 return 0; 1078 } 1079 1080 static void sci_free_irq(struct sci_port *port) 1081 { 1082 int i; 1083 1084 if (port->cfg->irqs[0] == port->cfg->irqs[1]) 1085 free_irq(port->cfg->irqs[0], port); 1086 else { 1087 for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) { 1088 if (!port->cfg->irqs[i]) 1089 continue; 1090 1091 free_irq(port->cfg->irqs[i], port); 1092 } 1093 } 1094 } 1095 1096 static unsigned int sci_tx_empty(struct uart_port *port) 1097 { 1098 unsigned short status = sci_in(port, SCxSR); 1099 unsigned short in_tx_fifo = scif_txfill(port); 1100 1101 return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; 1102 } 1103 1104 static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) 1105 { 1106 /* This routine is used for seting signals of: DTR, DCD, CTS/RTS */ 1107 /* We use SCIF's hardware for CTS/RTS, so don't need any for that. */ 1108 /* If you have signals for DTR and DCD, please implement here. */ 1109 } 1110 1111 static unsigned int sci_get_mctrl(struct uart_port *port) 1112 { 1113 /* This routine is used for getting signals of: DTR, DCD, DSR, RI, 1114 and CTS/RTS */ 1115 1116 return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; 1117 } 1118 1119 #ifdef CONFIG_SERIAL_SH_SCI_DMA 1120 static void sci_dma_tx_complete(void *arg) 1121 { 1122 struct sci_port *s = arg; 1123 struct uart_port *port = &s->port; 1124 struct circ_buf *xmit = &port->state->xmit; 1125 unsigned long flags; 1126 1127 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 1128 1129 spin_lock_irqsave(&port->lock, flags); 1130 1131 xmit->tail += sg_dma_len(&s->sg_tx); 1132 xmit->tail &= UART_XMIT_SIZE - 1; 1133 1134 port->icount.tx += sg_dma_len(&s->sg_tx); 1135 1136 async_tx_ack(s->desc_tx); 1137 s->cookie_tx = -EINVAL; 1138 s->desc_tx = NULL; 1139 1140 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1141 uart_write_wakeup(port); 1142 1143 if (!uart_circ_empty(xmit)) { 1144 schedule_work(&s->work_tx); 1145 } else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 1146 u16 ctrl = sci_in(port, SCSCR); 1147 sci_out(port, SCSCR, ctrl & ~SCSCR_TIE); 1148 } 1149 1150 spin_unlock_irqrestore(&port->lock, flags); 1151 } 1152 1153 /* Locking: called with port lock held */ 1154 static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty, 1155 size_t count) 1156 { 1157 struct uart_port *port = &s->port; 1158 int i, active, room; 1159 1160 room = tty_buffer_request_room(tty, count); 1161 1162 if (s->active_rx == s->cookie_rx[0]) { 1163 active = 0; 1164 } else if (s->active_rx == s->cookie_rx[1]) { 1165 active = 1; 1166 } else { 1167 dev_err(port->dev, "cookie %d not found!\n", s->active_rx); 1168 return 0; 1169 } 1170 1171 if (room < count) 1172 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", 1173 count - room); 1174 if (!room) 1175 return room; 1176 1177 for (i = 0; i < room; i++) 1178 tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i], 1179 TTY_NORMAL); 1180 1181 port->icount.rx += room; 1182 1183 return room; 1184 } 1185 1186 static void sci_dma_rx_complete(void *arg) 1187 { 1188 struct sci_port *s = arg; 1189 struct uart_port *port = &s->port; 1190 struct tty_struct *tty = port->state->port.tty; 1191 unsigned long flags; 1192 int count; 1193 1194 dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx); 1195 1196 spin_lock_irqsave(&port->lock, flags); 1197 1198 count = sci_dma_rx_push(s, tty, s->buf_len_rx); 1199 1200 mod_timer(&s->rx_timer, jiffies + s->rx_timeout); 1201 1202 spin_unlock_irqrestore(&port->lock, flags); 1203 1204 if (count) 1205 tty_flip_buffer_push(tty); 1206 1207 schedule_work(&s->work_rx); 1208 } 1209 1210 static void sci_rx_dma_release(struct sci_port *s, bool enable_pio) 1211 { 1212 struct dma_chan *chan = s->chan_rx; 1213 struct uart_port *port = &s->port; 1214 1215 s->chan_rx = NULL; 1216 s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL; 1217 dma_release_channel(chan); 1218 if (sg_dma_address(&s->sg_rx[0])) 1219 dma_free_coherent(port->dev, s->buf_len_rx * 2, 1220 sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0])); 1221 if (enable_pio) 1222 sci_start_rx(port); 1223 } 1224 1225 static void sci_tx_dma_release(struct sci_port *s, bool enable_pio) 1226 { 1227 struct dma_chan *chan = s->chan_tx; 1228 struct uart_port *port = &s->port; 1229 1230 s->chan_tx = NULL; 1231 s->cookie_tx = -EINVAL; 1232 dma_release_channel(chan); 1233 if (enable_pio) 1234 sci_start_tx(port); 1235 } 1236 1237 static void sci_submit_rx(struct sci_port *s) 1238 { 1239 struct dma_chan *chan = s->chan_rx; 1240 int i; 1241 1242 for (i = 0; i < 2; i++) { 1243 struct scatterlist *sg = &s->sg_rx[i]; 1244 struct dma_async_tx_descriptor *desc; 1245 1246 desc = chan->device->device_prep_slave_sg(chan, 1247 sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT); 1248 1249 if (desc) { 1250 s->desc_rx[i] = desc; 1251 desc->callback = sci_dma_rx_complete; 1252 desc->callback_param = s; 1253 s->cookie_rx[i] = desc->tx_submit(desc); 1254 } 1255 1256 if (!desc || s->cookie_rx[i] < 0) { 1257 if (i) { 1258 async_tx_ack(s->desc_rx[0]); 1259 s->cookie_rx[0] = -EINVAL; 1260 } 1261 if (desc) { 1262 async_tx_ack(desc); 1263 s->cookie_rx[i] = -EINVAL; 1264 } 1265 dev_warn(s->port.dev, 1266 "failed to re-start DMA, using PIO\n"); 1267 sci_rx_dma_release(s, true); 1268 return; 1269 } 1270 dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__, 1271 s->cookie_rx[i], i); 1272 } 1273 1274 s->active_rx = s->cookie_rx[0]; 1275 1276 dma_async_issue_pending(chan); 1277 } 1278 1279 static void work_fn_rx(struct work_struct *work) 1280 { 1281 struct sci_port *s = container_of(work, struct sci_port, work_rx); 1282 struct uart_port *port = &s->port; 1283 struct dma_async_tx_descriptor *desc; 1284 int new; 1285 1286 if (s->active_rx == s->cookie_rx[0]) { 1287 new = 0; 1288 } else if (s->active_rx == s->cookie_rx[1]) { 1289 new = 1; 1290 } else { 1291 dev_err(port->dev, "cookie %d not found!\n", s->active_rx); 1292 return; 1293 } 1294 desc = s->desc_rx[new]; 1295 1296 if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != 1297 DMA_SUCCESS) { 1298 /* Handle incomplete DMA receive */ 1299 struct tty_struct *tty = port->state->port.tty; 1300 struct dma_chan *chan = s->chan_rx; 1301 struct sh_desc *sh_desc = container_of(desc, struct sh_desc, 1302 async_tx); 1303 unsigned long flags; 1304 int count; 1305 1306 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 1307 dev_dbg(port->dev, "Read %u bytes with cookie %d\n", 1308 sh_desc->partial, sh_desc->cookie); 1309 1310 spin_lock_irqsave(&port->lock, flags); 1311 count = sci_dma_rx_push(s, tty, sh_desc->partial); 1312 spin_unlock_irqrestore(&port->lock, flags); 1313 1314 if (count) 1315 tty_flip_buffer_push(tty); 1316 1317 sci_submit_rx(s); 1318 1319 return; 1320 } 1321 1322 s->cookie_rx[new] = desc->tx_submit(desc); 1323 if (s->cookie_rx[new] < 0) { 1324 dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n"); 1325 sci_rx_dma_release(s, true); 1326 return; 1327 } 1328 1329 s->active_rx = s->cookie_rx[!new]; 1330 1331 dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__, 1332 s->cookie_rx[new], new, s->active_rx); 1333 } 1334 1335 static void work_fn_tx(struct work_struct *work) 1336 { 1337 struct sci_port *s = container_of(work, struct sci_port, work_tx); 1338 struct dma_async_tx_descriptor *desc; 1339 struct dma_chan *chan = s->chan_tx; 1340 struct uart_port *port = &s->port; 1341 struct circ_buf *xmit = &port->state->xmit; 1342 struct scatterlist *sg = &s->sg_tx; 1343 1344 /* 1345 * DMA is idle now. 1346 * Port xmit buffer is already mapped, and it is one page... Just adjust 1347 * offsets and lengths. Since it is a circular buffer, we have to 1348 * transmit till the end, and then the rest. Take the port lock to get a 1349 * consistent xmit buffer state. 1350 */ 1351 spin_lock_irq(&port->lock); 1352 sg->offset = xmit->tail & (UART_XMIT_SIZE - 1); 1353 sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) + 1354 sg->offset; 1355 sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE), 1356 CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE)); 1357 spin_unlock_irq(&port->lock); 1358 1359 BUG_ON(!sg_dma_len(sg)); 1360 1361 desc = chan->device->device_prep_slave_sg(chan, 1362 sg, s->sg_len_tx, DMA_TO_DEVICE, 1363 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1364 if (!desc) { 1365 /* switch to PIO */ 1366 sci_tx_dma_release(s, true); 1367 return; 1368 } 1369 1370 dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE); 1371 1372 spin_lock_irq(&port->lock); 1373 s->desc_tx = desc; 1374 desc->callback = sci_dma_tx_complete; 1375 desc->callback_param = s; 1376 spin_unlock_irq(&port->lock); 1377 s->cookie_tx = desc->tx_submit(desc); 1378 if (s->cookie_tx < 0) { 1379 dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n"); 1380 /* switch to PIO */ 1381 sci_tx_dma_release(s, true); 1382 return; 1383 } 1384 1385 dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__, 1386 xmit->buf, xmit->tail, xmit->head, s->cookie_tx); 1387 1388 dma_async_issue_pending(chan); 1389 } 1390 #endif 1391 1392 static void sci_start_tx(struct uart_port *port) 1393 { 1394 struct sci_port *s = to_sci_port(port); 1395 unsigned short ctrl; 1396 1397 #ifdef CONFIG_SERIAL_SH_SCI_DMA 1398 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 1399 u16 new, scr = sci_in(port, SCSCR); 1400 if (s->chan_tx) 1401 new = scr | 0x8000; 1402 else 1403 new = scr & ~0x8000; 1404 if (new != scr) 1405 sci_out(port, SCSCR, new); 1406 } 1407 1408 if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) && 1409 s->cookie_tx < 0) 1410 schedule_work(&s->work_tx); 1411 #endif 1412 1413 if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 1414 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ 1415 ctrl = sci_in(port, SCSCR); 1416 sci_out(port, SCSCR, ctrl | SCSCR_TIE); 1417 } 1418 } 1419 1420 static void sci_stop_tx(struct uart_port *port) 1421 { 1422 unsigned short ctrl; 1423 1424 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */ 1425 ctrl = sci_in(port, SCSCR); 1426 1427 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) 1428 ctrl &= ~0x8000; 1429 1430 ctrl &= ~SCSCR_TIE; 1431 1432 sci_out(port, SCSCR, ctrl); 1433 } 1434 1435 static void sci_start_rx(struct uart_port *port) 1436 { 1437 unsigned short ctrl; 1438 1439 ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port); 1440 1441 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) 1442 ctrl &= ~0x4000; 1443 1444 sci_out(port, SCSCR, ctrl); 1445 } 1446 1447 static void sci_stop_rx(struct uart_port *port) 1448 { 1449 unsigned short ctrl; 1450 1451 ctrl = sci_in(port, SCSCR); 1452 1453 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) 1454 ctrl &= ~0x4000; 1455 1456 ctrl &= ~port_rx_irq_mask(port); 1457 1458 sci_out(port, SCSCR, ctrl); 1459 } 1460 1461 static void sci_enable_ms(struct uart_port *port) 1462 { 1463 /* Nothing here yet .. */ 1464 } 1465 1466 static void sci_break_ctl(struct uart_port *port, int break_state) 1467 { 1468 /* Nothing here yet .. */ 1469 } 1470 1471 #ifdef CONFIG_SERIAL_SH_SCI_DMA 1472 static bool filter(struct dma_chan *chan, void *slave) 1473 { 1474 struct sh_dmae_slave *param = slave; 1475 1476 dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__, 1477 param->slave_id); 1478 1479 if (param->dma_dev == chan->device->dev) { 1480 chan->private = param; 1481 return true; 1482 } else { 1483 return false; 1484 } 1485 } 1486 1487 static void rx_timer_fn(unsigned long arg) 1488 { 1489 struct sci_port *s = (struct sci_port *)arg; 1490 struct uart_port *port = &s->port; 1491 u16 scr = sci_in(port, SCSCR); 1492 1493 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 1494 scr &= ~0x4000; 1495 enable_irq(s->cfg->irqs[1]); 1496 } 1497 sci_out(port, SCSCR, scr | SCSCR_RIE); 1498 dev_dbg(port->dev, "DMA Rx timed out\n"); 1499 schedule_work(&s->work_rx); 1500 } 1501 1502 static void sci_request_dma(struct uart_port *port) 1503 { 1504 struct sci_port *s = to_sci_port(port); 1505 struct sh_dmae_slave *param; 1506 struct dma_chan *chan; 1507 dma_cap_mask_t mask; 1508 int nent; 1509 1510 dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__, 1511 port->line, s->cfg->dma_dev); 1512 1513 if (!s->cfg->dma_dev) 1514 return; 1515 1516 dma_cap_zero(mask); 1517 dma_cap_set(DMA_SLAVE, mask); 1518 1519 param = &s->param_tx; 1520 1521 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */ 1522 param->slave_id = s->cfg->dma_slave_tx; 1523 param->dma_dev = s->cfg->dma_dev; 1524 1525 s->cookie_tx = -EINVAL; 1526 chan = dma_request_channel(mask, filter, param); 1527 dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); 1528 if (chan) { 1529 s->chan_tx = chan; 1530 sg_init_table(&s->sg_tx, 1); 1531 /* UART circular tx buffer is an aligned page. */ 1532 BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK); 1533 sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf), 1534 UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK); 1535 nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE); 1536 if (!nent) 1537 sci_tx_dma_release(s, false); 1538 else 1539 dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__, 1540 sg_dma_len(&s->sg_tx), 1541 port->state->xmit.buf, sg_dma_address(&s->sg_tx)); 1542 1543 s->sg_len_tx = nent; 1544 1545 INIT_WORK(&s->work_tx, work_fn_tx); 1546 } 1547 1548 param = &s->param_rx; 1549 1550 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */ 1551 param->slave_id = s->cfg->dma_slave_rx; 1552 param->dma_dev = s->cfg->dma_dev; 1553 1554 chan = dma_request_channel(mask, filter, param); 1555 dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan); 1556 if (chan) { 1557 dma_addr_t dma[2]; 1558 void *buf[2]; 1559 int i; 1560 1561 s->chan_rx = chan; 1562 1563 s->buf_len_rx = 2 * max(16, (int)port->fifosize); 1564 buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2, 1565 &dma[0], GFP_KERNEL); 1566 1567 if (!buf[0]) { 1568 dev_warn(port->dev, 1569 "failed to allocate dma buffer, using PIO\n"); 1570 sci_rx_dma_release(s, true); 1571 return; 1572 } 1573 1574 buf[1] = buf[0] + s->buf_len_rx; 1575 dma[1] = dma[0] + s->buf_len_rx; 1576 1577 for (i = 0; i < 2; i++) { 1578 struct scatterlist *sg = &s->sg_rx[i]; 1579 1580 sg_init_table(sg, 1); 1581 sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx, 1582 (int)buf[i] & ~PAGE_MASK); 1583 sg_dma_address(sg) = dma[i]; 1584 } 1585 1586 INIT_WORK(&s->work_rx, work_fn_rx); 1587 setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s); 1588 1589 sci_submit_rx(s); 1590 } 1591 } 1592 1593 static void sci_free_dma(struct uart_port *port) 1594 { 1595 struct sci_port *s = to_sci_port(port); 1596 1597 if (!s->cfg->dma_dev) 1598 return; 1599 1600 if (s->chan_tx) 1601 sci_tx_dma_release(s, false); 1602 if (s->chan_rx) 1603 sci_rx_dma_release(s, false); 1604 } 1605 #else 1606 static inline void sci_request_dma(struct uart_port *port) 1607 { 1608 } 1609 1610 static inline void sci_free_dma(struct uart_port *port) 1611 { 1612 } 1613 #endif 1614 1615 static int sci_startup(struct uart_port *port) 1616 { 1617 struct sci_port *s = to_sci_port(port); 1618 int ret; 1619 1620 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 1621 1622 if (s->enable) 1623 s->enable(port); 1624 1625 ret = sci_request_irq(s); 1626 if (unlikely(ret < 0)) 1627 return ret; 1628 1629 sci_request_dma(port); 1630 1631 sci_start_tx(port); 1632 sci_start_rx(port); 1633 1634 return 0; 1635 } 1636 1637 static void sci_shutdown(struct uart_port *port) 1638 { 1639 struct sci_port *s = to_sci_port(port); 1640 1641 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 1642 1643 sci_stop_rx(port); 1644 sci_stop_tx(port); 1645 1646 sci_free_dma(port); 1647 sci_free_irq(s); 1648 1649 if (s->disable) 1650 s->disable(port); 1651 } 1652 1653 static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps, 1654 unsigned long freq) 1655 { 1656 switch (algo_id) { 1657 case SCBRR_ALGO_1: 1658 return ((freq + 16 * bps) / (16 * bps) - 1); 1659 case SCBRR_ALGO_2: 1660 return ((freq + 16 * bps) / (32 * bps) - 1); 1661 case SCBRR_ALGO_3: 1662 return (((freq * 2) + 16 * bps) / (16 * bps) - 1); 1663 case SCBRR_ALGO_4: 1664 return (((freq * 2) + 16 * bps) / (32 * bps) - 1); 1665 case SCBRR_ALGO_5: 1666 return (((freq * 1000 / 32) / bps) - 1); 1667 } 1668 1669 /* Warn, but use a safe default */ 1670 WARN_ON(1); 1671 1672 return ((freq + 16 * bps) / (32 * bps) - 1); 1673 } 1674 1675 static void sci_set_termios(struct uart_port *port, struct ktermios *termios, 1676 struct ktermios *old) 1677 { 1678 struct sci_port *s = to_sci_port(port); 1679 unsigned int status, baud, smr_val, max_baud; 1680 int t = -1; 1681 u16 scfcr = 0; 1682 1683 /* 1684 * earlyprintk comes here early on with port->uartclk set to zero. 1685 * the clock framework is not up and running at this point so here 1686 * we assume that 115200 is the maximum baud rate. please note that 1687 * the baud rate is not programmed during earlyprintk - it is assumed 1688 * that the previous boot loader has enabled required clocks and 1689 * setup the baud rate generator hardware for us already. 1690 */ 1691 max_baud = port->uartclk ? port->uartclk / 16 : 115200; 1692 1693 baud = uart_get_baud_rate(port, termios, old, 0, max_baud); 1694 if (likely(baud && port->uartclk)) 1695 t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk); 1696 1697 if (s->enable) 1698 s->enable(port); 1699 1700 do { 1701 status = sci_in(port, SCxSR); 1702 } while (!(status & SCxSR_TEND(port))); 1703 1704 sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */ 1705 1706 if (port->type != PORT_SCI) 1707 sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST); 1708 1709 smr_val = sci_in(port, SCSMR) & 3; 1710 1711 if ((termios->c_cflag & CSIZE) == CS7) 1712 smr_val |= 0x40; 1713 if (termios->c_cflag & PARENB) 1714 smr_val |= 0x20; 1715 if (termios->c_cflag & PARODD) 1716 smr_val |= 0x30; 1717 if (termios->c_cflag & CSTOPB) 1718 smr_val |= 0x08; 1719 1720 uart_update_timeout(port, termios->c_cflag, baud); 1721 1722 sci_out(port, SCSMR, smr_val); 1723 1724 dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t, 1725 s->cfg->scscr); 1726 1727 if (t > 0) { 1728 if (t >= 256) { 1729 sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); 1730 t >>= 2; 1731 } else 1732 sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3); 1733 1734 sci_out(port, SCBRR, t); 1735 udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */ 1736 } 1737 1738 sci_init_pins(port, termios->c_cflag); 1739 sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0)); 1740 1741 sci_out(port, SCSCR, s->cfg->scscr); 1742 1743 #ifdef CONFIG_SERIAL_SH_SCI_DMA 1744 /* 1745 * Calculate delay for 1.5 DMA buffers: see 1746 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits 1747 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function 1748 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)." 1749 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO 1750 * sizes), but it has been found out experimentally, that this is not 1751 * enough: the driver too often needlessly runs on a DMA timeout. 20ms 1752 * as a minimum seem to work perfectly. 1753 */ 1754 if (s->chan_rx) { 1755 s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 / 1756 port->fifosize / 2; 1757 dev_dbg(port->dev, 1758 "DMA Rx t-out %ums, tty t-out %u jiffies\n", 1759 s->rx_timeout * 1000 / HZ, port->timeout); 1760 if (s->rx_timeout < msecs_to_jiffies(20)) 1761 s->rx_timeout = msecs_to_jiffies(20); 1762 } 1763 #endif 1764 1765 if ((termios->c_cflag & CREAD) != 0) 1766 sci_start_rx(port); 1767 1768 if (s->disable) 1769 s->disable(port); 1770 } 1771 1772 static const char *sci_type(struct uart_port *port) 1773 { 1774 switch (port->type) { 1775 case PORT_IRDA: 1776 return "irda"; 1777 case PORT_SCI: 1778 return "sci"; 1779 case PORT_SCIF: 1780 return "scif"; 1781 case PORT_SCIFA: 1782 return "scifa"; 1783 case PORT_SCIFB: 1784 return "scifb"; 1785 } 1786 1787 return NULL; 1788 } 1789 1790 static inline unsigned long sci_port_size(struct uart_port *port) 1791 { 1792 /* 1793 * Pick an arbitrary size that encapsulates all of the base 1794 * registers by default. This can be optimized later, or derived 1795 * from platform resource data at such a time that ports begin to 1796 * behave more erratically. 1797 */ 1798 return 64; 1799 } 1800 1801 static int sci_remap_port(struct uart_port *port) 1802 { 1803 unsigned long size = sci_port_size(port); 1804 1805 /* 1806 * Nothing to do if there's already an established membase. 1807 */ 1808 if (port->membase) 1809 return 0; 1810 1811 if (port->flags & UPF_IOREMAP) { 1812 port->membase = ioremap_nocache(port->mapbase, size); 1813 if (unlikely(!port->membase)) { 1814 dev_err(port->dev, "can't remap port#%d\n", port->line); 1815 return -ENXIO; 1816 } 1817 } else { 1818 /* 1819 * For the simple (and majority of) cases where we don't 1820 * need to do any remapping, just cast the cookie 1821 * directly. 1822 */ 1823 port->membase = (void __iomem *)port->mapbase; 1824 } 1825 1826 return 0; 1827 } 1828 1829 static void sci_release_port(struct uart_port *port) 1830 { 1831 if (port->flags & UPF_IOREMAP) { 1832 iounmap(port->membase); 1833 port->membase = NULL; 1834 } 1835 1836 release_mem_region(port->mapbase, sci_port_size(port)); 1837 } 1838 1839 static int sci_request_port(struct uart_port *port) 1840 { 1841 unsigned long size = sci_port_size(port); 1842 struct resource *res; 1843 int ret; 1844 1845 res = request_mem_region(port->mapbase, size, dev_name(port->dev)); 1846 if (unlikely(res == NULL)) 1847 return -EBUSY; 1848 1849 ret = sci_remap_port(port); 1850 if (unlikely(ret != 0)) { 1851 release_resource(res); 1852 return ret; 1853 } 1854 1855 return 0; 1856 } 1857 1858 static void sci_config_port(struct uart_port *port, int flags) 1859 { 1860 if (flags & UART_CONFIG_TYPE) { 1861 struct sci_port *sport = to_sci_port(port); 1862 1863 port->type = sport->cfg->type; 1864 sci_request_port(port); 1865 } 1866 } 1867 1868 static int sci_verify_port(struct uart_port *port, struct serial_struct *ser) 1869 { 1870 struct sci_port *s = to_sci_port(port); 1871 1872 if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs) 1873 return -EINVAL; 1874 if (ser->baud_base < 2400) 1875 /* No paper tape reader for Mitch.. */ 1876 return -EINVAL; 1877 1878 return 0; 1879 } 1880 1881 static struct uart_ops sci_uart_ops = { 1882 .tx_empty = sci_tx_empty, 1883 .set_mctrl = sci_set_mctrl, 1884 .get_mctrl = sci_get_mctrl, 1885 .start_tx = sci_start_tx, 1886 .stop_tx = sci_stop_tx, 1887 .stop_rx = sci_stop_rx, 1888 .enable_ms = sci_enable_ms, 1889 .break_ctl = sci_break_ctl, 1890 .startup = sci_startup, 1891 .shutdown = sci_shutdown, 1892 .set_termios = sci_set_termios, 1893 .type = sci_type, 1894 .release_port = sci_release_port, 1895 .request_port = sci_request_port, 1896 .config_port = sci_config_port, 1897 .verify_port = sci_verify_port, 1898 #ifdef CONFIG_CONSOLE_POLL 1899 .poll_get_char = sci_poll_get_char, 1900 .poll_put_char = sci_poll_put_char, 1901 #endif 1902 }; 1903 1904 static int __devinit sci_init_single(struct platform_device *dev, 1905 struct sci_port *sci_port, 1906 unsigned int index, 1907 struct plat_sci_port *p) 1908 { 1909 struct uart_port *port = &sci_port->port; 1910 1911 port->ops = &sci_uart_ops; 1912 port->iotype = UPIO_MEM; 1913 port->line = index; 1914 1915 switch (p->type) { 1916 case PORT_SCIFB: 1917 port->fifosize = 256; 1918 break; 1919 case PORT_SCIFA: 1920 port->fifosize = 64; 1921 break; 1922 case PORT_SCIF: 1923 port->fifosize = 16; 1924 break; 1925 default: 1926 port->fifosize = 1; 1927 break; 1928 } 1929 1930 if (p->regtype == SCIx_PROBE_REGTYPE) 1931 BUG_ON(sci_probe_regmap(p) != 0); 1932 1933 if (dev) { 1934 sci_port->iclk = clk_get(&dev->dev, "sci_ick"); 1935 if (IS_ERR(sci_port->iclk)) { 1936 sci_port->iclk = clk_get(&dev->dev, "peripheral_clk"); 1937 if (IS_ERR(sci_port->iclk)) { 1938 dev_err(&dev->dev, "can't get iclk\n"); 1939 return PTR_ERR(sci_port->iclk); 1940 } 1941 } 1942 1943 /* 1944 * The function clock is optional, ignore it if we can't 1945 * find it. 1946 */ 1947 sci_port->fclk = clk_get(&dev->dev, "sci_fck"); 1948 if (IS_ERR(sci_port->fclk)) 1949 sci_port->fclk = NULL; 1950 1951 sci_port->enable = sci_clk_enable; 1952 sci_port->disable = sci_clk_disable; 1953 port->dev = &dev->dev; 1954 1955 pm_runtime_enable(&dev->dev); 1956 } 1957 1958 sci_port->break_timer.data = (unsigned long)sci_port; 1959 sci_port->break_timer.function = sci_break_timer; 1960 init_timer(&sci_port->break_timer); 1961 1962 /* 1963 * Establish some sensible defaults for the error detection. 1964 */ 1965 if (!p->error_mask) 1966 p->error_mask = (p->type == PORT_SCI) ? 1967 SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK; 1968 1969 /* 1970 * Establish sensible defaults for the overrun detection, unless 1971 * the part has explicitly disabled support for it. 1972 */ 1973 if (p->overrun_bit != SCIx_NOT_SUPPORTED) { 1974 if (p->type == PORT_SCI) 1975 p->overrun_bit = 5; 1976 else if (p->scbrr_algo_id == SCBRR_ALGO_4) 1977 p->overrun_bit = 9; 1978 else 1979 p->overrun_bit = 0; 1980 1981 /* 1982 * Make the error mask inclusive of overrun detection, if 1983 * supported. 1984 */ 1985 p->error_mask |= (1 << p->overrun_bit); 1986 } 1987 1988 sci_port->cfg = p; 1989 1990 port->mapbase = p->mapbase; 1991 port->type = p->type; 1992 port->flags = p->flags; 1993 port->regshift = p->regshift; 1994 1995 /* 1996 * The UART port needs an IRQ value, so we peg this to the RX IRQ 1997 * for the multi-IRQ ports, which is where we are primarily 1998 * concerned with the shutdown path synchronization. 1999 * 2000 * For the muxed case there's nothing more to do. 2001 */ 2002 port->irq = p->irqs[SCIx_RXI_IRQ]; 2003 2004 port->serial_in = sci_serial_in; 2005 port->serial_out = sci_serial_out; 2006 2007 if (p->dma_dev) 2008 dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n", 2009 p->dma_dev, p->dma_slave_tx, p->dma_slave_rx); 2010 2011 return 0; 2012 } 2013 2014 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 2015 static void serial_console_putchar(struct uart_port *port, int ch) 2016 { 2017 sci_poll_put_char(port, ch); 2018 } 2019 2020 /* 2021 * Print a string to the serial port trying not to disturb 2022 * any possible real use of the port... 2023 */ 2024 static void serial_console_write(struct console *co, const char *s, 2025 unsigned count) 2026 { 2027 struct sci_port *sci_port = &sci_ports[co->index]; 2028 struct uart_port *port = &sci_port->port; 2029 unsigned short bits; 2030 2031 if (sci_port->enable) 2032 sci_port->enable(port); 2033 2034 uart_console_write(port, s, count, serial_console_putchar); 2035 2036 /* wait until fifo is empty and last bit has been transmitted */ 2037 bits = SCxSR_TDxE(port) | SCxSR_TEND(port); 2038 while ((sci_in(port, SCxSR) & bits) != bits) 2039 cpu_relax(); 2040 2041 if (sci_port->disable) 2042 sci_port->disable(port); 2043 } 2044 2045 static int __devinit serial_console_setup(struct console *co, char *options) 2046 { 2047 struct sci_port *sci_port; 2048 struct uart_port *port; 2049 int baud = 115200; 2050 int bits = 8; 2051 int parity = 'n'; 2052 int flow = 'n'; 2053 int ret; 2054 2055 /* 2056 * Refuse to handle any bogus ports. 2057 */ 2058 if (co->index < 0 || co->index >= SCI_NPORTS) 2059 return -ENODEV; 2060 2061 sci_port = &sci_ports[co->index]; 2062 port = &sci_port->port; 2063 2064 /* 2065 * Refuse to handle uninitialized ports. 2066 */ 2067 if (!port->ops) 2068 return -ENODEV; 2069 2070 ret = sci_remap_port(port); 2071 if (unlikely(ret != 0)) 2072 return ret; 2073 2074 if (sci_port->enable) 2075 sci_port->enable(port); 2076 2077 if (options) 2078 uart_parse_options(options, &baud, &parity, &bits, &flow); 2079 2080 /* TODO: disable clock */ 2081 return uart_set_options(port, co, baud, parity, bits, flow); 2082 } 2083 2084 static struct console serial_console = { 2085 .name = "ttySC", 2086 .device = uart_console_device, 2087 .write = serial_console_write, 2088 .setup = serial_console_setup, 2089 .flags = CON_PRINTBUFFER, 2090 .index = -1, 2091 .data = &sci_uart_driver, 2092 }; 2093 2094 static struct console early_serial_console = { 2095 .name = "early_ttySC", 2096 .write = serial_console_write, 2097 .flags = CON_PRINTBUFFER, 2098 .index = -1, 2099 }; 2100 2101 static char early_serial_buf[32]; 2102 2103 static int __devinit sci_probe_earlyprintk(struct platform_device *pdev) 2104 { 2105 struct plat_sci_port *cfg = pdev->dev.platform_data; 2106 2107 if (early_serial_console.data) 2108 return -EEXIST; 2109 2110 early_serial_console.index = pdev->id; 2111 2112 sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg); 2113 2114 serial_console_setup(&early_serial_console, early_serial_buf); 2115 2116 if (!strstr(early_serial_buf, "keep")) 2117 early_serial_console.flags |= CON_BOOT; 2118 2119 register_console(&early_serial_console); 2120 return 0; 2121 } 2122 2123 #define SCI_CONSOLE (&serial_console) 2124 2125 #else 2126 static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev) 2127 { 2128 return -EINVAL; 2129 } 2130 2131 #define SCI_CONSOLE NULL 2132 2133 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ 2134 2135 static char banner[] __initdata = 2136 KERN_INFO "SuperH SCI(F) driver initialized\n"; 2137 2138 static struct uart_driver sci_uart_driver = { 2139 .owner = THIS_MODULE, 2140 .driver_name = "sci", 2141 .dev_name = "ttySC", 2142 .major = SCI_MAJOR, 2143 .minor = SCI_MINOR_START, 2144 .nr = SCI_NPORTS, 2145 .cons = SCI_CONSOLE, 2146 }; 2147 2148 static int sci_remove(struct platform_device *dev) 2149 { 2150 struct sci_port *port = platform_get_drvdata(dev); 2151 2152 cpufreq_unregister_notifier(&port->freq_transition, 2153 CPUFREQ_TRANSITION_NOTIFIER); 2154 2155 uart_remove_one_port(&sci_uart_driver, &port->port); 2156 2157 clk_put(port->iclk); 2158 clk_put(port->fclk); 2159 2160 pm_runtime_disable(&dev->dev); 2161 return 0; 2162 } 2163 2164 static int __devinit sci_probe_single(struct platform_device *dev, 2165 unsigned int index, 2166 struct plat_sci_port *p, 2167 struct sci_port *sciport) 2168 { 2169 int ret; 2170 2171 /* Sanity check */ 2172 if (unlikely(index >= SCI_NPORTS)) { 2173 dev_notice(&dev->dev, "Attempting to register port " 2174 "%d when only %d are available.\n", 2175 index+1, SCI_NPORTS); 2176 dev_notice(&dev->dev, "Consider bumping " 2177 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); 2178 return 0; 2179 } 2180 2181 ret = sci_init_single(dev, sciport, index, p); 2182 if (ret) 2183 return ret; 2184 2185 return uart_add_one_port(&sci_uart_driver, &sciport->port); 2186 } 2187 2188 static int __devinit sci_probe(struct platform_device *dev) 2189 { 2190 struct plat_sci_port *p = dev->dev.platform_data; 2191 struct sci_port *sp = &sci_ports[dev->id]; 2192 int ret; 2193 2194 /* 2195 * If we've come here via earlyprintk initialization, head off to 2196 * the special early probe. We don't have sufficient device state 2197 * to make it beyond this yet. 2198 */ 2199 if (is_early_platform_device(dev)) 2200 return sci_probe_earlyprintk(dev); 2201 2202 platform_set_drvdata(dev, sp); 2203 2204 ret = sci_probe_single(dev, dev->id, p, sp); 2205 if (ret) 2206 goto err_unreg; 2207 2208 sp->freq_transition.notifier_call = sci_notifier; 2209 2210 ret = cpufreq_register_notifier(&sp->freq_transition, 2211 CPUFREQ_TRANSITION_NOTIFIER); 2212 if (unlikely(ret < 0)) 2213 goto err_unreg; 2214 2215 #ifdef CONFIG_SH_STANDARD_BIOS 2216 sh_bios_gdb_detach(); 2217 #endif 2218 2219 return 0; 2220 2221 err_unreg: 2222 sci_remove(dev); 2223 return ret; 2224 } 2225 2226 static int sci_suspend(struct device *dev) 2227 { 2228 struct sci_port *sport = dev_get_drvdata(dev); 2229 2230 if (sport) 2231 uart_suspend_port(&sci_uart_driver, &sport->port); 2232 2233 return 0; 2234 } 2235 2236 static int sci_resume(struct device *dev) 2237 { 2238 struct sci_port *sport = dev_get_drvdata(dev); 2239 2240 if (sport) 2241 uart_resume_port(&sci_uart_driver, &sport->port); 2242 2243 return 0; 2244 } 2245 2246 static const struct dev_pm_ops sci_dev_pm_ops = { 2247 .suspend = sci_suspend, 2248 .resume = sci_resume, 2249 }; 2250 2251 static struct platform_driver sci_driver = { 2252 .probe = sci_probe, 2253 .remove = sci_remove, 2254 .driver = { 2255 .name = "sh-sci", 2256 .owner = THIS_MODULE, 2257 .pm = &sci_dev_pm_ops, 2258 }, 2259 }; 2260 2261 static int __init sci_init(void) 2262 { 2263 int ret; 2264 2265 printk(banner); 2266 2267 ret = uart_register_driver(&sci_uart_driver); 2268 if (likely(ret == 0)) { 2269 ret = platform_driver_register(&sci_driver); 2270 if (unlikely(ret)) 2271 uart_unregister_driver(&sci_uart_driver); 2272 } 2273 2274 return ret; 2275 } 2276 2277 static void __exit sci_exit(void) 2278 { 2279 platform_driver_unregister(&sci_driver); 2280 uart_unregister_driver(&sci_uart_driver); 2281 } 2282 2283 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 2284 early_platform_init_buffer("earlyprintk", &sci_driver, 2285 early_serial_buf, ARRAY_SIZE(early_serial_buf)); 2286 #endif 2287 module_init(sci_init); 2288 module_exit(sci_exit); 2289 2290 MODULE_LICENSE("GPL"); 2291 MODULE_ALIAS("platform:sh-sci"); 2292