// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgtty.c - tty glue for xHCI debug capability
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/idr.h>

#include "xhci.h"
#include "xhci-dbgcap.h"

static struct tty_driver *dbc_tty_driver;
static struct idr dbc_tty_minors;
static DEFINE_MUTEX(dbc_tty_minors_lock);

static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
{
	return dbc->priv;
}

/*
 * Pull up to one max packet of data from the kfifo into @packet, honoring
 * the tx_boundary so that a single tty write maps to a single USB transfer.
 */
static unsigned int
dbc_kfifo_to_req(struct dbc_port *port, char *packet)
{
	unsigned int len;

	len = kfifo_len(&port->port.xmit_fifo);

	if (len == 0)
		return 0;

	len = min(len, DBC_MAX_PACKET);

	if (port->tx_boundary)
		len = min(port->tx_boundary, len);

	len = kfifo_out(&port->port.xmit_fifo, packet, len);

	if (port->tx_boundary)
		port->tx_boundary -= len;

	return len;
}

static int dbc_start_tx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	int len;
	struct dbc_request *req;
	int status = 0;
	bool do_tty_wake = false;
	struct list_head *pool = &port->write_pool;

	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct dbc_request, list_pool);
		len = dbc_kfifo_to_req(port, req->buf);
		if (len == 0)
			break;
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list_pool);

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);

	return status;
}

static void dbc_start_rx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	struct dbc_request *req;
	int status;
	struct list_head *pool = &port->read_pool;

	while (!list_empty(pool)) {
		if (!port->port.tty)
			break;

		req = list_entry(pool->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		req->length = DBC_MAX_PACKET;

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}
}

/*
 * Queue received data to the tty buffer and push it.
 *
 * Returns the number of remaining bytes that didn't fit the tty buffer, i.e.
 * 0 if all bytes were successfully moved. In case of error returns a negative
 * errno.
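 * Partially pushed data is picked up from the port->n_read offset on the
 * next call.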
 * Call with lock held
 */
static int dbc_rx_push_buffer(struct dbc_port *port, struct dbc_request *req)
{
	char *packet = req->buf;
	unsigned int n, size = req->actual;
	int count;

	if (!req->actual)
		return 0;

	/* if n_read is set then request was partially moved to tty buffer */
	n = port->n_read;
	if (n) {
		packet += n;
		size -= n;
	}

	count = tty_insert_flip_string(&port->port, packet, size);
	if (count)
		tty_flip_buffer_push(&port->port);
	if (count != size) {
		port->n_read += count;
		return size - count;
	}

	port->n_read = 0;
	return 0;
}

static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long flags;
	struct dbc_port *port = dbc_to_port(dbc);
	struct tty_struct *tty;
	int untransferred;

	tty = port->port.tty;

	spin_lock_irqsave(&port->port_lock, flags);

	/*
	 * Only defer copying data to the tty buffer in case:
	 * - !list_empty(&port->read_queue), there is older data still pending
	 * - the tty is throttled
	 * - not all data could be copied to the buffer, defer the remaining part
	 */

	if (list_empty(&port->read_queue) && tty && !tty_throttled(tty)) {
		untransferred = dbc_rx_push_buffer(port, req);
		if (untransferred == 0) {
			list_add_tail(&req->list_pool, &port->read_pool);
			if (req->status != -ESHUTDOWN)
				dbc_start_rx(port);
			goto out;
		}
	}

	/* defer moving data from req to tty buffer to a tasklet */
	list_add_tail(&req->list_pool, &port->read_queue);
	tasklet_schedule(&port->push);
out:
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long flags;
	struct dbc_port *port = dbc_to_port(dbc);

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list_pool, &port->write_pool);
	switch (req->status) {
	case 0:
		dbc_start_tx(port);
		break;
	case -ESHUTDOWN:
		break;
	default:
		dev_warn(dbc->dev, "unexpected write complete status %d\n",
			 req->status);
		break;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void xhci_dbc_free_req(struct dbc_request *req)
{
	kfree(req->buf);
	dbc_free_request(req);
}

static int
xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
			struct list_head *head,
			void (*fn)(struct xhci_dbc *, struct dbc_request *))
{
	int i;
	struct dbc_request *req;

	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
		req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
		if (!req)
			break;

		req->length = DBC_MAX_PACKET;
		req->buf = kmalloc(req->length, GFP_KERNEL);
		if (!req->buf) {
			dbc_free_request(req);
			break;
		}

		req->complete = fn;
		list_add_tail(&req->list_pool, head);
	}

	return list_empty(head) ? -ENOMEM : 0;
}

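/* Free every request (and its data buffer) left on the given list */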
static void
xhci_dbc_free_requests(struct list_head *head)
{
	struct dbc_request *req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		xhci_dbc_free_req(req);
	}
}

static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct dbc_port *port;

	mutex_lock(&dbc_tty_minors_lock);
	port = idr_find(&dbc_tty_minors, tty->index);
	mutex_unlock(&dbc_tty_minors_lock);

	if (!port)
		return -ENXIO;

	tty->driver_data = port;

	return tty_port_install(&port->port, driver, tty);
}

static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
	struct dbc_port *port = tty->driver_data;

	return tty_port_open(&port->port, tty, file);
}

static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
	struct dbc_port *port = tty->driver_data;

	tty_port_close(&port->port, tty, file);
}

static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
			     size_t count)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	unsigned int written = 0;

	spin_lock_irqsave(&port->port_lock, flags);

	/*
	 * Treat a tty write as one USB transfer. Make sure the writes are
	 * turned into TRB requests with the same size boundaries as the tty
	 * writes. Don't add data to the kfifo before the previous write has
	 * been turned into TRBs.
	 */
	if (port->tx_boundary) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return 0;
	}

	if (count) {
		written = kfifo_in(&port->port.xmit_fifo, buf, count);

		if (written == count)
			port->tx_boundary = kfifo_len(&port->port.xmit_fifo);

		dbc_start_tx(port);
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	return written;
}

static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&port->port_lock, flags);
	status = kfifo_put(&port->port.xmit_fifo, ch);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void dbc_tty_flush_chars(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static unsigned int dbc_tty_write_room(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	unsigned int room;

	spin_lock_irqsave(&port->port_lock, flags);
	room = kfifo_avail(&port->port.xmit_fifo);

	if (port->tx_boundary)
		room = 0;

	spin_unlock_irqrestore(&port->port_lock, flags);

	return room;
}

static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	unsigned int chars;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = kfifo_len(&port->port.xmit_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return chars;
}

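/*
 * While the tty is throttled, received data is deferred to the read_queue.
 * Kick the tasklet so that data is pushed to the tty and reads are restarted.
 */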
static void dbc_tty_unthrottle(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static const struct tty_operations dbc_tty_ops = {
	.install = dbc_tty_install,
	.open = dbc_tty_open,
	.close = dbc_tty_close,
	.write = dbc_tty_write,
	.put_char = dbc_tty_put_char,
	.flush_chars = dbc_tty_flush_chars,
	.write_room = dbc_tty_write_room,
	.chars_in_buffer = dbc_tty_chars_in_buffer,
	.unthrottle = dbc_tty_unthrottle,
};

static void dbc_rx_push(struct tasklet_struct *t)
{
	struct dbc_request *req;
	struct tty_struct *tty;
	unsigned long flags;
	bool disconnect = false;
	struct dbc_port *port = from_tasklet(port, t, push);
	struct list_head *queue = &port->read_queue;
	int untransferred;

	spin_lock_irqsave(&port->port_lock, flags);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		req = list_first_entry(queue, struct dbc_request, list_pool);

		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case 0:
			break;
		case -ESHUTDOWN:
			disconnect = true;
			break;
		default:
			pr_warn("ttyDBC0: unexpected RX status %d\n",
				req->status);
			break;
		}

		untransferred = dbc_rx_push_buffer(port, req);
		if (untransferred > 0)
			break;

		list_move_tail(&req->list_pool, &port->read_pool);
	}

	if (!list_empty(queue))
		tasklet_schedule(&port->push);

	if (!disconnect)
		dbc_start_rx(port);

	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
	unsigned long flags;
	struct dbc_port *port = container_of(_port, struct dbc_port, port);

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_rx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return 0;
}

static const struct tty_port_operations dbc_port_ops = {
	.activate = dbc_port_activate,
};

static void
xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
{
	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	tasklet_setup(&port->push, dbc_rx_push);
	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port.ops = &dbc_port_ops;
	port->n_read = 0;
}

static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
	tasklet_kill(&port->push);
	tty_port_destroy(&port->port);
}

static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
{
	int ret;
	struct device *tty_dev;
	struct dbc_port *port = dbc_to_port(dbc);

	if (port->registered)
		return -EBUSY;

	xhci_dbc_tty_init_port(dbc, port);

	mutex_lock(&dbc_tty_minors_lock);
	port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
	mutex_unlock(&dbc_tty_minors_lock);

	if (port->minor < 0) {
		ret = port->minor;
		goto err_idr;
	}

	ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
			  GFP_KERNEL);
	if (ret)
		goto err_exit_port;

	ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
				      dbc_read_complete);
	if (ret)
		goto err_free_fifo;

	ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
				      dbc_write_complete);
	if (ret)
		goto err_free_requests;

	tty_dev = tty_port_register_device(&port->port,
					   dbc_tty_driver, port->minor, NULL);
	if (IS_ERR(tty_dev)) {
		ret = PTR_ERR(tty_dev);
		goto err_free_requests;
	}

	port->registered = true;

	return 0;

err_free_requests:
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->write_pool);
err_free_fifo:
	kfifo_free(&port->port.xmit_fifo);
err_exit_port:
	idr_remove(&dbc_tty_minors, port->minor);
err_idr:
	xhci_dbc_tty_exit_port(port);

	dev_err(dbc->dev, "can't register tty port, err %d\n", ret);

	return ret;
}

static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
{
	struct dbc_port *port = dbc_to_port(dbc);

	if (!port->registered)
		return;
	tty_unregister_device(dbc_tty_driver, port->minor);
	xhci_dbc_tty_exit_port(port);
	port->registered = false;

	mutex_lock(&dbc_tty_minors_lock);
	idr_remove(&dbc_tty_minors, port->minor);
	mutex_unlock(&dbc_tty_minors_lock);

	kfifo_free(&port->port.xmit_fifo);
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->read_queue);
	xhci_dbc_free_requests(&port->write_pool);
}

static const struct dbc_driver dbc_driver = {
	.configure = xhci_dbc_tty_register_device,
	.disconnect = xhci_dbc_tty_unregister_device,
};

int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc;
	struct dbc_port *port;
	int status;

	if (!dbc_tty_driver)
		return -ENODEV;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	dbc = xhci_alloc_dbc(dev, base, &dbc_driver);
	if (!dbc) {
		status = -ENOMEM;
		goto out2;
	}

	dbc->priv = port;

	/* get rid of xhci once this is a real driver binding to a device */
	xhci->dbc = dbc;

	return 0;
out2:
	kfree(port);

	return status;
}

/*
 * Undo what probe did; assume the dbc is stopped already.
 * We also assume tty_unregister_device() is called before this.
 */
void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
{
	struct dbc_port *port = dbc_to_port(dbc);

	xhci_dbc_remove(dbc);
	kfree(port);
}

int dbc_tty_init(void)
{
	int ret;

	idr_init(&dbc_tty_minors);

	dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
					  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(dbc_tty_driver)) {
		idr_destroy(&dbc_tty_minors);
		return PTR_ERR(dbc_tty_driver);
	}

	dbc_tty_driver->driver_name = "dbc_serial";
	dbc_tty_driver->name = "ttyDBC";

	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	dbc_tty_driver->init_termios = tty_std_termios;
	dbc_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	dbc_tty_driver->init_termios.c_ispeed = 9600;
	dbc_tty_driver->init_termios.c_ospeed = 9600;

	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

	ret = tty_register_driver(dbc_tty_driver);
	if (ret) {
		pr_err("Can't register dbc tty driver\n");
		tty_driver_kref_put(dbc_tty_driver);
		idr_destroy(&dbc_tty_minors);
	}

	return ret;
}

void dbc_tty_exit(void)
{
	if (dbc_tty_driver) {
		tty_unregister_driver(dbc_tty_driver);
		tty_driver_kref_put(dbc_tty_driver);
		dbc_tty_driver = NULL;
	}

	idr_destroy(&dbc_tty_minors);
}