// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgtty.c - tty glue for xHCI debug capability
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/idr.h>

#include "xhci.h"
#include "xhci-dbgcap.h"

static struct tty_driver *dbc_tty_driver;
static struct idr dbc_tty_minors;
static DEFINE_MUTEX(dbc_tty_minors_lock);

static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
{
	return dbc->priv;
}

static int dbc_start_tx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	int len;
	struct dbc_request *req;
	int status = 0;
	bool do_tty_wake = false;
	struct list_head *pool = &port->write_pool;

	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct dbc_request, list_pool);
		len = kfifo_out(&port->port.xmit_fifo, req->buf, DBC_MAX_PACKET);
		if (len == 0)
			break;
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list_pool);

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);

	return status;
}

static void dbc_start_rx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	struct dbc_request *req;
	int status;
	struct list_head *pool = &port->read_pool;

	while (!list_empty(pool)) {
		if (!port->port.tty)
			break;

		req = list_entry(pool->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		req->length = DBC_MAX_PACKET;

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}
}

static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long flags;
	struct dbc_port *port = dbc_to_port(dbc);

	spin_lock_irqsave(&port->port_lock, flags);
	list_add_tail(&req->list_pool, &port->read_queue);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long flags;
	struct dbc_port *port = dbc_to_port(dbc);

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list_pool, &port->write_pool);
	switch (req->status) {
	case 0:
		dbc_start_tx(port);
		break;
	case -ESHUTDOWN:
		break;
	default:
		dev_warn(dbc->dev, "unexpected write complete status %d\n",
			 req->status);
		break;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void xhci_dbc_free_req(struct dbc_request *req)
{
	kfree(req->buf);
	dbc_free_request(req);
}

static int
xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
			struct list_head *head,
			void (*fn)(struct xhci_dbc *, struct dbc_request *))
{
	int i;
	struct dbc_request *req;

	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
		req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
		if (!req)
			break;

		req->length = DBC_MAX_PACKET;
		req->buf = kmalloc(req->length, GFP_KERNEL);
		if (!req->buf) {
			dbc_free_request(req);
			break;
		}

		req->complete = fn;
		list_add_tail(&req->list_pool, head);
	}

	return list_empty(head) ? -ENOMEM : 0;
}

static void
xhci_dbc_free_requests(struct list_head *head)
{
	struct dbc_request *req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		xhci_dbc_free_req(req);
	}
}

static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct dbc_port *port;

	mutex_lock(&dbc_tty_minors_lock);
	port = idr_find(&dbc_tty_minors, tty->index);
	mutex_unlock(&dbc_tty_minors_lock);

	if (!port)
		return -ENXIO;

	tty->driver_data = port;

	return tty_port_install(&port->port, driver, tty);
}

static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
	struct dbc_port *port = tty->driver_data;

	return tty_port_open(&port->port, tty, file);
}

static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
	struct dbc_port *port = tty->driver_data;

	tty_port_close(&port->port, tty, file);
}

static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
			     size_t count)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = kfifo_in(&port->port.xmit_fifo, buf, count);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}

static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&port->port_lock, flags);
	status = kfifo_put(&port->port.xmit_fifo, ch);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void dbc_tty_flush_chars(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static unsigned int dbc_tty_write_room(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	unsigned int room;

	spin_lock_irqsave(&port->port_lock, flags);
	room = kfifo_avail(&port->port.xmit_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return room;
}

static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	unsigned int chars;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = kfifo_len(&port->port.xmit_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return chars;
}

static void dbc_tty_unthrottle(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static const struct tty_operations dbc_tty_ops = {
	.install		= dbc_tty_install,
	.open			= dbc_tty_open,
	.close			= dbc_tty_close,
	.write			= dbc_tty_write,
	.put_char		= dbc_tty_put_char,
	.flush_chars		= dbc_tty_flush_chars,
	.write_room		= dbc_tty_write_room,
	.chars_in_buffer	= dbc_tty_chars_in_buffer,
	.unthrottle		= dbc_tty_unthrottle,
};

static void
dbc_rx_push(struct tasklet_struct *t)
{
	struct dbc_request *req;
	struct tty_struct *tty;
	unsigned long flags;
	bool do_push = false;
	bool disconnect = false;
	struct dbc_port *port = from_tasklet(port, t, push);
	struct list_head *queue = &port->read_queue;

	spin_lock_irqsave(&port->port_lock, flags);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		req = list_first_entry(queue, struct dbc_request, list_pool);

		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case 0:
			break;
		case -ESHUTDOWN:
			disconnect = true;
			break;
		default:
			pr_warn("ttyDBC0: unexpected RX status %d\n",
				req->status);
			break;
		}

		if (req->actual) {
			char *packet = req->buf;
			unsigned int n, size = req->actual;
			int count;

			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
						       size);
			if (count)
				do_push = true;
			if (count != size) {
				port->n_read += count;
				break;
			}
			port->n_read = 0;
		}

		list_move_tail(&req->list_pool, &port->read_pool);
	}

	if (do_push)
		tty_flip_buffer_push(&port->port);

	if (!list_empty(queue) && tty) {
		if (!tty_throttled(tty)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warn("ttyDBC0: RX not scheduled?\n");
		}
	}

	if (!disconnect)
		dbc_start_rx(port);

	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
	unsigned long flags;
	struct dbc_port *port = container_of(_port, struct dbc_port, port);

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_rx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return 0;
}

static const struct tty_port_operations dbc_port_ops = {
	.activate = dbc_port_activate,
};

static void
xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
{
	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	tasklet_setup(&port->push, dbc_rx_push);
	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port.ops = &dbc_port_ops;
	port->n_read = 0;
}

static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
	tasklet_kill(&port->push);
	tty_port_destroy(&port->port);
}

static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
{
	int ret;
	struct device *tty_dev;
	struct dbc_port *port = dbc_to_port(dbc);

	if (port->registered)
		return -EBUSY;

	xhci_dbc_tty_init_port(dbc, port);

	mutex_lock(&dbc_tty_minors_lock);
	port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
	mutex_unlock(&dbc_tty_minors_lock);

	if (port->minor < 0) {
		ret = port->minor;
		goto err_idr;
	}

	ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
			  GFP_KERNEL);
	if (ret)
		goto err_exit_port;

	ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
				      dbc_read_complete);
	if (ret)
		goto err_free_fifo;

	ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
				      dbc_write_complete);
	if (ret)
		goto err_free_requests;

	tty_dev = tty_port_register_device(&port->port,
					   dbc_tty_driver, port->minor, NULL);
	if (IS_ERR(tty_dev)) {
		ret = PTR_ERR(tty_dev);
		goto err_free_requests;
	}

	port->registered = true;

	return 0;

err_free_requests:
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->write_pool);
err_free_fifo:
	kfifo_free(&port->port.xmit_fifo);
err_exit_port:
	idr_remove(&dbc_tty_minors, port->minor);
err_idr:
	xhci_dbc_tty_exit_port(port);

	dev_err(dbc->dev, "can't register tty port, err %d\n", ret);

	return ret;
}

static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
{
	struct dbc_port *port = dbc_to_port(dbc);

	if (!port->registered)
		return;
	tty_unregister_device(dbc_tty_driver, port->minor);
	xhci_dbc_tty_exit_port(port);
	port->registered = false;

	mutex_lock(&dbc_tty_minors_lock);
	idr_remove(&dbc_tty_minors, port->minor);
	mutex_unlock(&dbc_tty_minors_lock);

	kfifo_free(&port->port.xmit_fifo);
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->read_queue);
	xhci_dbc_free_requests(&port->write_pool);
}

static const struct dbc_driver dbc_driver = {
	.configure		= xhci_dbc_tty_register_device,
	.disconnect		= xhci_dbc_tty_unregister_device,
};

int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc;
	struct dbc_port *port;
	int status;

	if (!dbc_tty_driver)
		return -ENODEV;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	dbc = xhci_alloc_dbc(dev, base, &dbc_driver);

	if (!dbc) {
		status = -ENOMEM;
		goto out2;
	}

	dbc->priv = port;

	/* get rid of xhci once this is a real driver binding to a device */
	xhci->dbc = dbc;

	return 0;
out2:
	kfree(port);

	return status;
}

/*
 * undo what probe did, assume dbc is stopped already.
 * we also assume tty_unregister_device() is called before this
 */
void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
{
	struct dbc_port *port = dbc_to_port(dbc);

	xhci_dbc_remove(dbc);
	kfree(port);
}

int dbc_tty_init(void)
{
	int ret;

	idr_init(&dbc_tty_minors);

	dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
					  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(dbc_tty_driver)) {
		idr_destroy(&dbc_tty_minors);
		return PTR_ERR(dbc_tty_driver);
	}

	dbc_tty_driver->driver_name = "dbc_serial";
	dbc_tty_driver->name = "ttyDBC";

	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	dbc_tty_driver->init_termios = tty_std_termios;
	dbc_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	dbc_tty_driver->init_termios.c_ispeed = 9600;
	dbc_tty_driver->init_termios.c_ospeed = 9600;

	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

	ret = tty_register_driver(dbc_tty_driver);
	if (ret) {
		pr_err("Can't register dbc tty driver\n");
		tty_driver_kref_put(dbc_tty_driver);
		idr_destroy(&dbc_tty_minors);
	}

	return ret;
}

void dbc_tty_exit(void)
{
	if (dbc_tty_driver) {
		tty_unregister_driver(dbc_tty_driver);
		tty_driver_kref_put(dbc_tty_driver);
		dbc_tty_driver = NULL;
	}

	idr_destroy(&dbc_tty_minors);
}