// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgtty.c - tty glue for xHCI debug capability
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/idr.h>

#include "xhci.h"
#include "xhci-dbgcap.h"

static struct tty_driver *dbc_tty_driver;
static struct idr dbc_tty_minors;
static DEFINE_MUTEX(dbc_tty_minors_lock);

static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
{
	return dbc->priv;
}

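/*
 * Copy data from the tty xmit kfifo into a request buffer, at most one max
 * packet at a time. If tx_boundary is set, don't read past it, so the
 * requests queued for the current tty write end exactly at the write
 * boundary. Returns the number of bytes copied.
 */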
static unsigned int
dbc_kfifo_to_req(struct dbc_port *port, char *packet)
{
	unsigned int	len;

	len = kfifo_len(&port->port.xmit_fifo);

	if (len == 0)
		return 0;

	len = min(len, DBC_MAX_PACKET);

	if (port->tx_boundary)
		len = min(port->tx_boundary, len);

	len = kfifo_out(&port->port.xmit_fifo, packet, len);

	if (port->tx_boundary)
		port->tx_boundary -= len;

	return len;
}

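/*
 * Fill idle requests from the write pool with data from the xmit kfifo and
 * queue them to the controller. The port lock is dropped around
 * dbc_ep_queue(); the tty writer is woken if fifo space was freed up.
 */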
static int dbc_do_start_tx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	int			len;
	struct dbc_request	*req;
	int			status = 0;
	bool			do_tty_wake = false;
	struct list_head	*pool = &port->write_pool;

	port->tx_running = true;

	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct dbc_request, list_pool);
		len = dbc_kfifo_to_req(port, req->buf);
		if (len == 0)
			break;
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list_pool);

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}

	port->tx_running = false;

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);

	return status;
}

/* must be called with port->port_lock held */
static int dbc_start_tx(struct dbc_port *port)
{
	lockdep_assert_held(&port->port_lock);

	if (port->tx_running)
		return -EBUSY;

	return dbc_do_start_tx(port);
}

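/*
 * Queue all idle requests from the read pool to the controller so received
 * data has somewhere to land. The port lock is dropped around dbc_ep_queue().
 */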
static void dbc_start_rx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	struct dbc_request	*req;
	int			status;
	struct list_head	*pool = &port->read_pool;

	while (!list_empty(pool)) {
		if (!port->port.tty)
			break;

		req = list_entry(pool->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		req->length = DBC_MAX_PACKET;

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}
}

/*
 * Queue received data to the tty buffer and push it.
 *
 * Returns the number of remaining bytes that didn't fit into the tty buffer,
 * i.e. 0 if all bytes were successfully moved. In case of error returns a
 * negative errno.
 * Call with lock held.
 */
static int dbc_rx_push_buffer(struct dbc_port *port, struct dbc_request *req)
{
	char		*packet = req->buf;
	unsigned int	n, size = req->actual;
	int		count;

	if (!req->actual)
		return 0;

	/* if n_read is set then request was partially moved to tty buffer */
	n = port->n_read;
	if (n) {
		packet += n;
		size -= n;
	}

	count = tty_insert_flip_string(&port->port, packet, size);
	if (count)
		tty_flip_buffer_push(&port->port);
	if (count != size) {
		port->n_read += count;
		return size - count;
	}

	port->n_read = 0;
	return 0;
}

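/*
 * Completion handler for receive requests. Data is copied straight to the
 * tty buffer when possible; otherwise the request is parked on read_queue
 * and the push tasklet finishes the job later.
 */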
static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long		flags;
	struct dbc_port		*port = dbc_to_port(dbc);
	struct tty_struct	*tty;
	int			untransferred;

	tty = port->port.tty;

	spin_lock_irqsave(&port->port_lock, flags);

	/*
	 * Only defer copying data to the tty buffer in case:
	 * - !list_empty(&port->read_queue), there is older data still pending
	 * - the tty is throttled
	 * - not all data could be copied to the buffer, defer the remaining part
	 */

	if (list_empty(&port->read_queue) && tty && !tty_throttled(tty)) {
		untransferred = dbc_rx_push_buffer(port, req);
		if (untransferred == 0) {
			list_add_tail(&req->list_pool, &port->read_pool);
			if (req->status != -ESHUTDOWN)
				dbc_start_rx(port);
			goto out;
		}
	}

	/* defer moving data from req to tty buffer to a tasklet */
	list_add_tail(&req->list_pool, &port->read_queue);
	tasklet_schedule(&port->push);
out:
	spin_unlock_irqrestore(&port->port_lock, flags);
}

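/*
 * Completion handler for transmit requests: return the request to the write
 * pool and, on successful completion, keep draining the xmit kfifo.
 */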
static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long		flags;
	struct dbc_port		*port = dbc_to_port(dbc);

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list_pool, &port->write_pool);
	switch (req->status) {
	case 0:
		dbc_start_tx(port);
		break;
	case -ESHUTDOWN:
		break;
	default:
		dev_warn(dbc->dev, "unexpected write complete status %d\n",
			  req->status);
		break;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void xhci_dbc_free_req(struct dbc_request *req)
{
	kfree(req->buf);
	dbc_free_request(req);
}

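/*
 * Preallocate up to DBC_QUEUE_SIZE requests with DBC_MAX_PACKET sized
 * buffers for one transfer direction. Succeeds as long as at least one
 * request could be allocated.
 */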
static int
xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
			struct list_head *head,
			void (*fn)(struct xhci_dbc *, struct dbc_request *))
{
	int			i;
	struct dbc_request	*req;

	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
		req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
		if (!req)
			break;

		req->length = DBC_MAX_PACKET;
		req->buf = kmalloc(req->length, GFP_KERNEL);
		if (!req->buf) {
			dbc_free_request(req);
			break;
		}

		req->complete = fn;
		list_add_tail(&req->list_pool, head);
	}

	return list_empty(head) ? -ENOMEM : 0;
}

static void
xhci_dbc_free_requests(struct list_head *head)
{
	struct dbc_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		xhci_dbc_free_req(req);
	}
}

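/* look up the dbc_port for this minor and attach it to the tty being opened */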
static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct dbc_port		*port;

	mutex_lock(&dbc_tty_minors_lock);
	port = idr_find(&dbc_tty_minors, tty->index);
	mutex_unlock(&dbc_tty_minors_lock);

	if (!port)
		return -ENXIO;

	tty->driver_data = port;

	return tty_port_install(&port->port, driver, tty);
}

static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
	struct dbc_port		*port = tty->driver_data;

	return tty_port_open(&port->port, tty, file);
}

static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
	struct dbc_port		*port = tty->driver_data;

	tty_port_close(&port->port, tty, file);
}

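/*
 * Buffer a tty write in the xmit kfifo and kick off transmission. See the
 * tx_boundary handling below: each tty write is queued as one transfer, and
 * new data is refused until the previous write has been turned into TRBs.
 */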
static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
			     size_t count)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	unsigned int		written = 0;

	spin_lock_irqsave(&port->port_lock, flags);

	/*
	 * Treat each tty write as one USB transfer. Make sure the writes are
	 * turned into TRB requests having the same size boundaries as the tty
	 * writes. Don't add data to the kfifo before the previous write has
	 * been turned into TRBs.
	 */
	if (port->tx_boundary) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return 0;
	}

	if (count) {
		written = kfifo_in(&port->port.xmit_fifo, buf, count);

		if (written == count)
			port->tx_boundary = kfifo_len(&port->port.xmit_fifo);

		dbc_start_tx(port);
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	return written;
}

static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	int			status;

	spin_lock_irqsave(&port->port_lock, flags);
	status = kfifo_put(&port->port.xmit_fifo, ch);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void dbc_tty_flush_chars(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

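/*
 * Report free space in the xmit kfifo. While a previous write is still being
 * turned into requests (tx_boundary != 0) report no room, matching the
 * "one tty write per transfer" rule in dbc_tty_write().
 */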
static unsigned int dbc_tty_write_room(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	unsigned int		room;

	spin_lock_irqsave(&port->port_lock, flags);
	room = kfifo_avail(&port->port.xmit_fifo);

	if (port->tx_boundary)
		room = 0;

	spin_unlock_irqrestore(&port->port_lock, flags);

	return room;
}

static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	unsigned int		chars;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = kfifo_len(&port->port.xmit_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return chars;
}

static void dbc_tty_unthrottle(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static const struct tty_operations dbc_tty_ops = {
	.install		= dbc_tty_install,
	.open			= dbc_tty_open,
	.close			= dbc_tty_close,
	.write			= dbc_tty_write,
	.put_char		= dbc_tty_put_char,
	.flush_chars		= dbc_tty_flush_chars,
	.write_room		= dbc_tty_write_room,
	.chars_in_buffer	= dbc_tty_chars_in_buffer,
	.unthrottle		= dbc_tty_unthrottle,
};

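/*
 * Tasklet that moves deferred receive data from read_queue into the tty
 * buffer, recycling drained requests back to the read pool and restarting
 * reception unless the DbC has disconnected.
 */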
static void dbc_rx_push(struct tasklet_struct *t)
{
	struct dbc_request	*req;
	struct tty_struct	*tty;
	unsigned long		flags;
	bool			disconnect = false;
	struct dbc_port		*port = from_tasklet(port, t, push);
	struct list_head	*queue = &port->read_queue;
	int			untransferred;

	spin_lock_irqsave(&port->port_lock, flags);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		req = list_first_entry(queue, struct dbc_request, list_pool);

		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case 0:
			break;
		case -ESHUTDOWN:
			disconnect = true;
			break;
		default:
			pr_warn("ttyDBC0: unexpected RX status %d\n",
				req->status);
			break;
		}

		untransferred = dbc_rx_push_buffer(port, req);
		if (untransferred > 0)
			break;

		list_move_tail(&req->list_pool, &port->read_pool);
	}

	if (!list_empty(queue))
		tasklet_schedule(&port->push);

	if (!disconnect)
		dbc_start_rx(port);

	spin_unlock_irqrestore(&port->port_lock, flags);
}

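/* tty_port activate callback: start receiving as soon as the port is opened */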
static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
	unsigned long	flags;
	struct dbc_port	*port = container_of(_port, struct dbc_port, port);

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_rx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return 0;
}

static const struct tty_port_operations dbc_port_ops = {
	.activate =	dbc_port_activate,
};

static void
xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
{
	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	tasklet_setup(&port->push, dbc_rx_push);
	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port.ops =	&dbc_port_ops;
	port->n_read =		0;
}

static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
	tasklet_kill(&port->push);
	tty_port_destroy(&port->port);
}

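/*
 * .configure callback for the DbC core: allocate the minor, the xmit fifo
 * and the request pools, then register the tty device (it shows up as
 * /dev/ttyDBC<minor>).
 */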
static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
{
	int			ret;
	struct device		*tty_dev;
	struct dbc_port		*port = dbc_to_port(dbc);

	if (port->registered)
		return -EBUSY;

	xhci_dbc_tty_init_port(dbc, port);

	mutex_lock(&dbc_tty_minors_lock);
	port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
	mutex_unlock(&dbc_tty_minors_lock);

	if (port->minor < 0) {
		ret = port->minor;
		goto err_idr;
	}

	ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
			  GFP_KERNEL);
	if (ret)
		goto err_exit_port;

	ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
				      dbc_read_complete);
	if (ret)
		goto err_free_fifo;

	ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
				      dbc_write_complete);
	if (ret)
		goto err_free_requests;

	tty_dev = tty_port_register_device(&port->port,
					   dbc_tty_driver, port->minor, NULL);
	if (IS_ERR(tty_dev)) {
		ret = PTR_ERR(tty_dev);
		goto err_free_requests;
	}

	port->registered = true;

	return 0;

err_free_requests:
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->write_pool);
err_free_fifo:
	kfifo_free(&port->port.xmit_fifo);
err_exit_port:
	idr_remove(&dbc_tty_minors, port->minor);
err_idr:
	xhci_dbc_tty_exit_port(port);

	dev_err(dbc->dev, "can't register tty port, err %d\n", ret);

	return ret;
}

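/*
 * .disconnect callback for the DbC core: hang up and unregister the tty,
 * release the minor and free the fifo and request pools.
 */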
static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
{
	struct dbc_port		*port = dbc_to_port(dbc);

	if (!port->registered)
		return;
	/*
	 * Hang up the TTY. This wakes up any blocked
	 * writers and causes subsequent writes to fail.
	 */
	tty_vhangup(port->port.tty);

	tty_unregister_device(dbc_tty_driver, port->minor);
	xhci_dbc_tty_exit_port(port);
	port->registered = false;

	mutex_lock(&dbc_tty_minors_lock);
	idr_remove(&dbc_tty_minors, port->minor);
	mutex_unlock(&dbc_tty_minors_lock);

	kfifo_free(&port->port.xmit_fifo);
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->read_queue);
	xhci_dbc_free_requests(&port->write_pool);
}

static const struct dbc_driver dbc_driver = {
	.configure		= xhci_dbc_tty_register_device,
	.disconnect		= xhci_dbc_tty_unregister_device,
};

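/*
 * Allocate the DbC instance and its tty glue for an xHC. The tty device
 * itself is only registered later, through the .configure callback.
 */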
int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc;
	struct dbc_port		*port;
	int			status;

	if (!dbc_tty_driver)
		return -ENODEV;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	dbc = xhci_alloc_dbc(dev, base, &dbc_driver);

	if (!dbc) {
		status = -ENOMEM;
		goto out2;
	}

	dbc->priv = port;

	/* get rid of xhci once this is a real driver binding to a device */
	xhci->dbc = dbc;

	return 0;
out2:
	kfree(port);

	return status;
}

/*
 * Undo what probe did; assume the dbc is already stopped.
 * We also assume tty_unregister_device() has been called before this.
 */
void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
{
	struct dbc_port         *port = dbc_to_port(dbc);

	xhci_dbc_remove(dbc);
	kfree(port);
}

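/*
 * One-time setup: allocate and register the "ttyDBC" tty driver shared by
 * all DbC ports. Individual ports are added in xhci_dbc_tty_register_device().
 */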
int dbc_tty_init(void)
{
	int		ret;

	idr_init(&dbc_tty_minors);

	dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
					  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(dbc_tty_driver)) {
		idr_destroy(&dbc_tty_minors);
		return PTR_ERR(dbc_tty_driver);
	}

	dbc_tty_driver->driver_name = "dbc_serial";
	dbc_tty_driver->name = "ttyDBC";

	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	dbc_tty_driver->init_termios = tty_std_termios;
	dbc_tty_driver->init_termios.c_lflag &= ~ECHO;
	dbc_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	dbc_tty_driver->init_termios.c_ispeed = 9600;
	dbc_tty_driver->init_termios.c_ospeed = 9600;

	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

	ret = tty_register_driver(dbc_tty_driver);
	if (ret) {
		pr_err("Can't register dbc tty driver\n");
		tty_driver_kref_put(dbc_tty_driver);
		idr_destroy(&dbc_tty_minors);
	}

	return ret;
}

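/* undo dbc_tty_init(): unregister the shared tty driver and drop the minor idr */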
void dbc_tty_exit(void)
{
	if (dbc_tty_driver) {
		tty_unregister_driver(dbc_tty_driver);
		tty_driver_kref_put(dbc_tty_driver);
		dbc_tty_driver = NULL;
	}

	idr_destroy(&dbc_tty_minors);
}