// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgtty.c - tty glue for xHCI debug capability
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/idr.h>

#include "xhci.h"
#include "xhci-dbgcap.h"

static struct tty_driver *dbc_tty_driver;
static struct idr dbc_tty_minors;
static DEFINE_MUTEX(dbc_tty_minors_lock);

static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
{
	return dbc->priv;
}

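/*
 * Copy up to @size bytes from the write fifo into @packet. Returns the
 * number of bytes actually copied, which may be less if the fifo holds
 * fewer bytes than requested.
 */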
static unsigned int
dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
{
	unsigned int		len;

	len = kfifo_len(&port->write_fifo);
	if (len < size)
		size = len;
	if (size != 0)
		size = kfifo_out(&port->write_fifo, packet, size);
	return size;
}

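/*
 * Fill free write requests with data from the write fifo and queue them
 * via dbc_ep_queue(). Called with port_lock held; the lock is dropped
 * around dbc_ep_queue(). Wakes up the tty once fifo space has been freed.
 */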
static int dbc_start_tx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	int			len;
	struct dbc_request	*req;
	int			status = 0;
	bool			do_tty_wake = false;
	struct list_head	*pool = &port->write_pool;

	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct dbc_request, list_pool);
		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
		if (len == 0)
			break;
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list_pool);

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);

	return status;
}

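/*
 * Queue all free read requests to the DbC endpoint so that incoming data
 * can be received. Called with port_lock held; the lock is dropped around
 * dbc_ep_queue(). Stops early if the tty is gone or queuing fails.
 */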
static void dbc_start_rx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	struct dbc_request	*req;
	int			status;
	struct list_head	*pool = &port->read_pool;

	while (!list_empty(pool)) {
		if (!port->port.tty)
			break;

		req = list_entry(pool->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		req->length = DBC_MAX_PACKET;

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}
}

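/*
 * Read request completion: park the request on the read_queue and let the
 * rx tasklet push its payload into the tty.
 */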
static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long		flags;
	struct dbc_port		*port = dbc_to_port(dbc);

	spin_lock_irqsave(&port->port_lock, flags);
	list_add_tail(&req->list_pool, &port->read_queue);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

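/*
 * Write request completion: return the request to the write_pool and, on
 * success, try to send more data from the fifo.
 */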
static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long		flags;
	struct dbc_port		*port = dbc_to_port(dbc);

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list_pool, &port->write_pool);
	switch (req->status) {
	case 0:
		dbc_start_tx(port);
		break;
	case -ESHUTDOWN:
		break;
	default:
		dev_warn(dbc->dev, "unexpected write complete status %d\n",
			  req->status);
		break;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void xhci_dbc_free_req(struct dbc_request *req)
{
	kfree(req->buf);
	dbc_free_request(req);
}

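/*
 * Pre-allocate up to DBC_QUEUE_SIZE requests, each with a DBC_MAX_PACKET
 * sized buffer, for one transfer direction and add them to @head. Fails
 * only if not even a single request could be allocated.
 */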
static int
xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
			struct list_head *head,
			void (*fn)(struct xhci_dbc *, struct dbc_request *))
{
	int			i;
	struct dbc_request	*req;

	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
		req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
		if (!req)
			break;

		req->length = DBC_MAX_PACKET;
		req->buf = kmalloc(req->length, GFP_KERNEL);
		if (!req->buf) {
			dbc_free_request(req);
			break;
		}

		req->complete = fn;
		list_add_tail(&req->list_pool, head);
	}

	return list_empty(head) ? -ENOMEM : 0;
}

static void
xhci_dbc_free_requests(struct list_head *head)
{
	struct dbc_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		xhci_dbc_free_req(req);
	}
}

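/* Look up the dbc_port for this minor and bind it to the tty being installed. */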
static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct dbc_port		*port;

	mutex_lock(&dbc_tty_minors_lock);
	port = idr_find(&dbc_tty_minors, tty->index);
	mutex_unlock(&dbc_tty_minors_lock);

	if (!port)
		return -ENXIO;

	tty->driver_data = port;

	return tty_port_install(&port->port, driver, tty);
}

static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
	struct dbc_port		*port = tty->driver_data;

	return tty_port_open(&port->port, tty, file);
}

static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
	struct dbc_port		*port = tty->driver_data;

	tty_port_close(&port->port, tty, file);
}

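/*
 * Buffer the caller's data in the write fifo and start transmission.
 * Returns the number of bytes accepted, which may be less than @count
 * when the fifo is close to full.
 */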
static int dbc_tty_write(struct tty_struct *tty,
			 const unsigned char *buf,
			 int count)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = kfifo_in(&port->write_fifo, buf, count);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}

static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	int			status;

	spin_lock_irqsave(&port->port_lock, flags);
	status = kfifo_put(&port->write_fifo, ch);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void dbc_tty_flush_chars(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static unsigned int dbc_tty_write_room(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	unsigned int		room;

	spin_lock_irqsave(&port->port_lock, flags);
	room = kfifo_avail(&port->write_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return room;
}

static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	unsigned int		chars;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = kfifo_len(&port->write_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return chars;
}

static void dbc_tty_unthrottle(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static const struct tty_operations dbc_tty_ops = {
	.install		= dbc_tty_install,
	.open			= dbc_tty_open,
	.close			= dbc_tty_close,
	.write			= dbc_tty_write,
	.put_char		= dbc_tty_put_char,
	.flush_chars		= dbc_tty_flush_chars,
	.write_room		= dbc_tty_write_room,
	.chars_in_buffer	= dbc_tty_chars_in_buffer,
	.unthrottle		= dbc_tty_unthrottle,
};

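/*
 * RX tasklet: push the payload of completed read requests into the tty
 * flip buffer. A request that could only be consumed partially (throttled
 * or full tty) stays on the read_queue, with port->n_read remembering how
 * far it was consumed; fully consumed requests go back to the read_pool.
 * New reads are started unless the device has disconnected.
 */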
static void dbc_rx_push(struct tasklet_struct *t)
{
	struct dbc_request	*req;
	struct tty_struct	*tty;
	unsigned long		flags;
	bool			do_push = false;
	bool			disconnect = false;
	struct dbc_port		*port = from_tasklet(port, t, push);
	struct list_head	*queue = &port->read_queue;

	spin_lock_irqsave(&port->port_lock, flags);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		req = list_first_entry(queue, struct dbc_request, list_pool);

		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case 0:
			break;
		case -ESHUTDOWN:
			disconnect = true;
			break;
		default:
			pr_warn("ttyDBC0: unexpected RX status %d\n",
				req->status);
			break;
		}

		if (req->actual) {
			char		*packet = req->buf;
			unsigned int	n, size = req->actual;
			int		count;

			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
						       size);
			if (count)
				do_push = true;
			if (count != size) {
				port->n_read += count;
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list_pool, &port->read_pool);
	}

	if (do_push)
		tty_flip_buffer_push(&port->port);

	if (!list_empty(queue) && tty) {
		if (!tty_throttled(tty)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warn("ttyDBC0: RX not scheduled?\n");
		}
	}

	if (!disconnect)
		dbc_start_rx(port);

	spin_unlock_irqrestore(&port->port_lock, flags);
}

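/* First open of the port: start queuing read requests for incoming data. */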
static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
	unsigned long	flags;
	struct dbc_port	*port = container_of(_port, struct dbc_port, port);

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_rx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return 0;
}

static const struct tty_port_operations dbc_port_ops = {
	.activate =	dbc_port_activate,
};

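/* Set up the tty_port, rx tasklet, request lists and lock of @port. */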
static void
xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
{
	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	tasklet_setup(&port->push, dbc_rx_push);
	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port.ops =	&dbc_port_ops;
	port->n_read =		0;
}

static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
	tasklet_kill(&port->push);
	tty_port_destroy(&port->port);
}

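/*
 * DbC .configure callback: allocate a minor number, the write fifo and the
 * read/write request pools, then register the tty device. Undone by
 * xhci_dbc_tty_unregister_device().
 */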
static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
{
	int			ret;
	struct device		*tty_dev;
	struct dbc_port		*port = dbc_to_port(dbc);

	if (port->registered)
		return -EBUSY;

	xhci_dbc_tty_init_port(dbc, port);

	mutex_lock(&dbc_tty_minors_lock);
	port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
	mutex_unlock(&dbc_tty_minors_lock);

	if (port->minor < 0) {
		ret = port->minor;
		goto err_idr;
	}

	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
	if (ret)
		goto err_exit_port;

	ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
				      dbc_read_complete);
	if (ret)
		goto err_free_fifo;

	ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
				      dbc_write_complete);
	if (ret)
		goto err_free_requests;

	tty_dev = tty_port_register_device(&port->port,
					   dbc_tty_driver, port->minor, NULL);
	if (IS_ERR(tty_dev)) {
		ret = PTR_ERR(tty_dev);
		goto err_free_requests;
	}

	port->registered = true;

	return 0;

err_free_requests:
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->write_pool);
err_free_fifo:
	kfifo_free(&port->write_fifo);
err_exit_port:
	idr_remove(&dbc_tty_minors, port->minor);
err_idr:
	xhci_dbc_tty_exit_port(port);

	dev_err(dbc->dev, "can't register tty port, err %d\n", ret);

	return ret;
}

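/*
 * DbC .disconnect callback: tear down everything set up by
 * xhci_dbc_tty_register_device().
 */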
static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
{
	struct dbc_port		*port = dbc_to_port(dbc);

	if (!port->registered)
		return;
	tty_unregister_device(dbc_tty_driver, port->minor);
	xhci_dbc_tty_exit_port(port);
	port->registered = false;

	mutex_lock(&dbc_tty_minors_lock);
	idr_remove(&dbc_tty_minors, port->minor);
	mutex_unlock(&dbc_tty_minors_lock);

	kfifo_free(&port->write_fifo);
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->read_queue);
	xhci_dbc_free_requests(&port->write_pool);
}

static const struct dbc_driver dbc_driver = {
	.configure		= xhci_dbc_tty_register_device,
	.disconnect		= xhci_dbc_tty_unregister_device,
};

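/*
 * Allocate the per-port state and the DbC instance for an xHCI host and
 * tie them together. The tty device itself is only registered later, via
 * the .configure callback, once the debug capability is up.
 */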
int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc;
	struct dbc_port		*port;
	int			status;

	if (!dbc_tty_driver)
		return -ENODEV;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	dbc = xhci_alloc_dbc(dev, base, &dbc_driver);

	if (!dbc) {
		status = -ENOMEM;
		goto out2;
	}

	dbc->priv = port;

	/* get rid of xhci once this is a real driver binding to a device */
	xhci->dbc = dbc;

	return 0;
out2:
	kfree(port);

	return status;
}

/*
 * Undo what probe did; assume the DbC is already stopped and that
 * tty_unregister_device() has been called before this.
 */
void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
{
	struct dbc_port         *port = dbc_to_port(dbc);

	xhci_dbc_remove(dbc);
	kfree(port);
}

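/*
 * One-time init: allocate and register the "ttyDBC" tty driver with room
 * for 64 minors and sane default termios.
 */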
int dbc_tty_init(void)
{
	int		ret;

	idr_init(&dbc_tty_minors);

	dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
					  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(dbc_tty_driver)) {
		idr_destroy(&dbc_tty_minors);
		return PTR_ERR(dbc_tty_driver);
	}

	dbc_tty_driver->driver_name = "dbc_serial";
	dbc_tty_driver->name = "ttyDBC";

	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	dbc_tty_driver->init_termios = tty_std_termios;
	dbc_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	dbc_tty_driver->init_termios.c_ispeed = 9600;
	dbc_tty_driver->init_termios.c_ospeed = 9600;

	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

	ret = tty_register_driver(dbc_tty_driver);
	if (ret) {
		pr_err("Can't register dbc tty driver\n");
		tty_driver_kref_put(dbc_tty_driver);
		idr_destroy(&dbc_tty_minors);
	}

	return ret;
}

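/* Undo dbc_tty_init(): unregister and release the tty driver. */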
void dbc_tty_exit(void)
{
	if (dbc_tty_driver) {
		tty_unregister_driver(dbc_tty_driver);
		tty_driver_kref_put(dbc_tty_driver);
		dbc_tty_driver = NULL;
	}

	idr_destroy(&dbc_tty_minors);
}
583