1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * xhci-dbgcap.c - xHCI debug capability support
4 *
5 * Copyright (C) 2017 Intel Corporation
6 *
7 * Author: Lu Baolu <baolu.lu@linux.intel.com>
8 */
9 #include <linux/bug.h>
10 #include <linux/device.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/errno.h>
13 #include <linux/kstrtox.h>
14 #include <linux/list.h>
15 #include <linux/nls.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/string.h>
20 #include <linux/sysfs.h>
21 #include <linux/types.h>
22 #include <linux/workqueue.h>
23
24 #include <linux/io-64-nonatomic-lo-hi.h>
25
26 #include <asm/byteorder.h>
27
28 #include "xhci.h"
29 #include "xhci-trace.h"
30 #include "xhci-dbgcap.h"
31
dbc_free_ctx(struct device * dev,struct xhci_container_ctx * ctx)32 static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
33 {
34 if (!ctx)
35 return;
36 dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
37 kfree(ctx);
38 }
39
40 /* we use only one segment for DbC rings */
dbc_ring_free(struct device * dev,struct xhci_ring * ring)41 static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
42 {
43 if (!ring)
44 return;
45
46 if (ring->first_seg) {
47 dma_free_coherent(dev, TRB_SEGMENT_SIZE,
48 ring->first_seg->trbs,
49 ring->first_seg->dma);
50 kfree(ring->first_seg);
51 }
52 kfree(ring);
53 }
54
xhci_dbc_populate_strings(struct dbc_str_descs * strings)55 static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
56 {
57 struct usb_string_descriptor *s_desc;
58 u32 string_length;
59
60 /* Serial string: */
61 s_desc = (struct usb_string_descriptor *)strings->serial;
62 utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
63 UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
64 DBC_MAX_STRING_LENGTH);
65
66 s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
67 s_desc->bDescriptorType = USB_DT_STRING;
68 string_length = s_desc->bLength;
69 string_length <<= 8;
70
71 /* Product string: */
72 s_desc = (struct usb_string_descriptor *)strings->product;
73 utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
74 UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
75 DBC_MAX_STRING_LENGTH);
76
77 s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
78 s_desc->bDescriptorType = USB_DT_STRING;
79 string_length += s_desc->bLength;
80 string_length <<= 8;
81
82 /* Manufacture string: */
83 s_desc = (struct usb_string_descriptor *)strings->manufacturer;
84 utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
85 strlen(DBC_STRING_MANUFACTURER),
86 UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
87 DBC_MAX_STRING_LENGTH);
88
89 s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
90 s_desc->bDescriptorType = USB_DT_STRING;
91 string_length += s_desc->bLength;
92 string_length <<= 8;
93
94 /* String0: */
95 strings->string0[0] = 4;
96 strings->string0[1] = USB_DT_STRING;
97 strings->string0[2] = 0x09;
98 strings->string0[3] = 0x04;
99 string_length += 4;
100
101 return string_length;
102 }
103
/*
 * Program the DbC info and endpoint contexts and the DbC device-info
 * registers.  Must be called after the rings, context and string table
 * have been allocated (see xhci_dbc_mem_init()).
 */
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context *info;
	struct xhci_ep_ctx *ep_ctx;
	u32 dev_info;
	dma_addr_t deq, dma;
	unsigned int max_burst;

	if (!dbc)
		return;

	/*
	 * Populate info Context: the four string descriptors live back to
	 * back in one DMA buffer, DBC_MAX_STRING_LENGTH bytes apart.
	 */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	/* packed descriptor lengths from xhci_dbc_populate_strings() */
	info->length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	/* dequeue pointer carries the ring's cycle state in bit 0 */
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	/* devinfo1: idVendor in the high 16 bits, protocol in the low byte */
	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
	writel(dev_info, &dbc->regs->devinfo1);

	/* devinfo2: bcdDevice in the high 16 bits, idProduct in the low 16 */
	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
	writel(dev_info, &dbc->regs->devinfo2);
}
148
/*
 * Complete a request: unlink it from its pending list, record its final
 * status, unmap its DMA buffer and invoke the owner's completion callback.
 *
 * Called with dbc->lock held; the lock is dropped around the completion
 * callback (as the sparse annotations indicate) so the callback may
 * re-queue requests.
 */
static void xhci_dbc_giveback(struct dbc_request *req, int status)
__releases(&dbc->lock)
__acquires(&dbc->lock)
{
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	/* keep an earlier final status; only replace "still in flight" */
	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}
175
trb_to_noop(union xhci_trb * trb)176 static void trb_to_noop(union xhci_trb *trb)
177 {
178 trb->generic.field[0] = 0;
179 trb->generic.field[1] = 0;
180 trb->generic.field[2] = 0;
181 trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
182 trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
183 }
184
/*
 * Cancel one in-flight request: neutralize its TRB so hardware skips it,
 * then give it back with -ESHUTDOWN.
 */
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	trb_to_noop(req->trb);
	xhci_dbc_giveback(req, -ESHUTDOWN);
}
190
/*
 * Cancel every pending request on an endpoint.  The _safe iterator is
 * required because xhci_dbc_giveback() removes each request from the
 * list (and briefly drops dbc->lock for the completion callback).
 */
static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}
198
/* Cancel all pending transfers on both bulk endpoints. */
static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}
204
205 struct dbc_request *
dbc_alloc_request(struct xhci_dbc * dbc,unsigned int direction,gfp_t flags)206 dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
207 {
208 struct dbc_request *req;
209
210 if (direction != BULK_IN &&
211 direction != BULK_OUT)
212 return NULL;
213
214 if (!dbc)
215 return NULL;
216
217 req = kzalloc(sizeof(*req), flags);
218 if (!req)
219 return NULL;
220
221 req->dbc = dbc;
222 INIT_LIST_HEAD(&req->list_pending);
223 INIT_LIST_HEAD(&req->list_pool);
224 req->direction = direction;
225
226 trace_xhci_dbc_alloc_request(req);
227
228 return req;
229 }
230
/* Release a request obtained from dbc_alloc_request(). */
void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}
238
/*
 * Write a TRB into the ring's enqueue slot and advance the enqueue
 * pointer.  The caller controls the cycle bit via field4 and rings the
 * doorbell itself.
 */
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	/* DbC rings are single-segment: the last TRB is a link back to start */
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		/* hand the link TRB to hardware by toggling its cycle bit */
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}
261
xhci_dbc_queue_bulk_tx(struct dbc_ep * dep,struct dbc_request * req)262 static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
263 struct dbc_request *req)
264 {
265 u64 addr;
266 union xhci_trb *trb;
267 unsigned int num_trbs;
268 struct xhci_dbc *dbc = req->dbc;
269 struct xhci_ring *ring = dep->ring;
270 u32 length, control, cycle;
271
272 num_trbs = count_trbs(req->dma, req->length);
273 WARN_ON(num_trbs != 1);
274 if (ring->num_trbs_free < num_trbs)
275 return -EBUSY;
276
277 addr = req->dma;
278 trb = ring->enqueue;
279 cycle = ring->cycle_state;
280 length = TRB_LEN(req->length);
281 control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
282
283 if (cycle)
284 control &= cpu_to_le32(~TRB_CYCLE);
285 else
286 control |= cpu_to_le32(TRB_CYCLE);
287
288 req->trb = ring->enqueue;
289 req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
290 xhci_dbc_queue_trb(ring,
291 lower_32_bits(addr),
292 upper_32_bits(addr),
293 length, control);
294
295 /*
296 * Add a barrier between writes of trb fields and flipping
297 * the cycle bit:
298 */
299 wmb();
300
301 if (cycle)
302 trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
303 else
304 trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);
305
306 writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);
307
308 return 0;
309 }
310
/*
 * Map a request's buffer for DMA and queue it on its endpoint's ring.
 * Called with dbc->lock held.
 *
 * Returns 0 on success, -EINVAL for an empty buffer/length, or -EFAULT
 * if DMA mapping or TRB queueing fails (the mapping is undone on the
 * latter path).
 */
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int ret;
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		/* undo the mapping; the request was never put on the ring */
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}
348
/*
 * Submit a prepared request to its endpoint.
 *
 * Accepted only while the DbC is in DS_CONFIGURED (returns -ESHUTDOWN
 * otherwise); -ENODEV if the request has no dbc, -EINVAL for a bad
 * direction.  Kicks the event work immediately so completions are
 * polled without waiting for the normal poll interval.
 */
int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long flags;
	struct xhci_dbc *dbc = req->dbc;
	int ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}
373
/* Initialize one endpoint descriptor; direction doubles as eps[] index. */
static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep *dep = &dbc->eps[direction];

	dep->dbc = dbc;
	dep->direction = direction;
	if (direction)
		dep->ring = dbc->ring_in;
	else
		dep->ring = dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}
385
/* Initialize both bulk endpoint descriptors. */
static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}
391
/* Clear both endpoint descriptors on teardown. */
static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
}
396
/*
 * Allocate a single-entry Event Ring Segment Table describing the
 * event ring's one segment.  Returns 0 or -ENOMEM.
 */
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	struct xhci_erst_entry *entry;

	erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;

	entry = &erst->entries[0];
	entry->seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	entry->rsvd = 0;

	return 0;
}
411
/* Free the ERST allocated by dbc_erst_alloc(). */
static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
			  erst->erst_dma_addr);
	erst->entries = NULL;
}
418
419 static struct xhci_container_ctx *
dbc_alloc_ctx(struct device * dev,gfp_t flags)420 dbc_alloc_ctx(struct device *dev, gfp_t flags)
421 {
422 struct xhci_container_ctx *ctx;
423
424 ctx = kzalloc(sizeof(*ctx), flags);
425 if (!ctx)
426 return NULL;
427
428 /* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes*/
429 ctx->size = 3 * DBC_CONTEXT_SIZE;
430 ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
431 if (!ctx->bytes) {
432 kfree(ctx);
433 return NULL;
434 }
435 return ctx;
436 }
437
/*
 * Allocate a single-segment DbC ring of the given type.  Transfer rings
 * get a link TRB (with toggle) pointing back at the segment start; the
 * event ring does not.  Returns NULL on allocation failure.
 */
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	/* one segment that links to itself */
	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		/* link back to the start of this same segment */
		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring, 1);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}
482
/*
 * Allocate and wire up all DbC data structures: event and transfer
 * rings, ERST, contexts and string table, then program the ERST/ERDP
 * registers and contexts.  Leaves the DbC in DS_INITIALIZED.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is unwound via the goto chain below.
 */
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct device *dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(*dbc->string);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.num_entries, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	/* point ERDP at the event ring's current dequeue position */
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

	/* unwind in reverse allocation order */
string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}
554
xhci_dbc_mem_cleanup(struct xhci_dbc * dbc)555 static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
556 {
557 if (!dbc)
558 return;
559
560 xhci_dbc_eps_exit(dbc);
561
562 dma_free_coherent(dbc->dev, dbc->string_size, dbc->string, dbc->string_dma);
563 dbc->string = NULL;
564
565 dbc_free_ctx(dbc->dev, dbc->ctx);
566 dbc->ctx = NULL;
567
568 dbc_erst_free(dbc->dev, &dbc->erst);
569 dbc_ring_free(dbc->dev, dbc->ring_out);
570 dbc_ring_free(dbc->dev, dbc->ring_in);
571 dbc_ring_free(dbc->dev, dbc->ring_evt);
572 dbc->ring_in = NULL;
573 dbc->ring_out = NULL;
574 dbc->ring_evt = NULL;
575 }
576
/*
 * Enable the DbC hardware: clear the control register, wait for the
 * enable bit to drop, set up memory, then set DBC/port enable and wait
 * for the enable bit to latch.  Transitions DS_DISABLED -> DS_ENABLED.
 *
 * Called with dbc->lock held (hence GFP_ATOMIC for the allocations).
 * Returns 0 or a negative error from the handshakes / mem init.
 */
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	u32 ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	/* make sure the controller is fully disabled first */
	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	/* wait for the hardware to acknowledge the enable */
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}
609
/*
 * Disable the DbC hardware and return to DS_DISABLED.  Called with
 * dbc->lock held; returns -EINVAL if already disabled.
 */
static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}
620
/*
 * Start the DbC: take a runtime-PM reference, enable the hardware under
 * the lock, and schedule the event-polling work.  The PM reference is
 * dropped again if enabling fails (xhci_dbc_stop() drops it otherwise).
 */
static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work,
				msecs_to_jiffies(dbc->poll_interval));
}
642
/*
 * Stop the DbC: notify the function driver if a host was attached,
 * cancel the polling work (outside the lock), disable the hardware,
 * free the DbC memory and drop the runtime-PM reference taken in
 * xhci_dbc_start().  No-op when already disabled.
 */
static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	/* must run before taking the lock; the work takes it too */
	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);
	if (ret)
		return;

	xhci_dbc_mem_cleanup(dbc);
	pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
}
672
673 static void
handle_ep_halt_changes(struct xhci_dbc * dbc,struct dbc_ep * dep,bool halted)674 handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
675 {
676 if (halted) {
677 dev_info(dbc->dev, "DbC Endpoint halted\n");
678 dep->halted = 1;
679
680 } else if (dep->halted) {
681 dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
682 dep->halted = 0;
683
684 if (!list_empty(&dep->list_pending))
685 writel(DBC_DOOR_BELL_TARGET(dep->direction),
686 &dbc->regs->doorbell);
687 }
688 }
689
/*
 * Log the port change bits from a Port Status Change event and ack them
 * by writing portsc back (write-1-to-clear), except the reset-change
 * bit, which is consumed by the state machine in
 * xhci_dbc_do_handle_events().
 */
static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32 portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* Port reset change bit will be cleared in other place: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
711
/*
 * Handle a Transfer Event TRB from the DbC event ring: match it to the
 * pending request whose TRB DMA address it reports, translate the
 * completion code into a status, and give the request back.
 *
 * Called with dbc->lock held (xhci_dbc_giveback() drops it briefly
 * around the completion callback).
 */
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	struct xhci_ep_ctx *ep_ctx;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ?
			get_out_ep(dbc) : get_in_ep(dbc);
	ep_ctx = (ep_id == EPID_OUT) ?
			dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
	ring = dep->ring;

	/*
	 * Match the pending request: while scanning, also retire any
	 * request previously parked by the spurious-stall handling below
	 * once hardware has moved past it.
	 */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
		if (r->status == -COMP_STALL_ERROR) {
			dev_warn(dbc->dev, "Give back stale stalled req\n");
			ring->num_trbs_free++;
			xhci_dbc_giveback(r, 0);
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	switch (comp_code) {
	case COMP_SUCCESS:
		/* full transfer: event's残remaining length is not meaningful */
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
			 event->trans_event.buffer, remain_length, ep_ctx->deq);
		status = 0;
		dep->halted = 1;

		/*
		 * xHC DbC may trigger a STALL bulk xfer event when host sends a
		 * ClearFeature(ENDPOINT_HALT) request even if there wasn't an
		 * active bulk transfer.
		 *
		 * Don't give back this transfer request as hardware will later
		 * start processing TRBs starting from this 'STALLED' TRB,
		 * causing TRBs and requests to be out of sync.
		 *
		 * If STALL event shows some bytes were transferred then assume
		 * it's an actual transfer issue and give back the request.
		 * In this case mark the TRB as No-Op to avoid hw from using the
		 * TRB again.
		 */

		if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
			dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
			if (remain_length == req->length) {
				dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
				req->status = -COMP_STALL_ERROR;
				req->actual = 0;
				return;
			}
			dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
			trb_to_noop(req->trb);
		}
		break;

	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* the transfer TRB has been consumed; reclaim its ring slot */
	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}
809
inc_evt_deq(struct xhci_ring * ring)810 static void inc_evt_deq(struct xhci_ring *ring)
811 {
812 /* If on the last TRB of the segment go back to the beginning */
813 if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
814 ring->cycle_state ^= 1;
815 ring->dequeue = ring->deq_seg->trbs;
816 return;
817 }
818 ring->dequeue++;
819 }
820
/*
 * Run one pass of the DbC state machine and drain the event ring.
 * Called from the event work with dbc->lock held.
 *
 * Returns an evtreturn telling the caller what to do next:
 *   EVT_ERR  - invalid state, stop polling
 *   EVT_GSER - just configured, notify the function driver
 *   EVT_DISC - disconnected/reset, notify the function driver
 *   EVT_DONE - keep polling
 */
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	union xhci_trb *evt;
	u32 ctrl, portsc;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:

		return EVT_ERR;
	case DS_ENABLED:
		/* waiting for a debug host to connect */
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		/* waiting for the controller to start running */
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			/* ack all outstanding port change bits */
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Check and handle changes in endpoint halt status */
		ctrl = readl(&dbc->regs->control);
		handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
		handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}
		break;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	/* a TRB is ours while its cycle bit matches our cycle state */
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}
929
/*
 * Delayed-work handler that polls the DbC: runs the state machine and
 * event loop under the lock, invokes function-driver callbacks without
 * it, and reschedules itself.  Polls at a 1 ms interval while transfers
 * are pending, otherwise at dbc->poll_interval; stops rescheduling on
 * EVT_ERR.
 */
static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;
	unsigned int poll_interval;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	poll_interval = dbc->poll_interval;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	/* driver callbacks must run without dbc->lock held */
	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		/* set fast poll rate if there are pending data transfers */
		if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
		    !list_empty(&dbc->eps[BULK_IN].list_pending))
			poll_interval = 1;
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work,
			 msecs_to_jiffies(poll_interval));
}
967
/* Human-readable names for enum dbc_state, indexed by state value */
static const char * const dbc_state_strings[DS_MAX] = {
	[DS_DISABLED] = "disabled",
	[DS_INITIALIZED] = "initialized",
	[DS_ENABLED] = "enabled",
	[DS_CONNECTED] = "connected",
	[DS_CONFIGURED] = "configured",
};
975
dbc_show(struct device * dev,struct device_attribute * attr,char * buf)976 static ssize_t dbc_show(struct device *dev,
977 struct device_attribute *attr,
978 char *buf)
979 {
980 struct xhci_dbc *dbc;
981 struct xhci_hcd *xhci;
982
983 xhci = hcd_to_xhci(dev_get_drvdata(dev));
984 dbc = xhci->dbc;
985
986 if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
987 return sysfs_emit(buf, "unknown\n");
988
989 return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);
990 }
991
dbc_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)992 static ssize_t dbc_store(struct device *dev,
993 struct device_attribute *attr,
994 const char *buf, size_t count)
995 {
996 struct xhci_hcd *xhci;
997 struct xhci_dbc *dbc;
998
999 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1000 dbc = xhci->dbc;
1001
1002 if (sysfs_streq(buf, "enable"))
1003 xhci_dbc_start(dbc);
1004 else if (sysfs_streq(buf, "disable"))
1005 xhci_dbc_stop(dbc);
1006 else
1007 return -EINVAL;
1008
1009 return count;
1010 }
1011
dbc_idVendor_show(struct device * dev,struct device_attribute * attr,char * buf)1012 static ssize_t dbc_idVendor_show(struct device *dev,
1013 struct device_attribute *attr,
1014 char *buf)
1015 {
1016 struct xhci_dbc *dbc;
1017 struct xhci_hcd *xhci;
1018
1019 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1020 dbc = xhci->dbc;
1021
1022 return sysfs_emit(buf, "%04x\n", dbc->idVendor);
1023 }
1024
/*
 * sysfs write of idVendor.  Only allowed while the DbC is disabled;
 * updates the cached value and read-modify-writes the high 16 bits of
 * the devinfo1 register.
 */
static ssize_t dbc_idVendor_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u16 value;
	u32 dev_info;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idVendor = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	/* idVendor occupies devinfo1[31:16] */
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}
1053
dbc_idProduct_show(struct device * dev,struct device_attribute * attr,char * buf)1054 static ssize_t dbc_idProduct_show(struct device *dev,
1055 struct device_attribute *attr,
1056 char *buf)
1057 {
1058 struct xhci_dbc *dbc;
1059 struct xhci_hcd *xhci;
1060
1061 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1062 dbc = xhci->dbc;
1063
1064 return sysfs_emit(buf, "%04x\n", dbc->idProduct);
1065 }
1066
/*
 * sysfs write of idProduct.  Only allowed while the DbC is disabled;
 * updates the cached value and read-modify-writes the low 16 bits of
 * the devinfo2 register.
 */
static ssize_t dbc_idProduct_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u16 value;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idProduct = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	/* idProduct occupies devinfo2[15:0] */
	dev_info = (dev_info & ~(0xffffu)) | value;
	writel(dev_info, ptr);
	return size;
}
1094
dbc_bcdDevice_show(struct device * dev,struct device_attribute * attr,char * buf)1095 static ssize_t dbc_bcdDevice_show(struct device *dev,
1096 struct device_attribute *attr,
1097 char *buf)
1098 {
1099 struct xhci_dbc *dbc;
1100 struct xhci_hcd *xhci;
1101
1102 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1103 dbc = xhci->dbc;
1104
1105 return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);
1106 }
1107
/*
 * sysfs write of bcdDevice.  Only allowed while the DbC is disabled;
 * updates the cached value and read-modify-writes the high 16 bits of
 * the devinfo2 register.
 */
static ssize_t dbc_bcdDevice_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u16 value;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bcdDevice = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	/* bcdDevice occupies devinfo2[31:16] */
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}
1136
dbc_bInterfaceProtocol_show(struct device * dev,struct device_attribute * attr,char * buf)1137 static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
1138 struct device_attribute *attr,
1139 char *buf)
1140 {
1141 struct xhci_dbc *dbc;
1142 struct xhci_hcd *xhci;
1143
1144 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1145 dbc = xhci->dbc;
1146
1147 return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);
1148 }
1149
dbc_bInterfaceProtocol_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1150 static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
1151 struct device_attribute *attr,
1152 const char *buf, size_t size)
1153 {
1154 struct xhci_dbc *dbc;
1155 struct xhci_hcd *xhci;
1156 void __iomem *ptr;
1157 u32 dev_info;
1158 u8 value;
1159 int ret;
1160
1161 /* bInterfaceProtocol is 8 bit, but... */
1162 ret = kstrtou8(buf, 0, &value);
1163 if (ret)
1164 return ret;
1165
1166 /* ...xhci only supports values 0 and 1 */
1167 if (value > 1)
1168 return -EINVAL;
1169
1170 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1171 dbc = xhci->dbc;
1172 if (dbc->state != DS_DISABLED)
1173 return -EBUSY;
1174
1175 dbc->bInterfaceProtocol = value;
1176 ptr = &dbc->regs->devinfo1;
1177 dev_info = readl(ptr);
1178 dev_info = (dev_info & ~(0xffu)) | value;
1179 writel(dev_info, ptr);
1180
1181 return size;
1182 }
1183
dbc_poll_interval_ms_show(struct device * dev,struct device_attribute * attr,char * buf)1184 static ssize_t dbc_poll_interval_ms_show(struct device *dev,
1185 struct device_attribute *attr,
1186 char *buf)
1187 {
1188 struct xhci_dbc *dbc;
1189 struct xhci_hcd *xhci;
1190
1191 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1192 dbc = xhci->dbc;
1193
1194 return sysfs_emit(buf, "%u\n", dbc->poll_interval);
1195 }
1196
dbc_poll_interval_ms_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1197 static ssize_t dbc_poll_interval_ms_store(struct device *dev,
1198 struct device_attribute *attr,
1199 const char *buf, size_t size)
1200 {
1201 struct xhci_dbc *dbc;
1202 struct xhci_hcd *xhci;
1203 u32 value;
1204 int ret;
1205
1206 ret = kstrtou32(buf, 0, &value);
1207 if (ret || value > DBC_POLL_INTERVAL_MAX)
1208 return -EINVAL;
1209
1210 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1211 dbc = xhci->dbc;
1212
1213 dbc->poll_interval = value;
1214
1215 mod_delayed_work(system_wq, &dbc->event_work, 0);
1216
1217 return size;
1218 }
1219
/* sysfs attributes exposed on the xHCI host controller device */
static DEVICE_ATTR_RW(dbc);
static DEVICE_ATTR_RW(dbc_idVendor);
static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);
static DEVICE_ATTR_RW(dbc_poll_interval_ms);

static struct attribute *dbc_dev_attrs[] = {
	&dev_attr_dbc.attr,
	&dev_attr_dbc_idVendor.attr,
	&dev_attr_dbc_idProduct.attr,
	&dev_attr_dbc_bcdDevice.attr,
	&dev_attr_dbc_bInterfaceProtocol.attr,
	&dev_attr_dbc_poll_interval_ms.attr,
	NULL
};
ATTRIBUTE_GROUPS(dbc_dev);
1237
1238 struct xhci_dbc *
xhci_alloc_dbc(struct device * dev,void __iomem * base,const struct dbc_driver * driver)1239 xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
1240 {
1241 struct xhci_dbc *dbc;
1242 int ret;
1243
1244 dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
1245 if (!dbc)
1246 return NULL;
1247
1248 dbc->regs = base;
1249 dbc->dev = dev;
1250 dbc->driver = driver;
1251 dbc->idProduct = DBC_PRODUCT_ID;
1252 dbc->idVendor = DBC_VENDOR_ID;
1253 dbc->bcdDevice = DBC_DEVICE_REV;
1254 dbc->bInterfaceProtocol = DBC_PROTOCOL;
1255 dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;
1256
1257 if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
1258 goto err;
1259
1260 INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
1261 spin_lock_init(&dbc->lock);
1262
1263 ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
1264 if (ret)
1265 goto err;
1266
1267 return dbc;
1268 err:
1269 kfree(dbc);
1270 return NULL;
1271 }
1272
1273 /* undo what xhci_alloc_dbc() did */
xhci_dbc_remove(struct xhci_dbc * dbc)1274 void xhci_dbc_remove(struct xhci_dbc *dbc)
1275 {
1276 if (!dbc)
1277 return;
1278 /* stop hw, stop wq and call dbc->ops->stop() */
1279 xhci_dbc_stop(dbc);
1280
1281 /* remove sysfs files */
1282 sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);
1283
1284 kfree(dbc);
1285 }
1286
1287
xhci_create_dbc_dev(struct xhci_hcd * xhci)1288 int xhci_create_dbc_dev(struct xhci_hcd *xhci)
1289 {
1290 struct device *dev;
1291 void __iomem *base;
1292 int ret;
1293 int dbc_cap_offs;
1294
1295 /* create all parameters needed resembling a dbc device */
1296 dev = xhci_to_hcd(xhci)->self.controller;
1297 base = &xhci->cap_regs->hc_capbase;
1298
1299 dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
1300 if (!dbc_cap_offs)
1301 return -ENODEV;
1302
1303 /* already allocated and in use */
1304 if (xhci->dbc)
1305 return -EBUSY;
1306
1307 ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);
1308
1309 return ret;
1310 }
1311
xhci_remove_dbc_dev(struct xhci_hcd * xhci)1312 void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
1313 {
1314 unsigned long flags;
1315
1316 if (!xhci->dbc)
1317 return;
1318
1319 xhci_dbc_tty_remove(xhci->dbc);
1320 spin_lock_irqsave(&xhci->lock, flags);
1321 xhci->dbc = NULL;
1322 spin_unlock_irqrestore(&xhci->lock, flags);
1323 }
1324
1325 #ifdef CONFIG_PM
xhci_dbc_suspend(struct xhci_hcd * xhci)1326 int xhci_dbc_suspend(struct xhci_hcd *xhci)
1327 {
1328 struct xhci_dbc *dbc = xhci->dbc;
1329
1330 if (!dbc)
1331 return 0;
1332
1333 if (dbc->state == DS_CONFIGURED)
1334 dbc->resume_required = 1;
1335
1336 xhci_dbc_stop(dbc);
1337
1338 return 0;
1339 }
1340
xhci_dbc_resume(struct xhci_hcd * xhci)1341 int xhci_dbc_resume(struct xhci_hcd *xhci)
1342 {
1343 int ret = 0;
1344 struct xhci_dbc *dbc = xhci->dbc;
1345
1346 if (!dbc)
1347 return 0;
1348
1349 if (dbc->resume_required) {
1350 dbc->resume_required = 0;
1351 xhci_dbc_start(dbc);
1352 }
1353
1354 return ret;
1355 }
1356 #endif /* CONFIG_PM */
1357
/* Module init hook: register the DbC TTY driver. */
int xhci_dbc_init(void)
{
	return dbc_tty_init();
}
1362
/* Module exit hook: unregister the DbC TTY driver. */
void xhci_dbc_exit(void)
{
	dbc_tty_exit();
}
1367