1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * xhci-dbgcap.c - xHCI debug capability support
4 *
5 * Copyright (C) 2017 Intel Corporation
6 *
7 * Author: Lu Baolu <baolu.lu@linux.intel.com>
8 */
9 #include <linux/bug.h>
10 #include <linux/device.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/errno.h>
13 #include <linux/kstrtox.h>
14 #include <linux/list.h>
15 #include <linux/nls.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/string.h>
20 #include <linux/sysfs.h>
21 #include <linux/types.h>
22 #include <linux/workqueue.h>
23
24 #include <linux/io-64-nonatomic-lo-hi.h>
25
26 #include <asm/byteorder.h>
27
28 #include "xhci.h"
29 #include "xhci-trace.h"
30 #include "xhci-dbgcap.h"
31
dbc_free_ctx(struct device * dev,struct xhci_container_ctx * ctx)32 static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
33 {
34 if (!ctx)
35 return;
36 dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
37 kfree(ctx);
38 }
39
40 /* we use only one segment for DbC rings */
dbc_ring_free(struct device * dev,struct xhci_ring * ring)41 static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
42 {
43 if (!ring)
44 return;
45
46 if (ring->first_seg) {
47 dma_free_coherent(dev, TRB_SEGMENT_SIZE,
48 ring->first_seg->trbs,
49 ring->first_seg->dma);
50 kfree(ring->first_seg);
51 }
52 kfree(ring);
53 }
54
xhci_dbc_populate_strings(struct dbc_str_descs * strings)55 static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
56 {
57 struct usb_string_descriptor *s_desc;
58 u32 string_length;
59
60 /* Serial string: */
61 s_desc = (struct usb_string_descriptor *)strings->serial;
62 utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
63 UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
64 DBC_MAX_STRING_LENGTH);
65
66 s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
67 s_desc->bDescriptorType = USB_DT_STRING;
68 string_length = s_desc->bLength;
69 string_length <<= 8;
70
71 /* Product string: */
72 s_desc = (struct usb_string_descriptor *)strings->product;
73 utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
74 UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
75 DBC_MAX_STRING_LENGTH);
76
77 s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
78 s_desc->bDescriptorType = USB_DT_STRING;
79 string_length += s_desc->bLength;
80 string_length <<= 8;
81
82 /* Manufacture string: */
83 s_desc = (struct usb_string_descriptor *)strings->manufacturer;
84 utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
85 strlen(DBC_STRING_MANUFACTURER),
86 UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
87 DBC_MAX_STRING_LENGTH);
88
89 s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
90 s_desc->bDescriptorType = USB_DT_STRING;
91 string_length += s_desc->bLength;
92 string_length <<= 8;
93
94 /* String0: */
95 strings->string0[0] = 4;
96 strings->string0[1] = USB_DT_STRING;
97 strings->string0[2] = 0x09;
98 strings->string0[3] = 0x04;
99 string_length += 4;
100
101 return string_length;
102 }
103
/*
 * Program the bulk-out and bulk-in endpoint contexts with their ring
 * dequeue pointers (cycle state folded into the low bit) and the max
 * burst size read from the DbC control register.
 */
static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
{
	struct xhci_ep_ctx *ep_ctx;
	unsigned int max_burst;
	dma_addr_t deq;

	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	/* 1024 byte max packet size for DbC bulk endpoints */
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
}
126
/*
 * Fill the DbC info context (string descriptor DMA addresses and packed
 * lengths), the endpoint contexts, and program the context pointer and
 * device descriptor info registers.
 */
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context *info;
	u32 dev_info;
	dma_addr_t dma;

	if (!dbc)
		return;

	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	/* The four string descriptors lie back to back in the string buffer */
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Populate bulk in and out endpoint contexts: */
	xhci_dbc_init_ep_contexts(dbc);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
	writel(dev_info, &dbc->regs->devinfo2);
}
157
/*
 * Detach a completed request from its endpoint, unmap its buffer, and
 * invoke its completion handler. dbc->lock is dropped around the
 * completion callback (hence the __releases/__acquires annotations).
 */
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	/* Keep an earlier status; only set it while still in progress */
	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}
184
trb_to_noop(union xhci_trb * trb)185 static void trb_to_noop(union xhci_trb *trb)
186 {
187 trb->generic.field[0] = 0;
188 trb->generic.field[1] = 0;
189 trb->generic.field[2] = 0;
190 trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
191 trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
192 }
193
/*
 * Turn a pending request's TRB into a No-Op so hardware skips it, then
 * complete the request with -ESHUTDOWN.
 */
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	trb_to_noop(req->trb);
	xhci_dbc_giveback(req, -ESHUTDOWN);
}
199
xhci_dbc_flush_endpoint_requests(struct dbc_ep * dep)200 static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
201 {
202 struct dbc_request *req, *tmp;
203
204 list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
205 xhci_dbc_flush_single_request(req);
206 }
207
/* Flush pending requests on both bulk endpoints. */
static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}
213
214 struct dbc_request *
dbc_alloc_request(struct xhci_dbc * dbc,unsigned int direction,gfp_t flags)215 dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
216 {
217 struct dbc_request *req;
218
219 if (direction != BULK_IN &&
220 direction != BULK_OUT)
221 return NULL;
222
223 if (!dbc)
224 return NULL;
225
226 req = kzalloc(sizeof(*req), flags);
227 if (!req)
228 return NULL;
229
230 req->dbc = dbc;
231 INIT_LIST_HEAD(&req->list_pending);
232 INIT_LIST_HEAD(&req->list_pool);
233 req->direction = direction;
234
235 trace_xhci_dbc_alloc_request(req);
236
237 return req;
238 }
239
/* Trace and free a request obtained from dbc_alloc_request(). */
void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}
247
/*
 * Write one TRB at the ring's enqueue position and advance the enqueue
 * pointer. The queued TRB's cycle bit is the caller's responsibility;
 * only the link TRB's cycle bit is toggled here when wrapping.
 */
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic,
				       xhci_trb_virt_to_dma(ring->enq_seg,
							    ring->enqueue));
	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	/* Hit the link TRB: hand it to hw and wrap back to the segment start */
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}
271
/*
 * Queue one Normal TRB for a DMA-mapped request and ring the doorbell.
 * The TRB's cycle bit is written last, after a write barrier, so the
 * controller never sees a partially written TRB.
 *
 * Returns 0 on success, -EBUSY if the ring is full.
 */
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = req->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	/* DbC requests are expected to fit in a single TRB */
	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = req->dma;
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	/* Queue with the cycle bit inverted; it is flipped below */
	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	/* Hand the TRB to the controller by setting its correct cycle bit */
	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}
320
/*
 * Map the request buffer for DMA and queue it on the endpoint's ring.
 * Caller holds dbc->lock. Returns 0 on success, -EINVAL for an empty
 * request, -EFAULT on mapping or queueing failure.
 */
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int ret;
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		/* Undo the mapping; the request never reached the ring */
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}
358
dbc_ep_queue(struct dbc_request * req)359 int dbc_ep_queue(struct dbc_request *req)
360 {
361 unsigned long flags;
362 struct xhci_dbc *dbc = req->dbc;
363 int ret = -ESHUTDOWN;
364
365 if (!dbc)
366 return -ENODEV;
367
368 if (req->direction != BULK_IN &&
369 req->direction != BULK_OUT)
370 return -EINVAL;
371
372 spin_lock_irqsave(&dbc->lock, flags);
373 if (dbc->state == DS_CONFIGURED)
374 ret = dbc_ep_do_queue(req);
375 spin_unlock_irqrestore(&dbc->lock, flags);
376
377 mod_delayed_work(system_wq, &dbc->event_work, 0);
378
379 trace_xhci_dbc_queue_request(req);
380
381 return ret;
382 }
383
/* Initialize one endpoint struct and bind it to its transfer ring. */
static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep *dep = &dbc->eps[direction];

	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;
	INIT_LIST_HEAD(&dep->list_pending);
}
395
/* Set up both bulk endpoints (out and in). */
static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}
401
/* Clear both endpoint structs; the rings themselves are freed elsewhere. */
static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
}
406
/*
 * Allocate a single-entry Event Ring Segment Table pointing at the event
 * ring's only segment. Returns 0 on success, -ENOMEM on failure.
 */
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	struct xhci_erst_entry *entry;

	erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;

	entry = &erst->entries[0];
	entry->seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	entry->rsvd = 0;

	return 0;
}
421
/* Free the ERST allocated by dbc_erst_alloc() and clear the pointer. */
static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
			  erst->erst_dma_addr);
	erst->entries = NULL;
}
428
429 static struct xhci_container_ctx *
dbc_alloc_ctx(struct device * dev,gfp_t flags)430 dbc_alloc_ctx(struct device *dev, gfp_t flags)
431 {
432 struct xhci_container_ctx *ctx;
433
434 ctx = kzalloc(sizeof(*ctx), flags);
435 if (!ctx)
436 return NULL;
437
438 /* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes*/
439 ctx->size = 3 * DBC_CONTEXT_SIZE;
440 ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
441 if (!ctx->bytes) {
442 kfree(ctx);
443 return NULL;
444 }
445 return ctx;
446 }
447
/*
 * (Re)initialize a ring's single segment: zero all TRBs and, for transfer
 * rings, set up the trailing link TRB pointing back at the segment start.
 */
static void xhci_dbc_ring_init(struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	/* clear all trbs on ring in case of old ring */
	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);

	/* Only event ring does not use link TRB */
	if (ring->type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		/* Single-segment ring links back to itself, with toggle-cycle */
		trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	xhci_initialize_ring_info(ring);
}
464
xhci_dbc_reinit_ep_rings(struct xhci_dbc * dbc)465 static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc)
466 {
467 struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring;
468 struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring;
469
470 if (!in_ring || !out_ring || !dbc->ctx) {
471 dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n");
472 return -ENODEV;
473 }
474
475 xhci_dbc_ring_init(in_ring);
476 xhci_dbc_ring_init(out_ring);
477
478 /* set ep context enqueue, dequeue, and cycle to initial values */
479 xhci_dbc_init_ep_contexts(dbc);
480
481 return 0;
482 }
483
484 static struct xhci_ring *
xhci_dbc_ring_alloc(struct device * dev,enum xhci_ring_type type,gfp_t flags)485 xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
486 {
487 struct xhci_ring *ring;
488 struct xhci_segment *seg;
489 dma_addr_t dma;
490
491 ring = kzalloc(sizeof(*ring), flags);
492 if (!ring)
493 return NULL;
494
495 ring->num_segs = 1;
496 ring->type = type;
497
498 seg = kzalloc(sizeof(*seg), flags);
499 if (!seg)
500 goto seg_fail;
501
502 ring->first_seg = seg;
503 ring->last_seg = seg;
504 seg->next = seg;
505
506 seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
507 if (!seg->trbs)
508 goto dma_fail;
509
510 seg->dma = dma;
511
512 INIT_LIST_HEAD(&ring->td_list);
513
514 xhci_dbc_ring_init(ring);
515
516 return ring;
517 dma_fail:
518 kfree(seg);
519 seg_fail:
520 kfree(ring);
521 return NULL;
522 }
523
/*
 * Allocate and initialize all DbC data structures: event and transfer
 * rings, ERST, context block and string table; then program the ERST
 * registers and contexts. Moves the DbC to DS_INITIALIZED on success.
 *
 * Returns 0 on success or -ENOMEM on any allocation failure, with all
 * earlier allocations released via the unwind labels below.
 */
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct device *dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(*dbc->string);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.num_entries, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}
595
/* Free everything allocated by xhci_dbc_mem_init(); NULL dbc is a no-op. */
static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	dma_free_coherent(dbc->dev, dbc->string_size, dbc->string, dbc->string_dma);
	dbc->string = NULL;

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}
617
/*
 * Enable the debug capability: wait for the enable bit to clear, allocate
 * DbC memory (GFP_ATOMIC — caller holds dbc->lock), then set the enable
 * and port-enable bits and wait for the controller to report enabled.
 *
 * Returns 0 on success, -EINVAL if not currently disabled, or a negative
 * error from allocation or the register handshakes.
 */
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	u32 ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	/* Wait for the enable bit to read back as zero */
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	/* Wait for the controller to acknowledge the enable */
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}
650
/*
 * Clear the DbC control register and mark the state disabled.
 * Returns -EINVAL if the DbC is already disabled.
 */
static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}
661
/*
 * Start the debug capability and schedule the event polling work.
 * Takes a runtime PM reference on the device, released here on failure
 * (and otherwise by xhci_dbc_stop()).
 */
static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work,
				msecs_to_jiffies(dbc->poll_interval));
}
683
/*
 * Stop the debug capability: flush pending requests and notify the
 * function driver when configured, cancel event polling, disable the
 * controller, free DbC memory and drop the runtime PM reference.
 */
static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
		spin_lock(&dbc->lock);
		xhci_dbc_flush_requests(dbc);
		spin_unlock(&dbc->lock);

		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	/* The event work may reschedule itself; cancel it synchronously */
	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);
	/* Already disabled (e.g. raced with another stop): nothing to free */
	if (ret)
		return;

	xhci_dbc_mem_cleanup(dbc);
	pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
}
717
718 static void
handle_ep_halt_changes(struct xhci_dbc * dbc,struct dbc_ep * dep,bool halted)719 handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
720 {
721 if (halted) {
722 dev_info(dbc->dev, "DbC Endpoint halted\n");
723 dep->halted = 1;
724
725 } else if (dep->halted) {
726 dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
727 dep->halted = 0;
728
729 if (!list_empty(&dep->list_pending))
730 writel(DBC_DOOR_BELL_TARGET(dep->direction),
731 &dbc->regs->doorbell);
732 }
733 }
734
/*
 * Log port status change events and acknowledge them by writing the
 * change bits back — except the reset change bit, which the
 * DS_CONFIGURED state-machine path clears.
 */
static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32 portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* Port reset change bit will be cleared in other place: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
756
/*
 * Handle a transfer event TRB: locate the matching pending request on the
 * endpoint, translate the completion code to a status, and give the
 * request back. Stall events get special treatment (see comments below).
 */
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	struct xhci_ep_ctx *ep_ctx;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ?
			get_out_ep(dbc) : get_in_ep(dbc);
	ep_ctx = (ep_id == EPID_OUT) ?
			dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
	ring = dep->ring;

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
		/* Requests parked by an earlier spurious stall can go back now */
		if (r->status == -COMP_STALL_ERROR) {
			dev_warn(dbc->dev, "Give back stale stalled req\n");
			ring->num_trbs_free++;
			xhci_dbc_giveback(r, 0);
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic, req->trb_dma);

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
			 event->trans_event.buffer, remain_length, ep_ctx->deq);
		status = 0;
		dep->halted = 1;

		/*
		 * xHC DbC may trigger a STALL bulk xfer event when host sends a
		 * ClearFeature(ENDPOINT_HALT) request even if there wasn't an
		 * active bulk transfer.
		 *
		 * Don't give back this transfer request as hardware will later
		 * start processing TRBs starting from this 'STALLED' TRB,
		 * causing TRBs and requests to be out of sync.
		 *
		 * If STALL event shows some bytes were transferred then assume
		 * it's an actual transfer issue and give back the request.
		 * In this case mark the TRB as No-Op to avoid hw from using the
		 * TRB again.
		 */

		if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
			dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
			if (remain_length == req->length) {
				dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
				/* Park the request; it is given back on a later event */
				req->status = -COMP_STALL_ERROR;
				req->actual = 0;
				return;
			}
			dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
			trb_to_noop(req->trb);
		}
		break;

	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}
854
inc_evt_deq(struct xhci_ring * ring)855 static void inc_evt_deq(struct xhci_ring *ring)
856 {
857 /* If on the last TRB of the segment go back to the beginning */
858 if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
859 ring->cycle_state ^= 1;
860 ring->dequeue = ring->deq_seg->trbs;
861 return;
862 }
863 ring->dequeue++;
864 }
865
/*
 * Run the DbC state machine and drain the event ring. Caller holds
 * dbc->lock. The return value tells the caller what to relay to the
 * function driver outside the lock:
 *   EVT_ERR       - DbC not running; stop polling
 *   EVT_DONE      - nothing of note; keep polling
 *   EVT_GSER      - DbC just became configured
 *   EVT_DISC      - DbC was unconfigured (unplug or port reset)
 *   EVT_XFER_DONE - at least one transfer event was handled
 */
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	union xhci_trb *evt;
	enum evtreturn ret = EVT_DONE;
	u32 ctrl, portsc;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:

		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			/* Ack outstanding port change bits */
			writel(portsc, &dbc->regs->portsc);
			ret = EVT_GSER;
			break;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);
			xhci_dbc_reinit_ep_rings(dbc);
			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);
			xhci_dbc_reinit_ep_rings(dbc);
			return EVT_DISC;
		}

		/* Check and handle changes in endpoint halt status */
		ctrl = readl(&dbc->regs->control);
		handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
		handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}
		break;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	/* Consume TRBs while their cycle bit matches our cycle state */
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic,
					    xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
								 dbc->ring_evt->dequeue));

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			/* EVT_GSER takes priority over EVT_XFER_DONE */
			if (ret != EVT_GSER)
				ret = EVT_XFER_DONE;
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return ret;
}
980
/*
 * Delayed-work handler that drives the DbC state machine, relays
 * configure/disconnect events to the function driver (without holding
 * dbc->lock), and reschedules itself with an adaptive poll interval:
 * fast (0) while transfers are pending or recently completed.
 */
static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;
	unsigned int poll_interval;
	unsigned long busypoll_timelimit;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	poll_interval = dbc->poll_interval;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		/*
		 * Set fast poll rate if there are pending out transfers, or
		 * a transfer was recently processed
		 */
		busypoll_timelimit = dbc->xfer_timestamp +
			msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT);

		if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
		    time_is_after_jiffies(busypoll_timelimit))
			poll_interval = 0;
		break;
	case EVT_XFER_DONE:
		dbc->xfer_timestamp = jiffies;
		poll_interval = 0;
		break;
	default:
		/* EVT_ERR: DbC is not running — do not reschedule */
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work,
			 msecs_to_jiffies(poll_interval));
}
1029
/* Human-readable names for the DbC states, indexed by enum dbc_state. */
static const char * const dbc_state_strings[DS_MAX] = {
	[DS_DISABLED] = "disabled",
	[DS_INITIALIZED] = "initialized",
	[DS_ENABLED] = "enabled",
	[DS_CONNECTED] = "connected",
	[DS_CONFIGURED] = "configured",
};
1037
dbc_show(struct device * dev,struct device_attribute * attr,char * buf)1038 static ssize_t dbc_show(struct device *dev,
1039 struct device_attribute *attr,
1040 char *buf)
1041 {
1042 struct xhci_dbc *dbc;
1043 struct xhci_hcd *xhci;
1044
1045 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1046 dbc = xhci->dbc;
1047
1048 if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
1049 return sysfs_emit(buf, "unknown\n");
1050
1051 return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);
1052 }
1053
dbc_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1054 static ssize_t dbc_store(struct device *dev,
1055 struct device_attribute *attr,
1056 const char *buf, size_t count)
1057 {
1058 struct xhci_hcd *xhci;
1059 struct xhci_dbc *dbc;
1060
1061 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1062 dbc = xhci->dbc;
1063
1064 if (sysfs_streq(buf, "enable"))
1065 xhci_dbc_start(dbc);
1066 else if (sysfs_streq(buf, "disable"))
1067 xhci_dbc_stop(dbc);
1068 else
1069 return -EINVAL;
1070
1071 return count;
1072 }
1073
dbc_idVendor_show(struct device * dev,struct device_attribute * attr,char * buf)1074 static ssize_t dbc_idVendor_show(struct device *dev,
1075 struct device_attribute *attr,
1076 char *buf)
1077 {
1078 struct xhci_dbc *dbc;
1079 struct xhci_hcd *xhci;
1080
1081 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1082 dbc = xhci->dbc;
1083
1084 return sysfs_emit(buf, "%04x\n", dbc->idVendor);
1085 }
1086
dbc_idVendor_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1087 static ssize_t dbc_idVendor_store(struct device *dev,
1088 struct device_attribute *attr,
1089 const char *buf, size_t size)
1090 {
1091 struct xhci_dbc *dbc;
1092 struct xhci_hcd *xhci;
1093 void __iomem *ptr;
1094 u16 value;
1095 u32 dev_info;
1096 int ret;
1097
1098 ret = kstrtou16(buf, 0, &value);
1099 if (ret)
1100 return ret;
1101
1102 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1103 dbc = xhci->dbc;
1104 if (dbc->state != DS_DISABLED)
1105 return -EBUSY;
1106
1107 dbc->idVendor = value;
1108 ptr = &dbc->regs->devinfo1;
1109 dev_info = readl(ptr);
1110 dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
1111 writel(dev_info, ptr);
1112
1113 return size;
1114 }
1115
dbc_idProduct_show(struct device * dev,struct device_attribute * attr,char * buf)1116 static ssize_t dbc_idProduct_show(struct device *dev,
1117 struct device_attribute *attr,
1118 char *buf)
1119 {
1120 struct xhci_dbc *dbc;
1121 struct xhci_hcd *xhci;
1122
1123 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1124 dbc = xhci->dbc;
1125
1126 return sysfs_emit(buf, "%04x\n", dbc->idProduct);
1127 }
1128
dbc_idProduct_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1129 static ssize_t dbc_idProduct_store(struct device *dev,
1130 struct device_attribute *attr,
1131 const char *buf, size_t size)
1132 {
1133 struct xhci_dbc *dbc;
1134 struct xhci_hcd *xhci;
1135 void __iomem *ptr;
1136 u32 dev_info;
1137 u16 value;
1138 int ret;
1139
1140 ret = kstrtou16(buf, 0, &value);
1141 if (ret)
1142 return ret;
1143
1144 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1145 dbc = xhci->dbc;
1146 if (dbc->state != DS_DISABLED)
1147 return -EBUSY;
1148
1149 dbc->idProduct = value;
1150 ptr = &dbc->regs->devinfo2;
1151 dev_info = readl(ptr);
1152 dev_info = (dev_info & ~(0xffffu)) | value;
1153 writel(dev_info, ptr);
1154 return size;
1155 }
1156
dbc_bcdDevice_show(struct device * dev,struct device_attribute * attr,char * buf)1157 static ssize_t dbc_bcdDevice_show(struct device *dev,
1158 struct device_attribute *attr,
1159 char *buf)
1160 {
1161 struct xhci_dbc *dbc;
1162 struct xhci_hcd *xhci;
1163
1164 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1165 dbc = xhci->dbc;
1166
1167 return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);
1168 }
1169
dbc_bcdDevice_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1170 static ssize_t dbc_bcdDevice_store(struct device *dev,
1171 struct device_attribute *attr,
1172 const char *buf, size_t size)
1173 {
1174 struct xhci_dbc *dbc;
1175 struct xhci_hcd *xhci;
1176 void __iomem *ptr;
1177 u32 dev_info;
1178 u16 value;
1179 int ret;
1180
1181 ret = kstrtou16(buf, 0, &value);
1182 if (ret)
1183 return ret;
1184
1185 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1186 dbc = xhci->dbc;
1187 if (dbc->state != DS_DISABLED)
1188 return -EBUSY;
1189
1190 dbc->bcdDevice = value;
1191 ptr = &dbc->regs->devinfo2;
1192 dev_info = readl(ptr);
1193 dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
1194 writel(dev_info, ptr);
1195
1196 return size;
1197 }
1198
dbc_bInterfaceProtocol_show(struct device * dev,struct device_attribute * attr,char * buf)1199 static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
1200 struct device_attribute *attr,
1201 char *buf)
1202 {
1203 struct xhci_dbc *dbc;
1204 struct xhci_hcd *xhci;
1205
1206 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1207 dbc = xhci->dbc;
1208
1209 return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);
1210 }
1211
dbc_bInterfaceProtocol_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1212 static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
1213 struct device_attribute *attr,
1214 const char *buf, size_t size)
1215 {
1216 struct xhci_dbc *dbc;
1217 struct xhci_hcd *xhci;
1218 void __iomem *ptr;
1219 u32 dev_info;
1220 u8 value;
1221 int ret;
1222
1223 /* bInterfaceProtocol is 8 bit, but... */
1224 ret = kstrtou8(buf, 0, &value);
1225 if (ret)
1226 return ret;
1227
1228 /* ...xhci only supports values 0 and 1 */
1229 if (value > 1)
1230 return -EINVAL;
1231
1232 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1233 dbc = xhci->dbc;
1234 if (dbc->state != DS_DISABLED)
1235 return -EBUSY;
1236
1237 dbc->bInterfaceProtocol = value;
1238 ptr = &dbc->regs->devinfo1;
1239 dev_info = readl(ptr);
1240 dev_info = (dev_info & ~(0xffu)) | value;
1241 writel(dev_info, ptr);
1242
1243 return size;
1244 }
1245
dbc_poll_interval_ms_show(struct device * dev,struct device_attribute * attr,char * buf)1246 static ssize_t dbc_poll_interval_ms_show(struct device *dev,
1247 struct device_attribute *attr,
1248 char *buf)
1249 {
1250 struct xhci_dbc *dbc;
1251 struct xhci_hcd *xhci;
1252
1253 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1254 dbc = xhci->dbc;
1255
1256 return sysfs_emit(buf, "%u\n", dbc->poll_interval);
1257 }
1258
dbc_poll_interval_ms_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1259 static ssize_t dbc_poll_interval_ms_store(struct device *dev,
1260 struct device_attribute *attr,
1261 const char *buf, size_t size)
1262 {
1263 struct xhci_dbc *dbc;
1264 struct xhci_hcd *xhci;
1265 u32 value;
1266 int ret;
1267
1268 ret = kstrtou32(buf, 0, &value);
1269 if (ret || value > DBC_POLL_INTERVAL_MAX)
1270 return -EINVAL;
1271
1272 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1273 dbc = xhci->dbc;
1274
1275 dbc->poll_interval = value;
1276
1277 mod_delayed_work(system_wq, &dbc->event_work, 0);
1278
1279 return size;
1280 }
1281
/* sysfs attributes exposed on the xHCI host controller device */
static DEVICE_ATTR_RW(dbc);			/* state / "enable" / "disable" */
static DEVICE_ATTR_RW(dbc_idVendor);		/* USB descriptor idVendor */
static DEVICE_ATTR_RW(dbc_idProduct);		/* USB descriptor idProduct */
static DEVICE_ATTR_RW(dbc_bcdDevice);		/* USB descriptor bcdDevice */
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);	/* interface protocol (0 or 1) */
static DEVICE_ATTR_RW(dbc_poll_interval_ms);	/* event polling interval */

static struct attribute *dbc_dev_attrs[] = {
	&dev_attr_dbc.attr,
	&dev_attr_dbc_idVendor.attr,
	&dev_attr_dbc_idProduct.attr,
	&dev_attr_dbc_bcdDevice.attr,
	&dev_attr_dbc_bInterfaceProtocol.attr,
	&dev_attr_dbc_poll_interval_ms.attr,
	NULL
};
ATTRIBUTE_GROUPS(dbc_dev);
1299
1300 struct xhci_dbc *
xhci_alloc_dbc(struct device * dev,void __iomem * base,const struct dbc_driver * driver)1301 xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
1302 {
1303 struct xhci_dbc *dbc;
1304 int ret;
1305
1306 dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
1307 if (!dbc)
1308 return NULL;
1309
1310 dbc->regs = base;
1311 dbc->dev = dev;
1312 dbc->driver = driver;
1313 dbc->idProduct = DBC_PRODUCT_ID;
1314 dbc->idVendor = DBC_VENDOR_ID;
1315 dbc->bcdDevice = DBC_DEVICE_REV;
1316 dbc->bInterfaceProtocol = DBC_PROTOCOL;
1317 dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;
1318
1319 if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
1320 goto err;
1321
1322 INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
1323 spin_lock_init(&dbc->lock);
1324
1325 ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
1326 if (ret)
1327 goto err;
1328
1329 return dbc;
1330 err:
1331 kfree(dbc);
1332 return NULL;
1333 }
1334
1335 /* undo what xhci_alloc_dbc() did */
/* undo what xhci_alloc_dbc() did */
void xhci_dbc_remove(struct xhci_dbc *dbc)
{
	/* tolerate callers that never got a dbc allocated */
	if (!dbc)
		return;
	/* stop hw, stop wq and call dbc->ops->stop() */
	xhci_dbc_stop(dbc);

	/* remove sysfs files */
	sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);

	kfree(dbc);
}
1348
1349
xhci_create_dbc_dev(struct xhci_hcd * xhci)1350 int xhci_create_dbc_dev(struct xhci_hcd *xhci)
1351 {
1352 struct device *dev;
1353 void __iomem *base;
1354 int ret;
1355 int dbc_cap_offs;
1356
1357 /* create all parameters needed resembling a dbc device */
1358 dev = xhci_to_hcd(xhci)->self.controller;
1359 base = &xhci->cap_regs->hc_capbase;
1360
1361 dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
1362 if (!dbc_cap_offs)
1363 return -ENODEV;
1364
1365 /* already allocated and in use */
1366 if (xhci->dbc)
1367 return -EBUSY;
1368
1369 ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);
1370
1371 return ret;
1372 }
1373
/* Tear down the DbC TTY driver and detach the dbc from the xhci. */
void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
{
	unsigned long flags;

	if (!xhci->dbc)
		return;

	xhci_dbc_tty_remove(xhci->dbc);
	/* clear the pointer under xhci->lock so readers never see it stale */
	spin_lock_irqsave(&xhci->lock, flags);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}
1386
1387 #ifdef CONFIG_PM
xhci_dbc_suspend(struct xhci_hcd * xhci)1388 int xhci_dbc_suspend(struct xhci_hcd *xhci)
1389 {
1390 struct xhci_dbc *dbc = xhci->dbc;
1391
1392 if (!dbc)
1393 return 0;
1394
1395 switch (dbc->state) {
1396 case DS_ENABLED:
1397 case DS_CONNECTED:
1398 case DS_CONFIGURED:
1399 dbc->resume_required = 1;
1400 break;
1401 default:
1402 break;
1403 }
1404
1405 xhci_dbc_stop(dbc);
1406
1407 return 0;
1408 }
1409
xhci_dbc_resume(struct xhci_hcd * xhci)1410 int xhci_dbc_resume(struct xhci_hcd *xhci)
1411 {
1412 int ret = 0;
1413 struct xhci_dbc *dbc = xhci->dbc;
1414
1415 if (!dbc)
1416 return 0;
1417
1418 if (dbc->resume_required) {
1419 dbc->resume_required = 0;
1420 xhci_dbc_start(dbc);
1421 }
1422
1423 return ret;
1424 }
1425 #endif /* CONFIG_PM */
1426
/* Module-init hook: register the DbC TTY driver. */
int xhci_dbc_init(void)
{
	return dbc_tty_init();
}
1431
/* Module-exit hook: unregister the DbC TTY driver. */
void xhci_dbc_exit(void)
{
	dbc_tty_exit();
}
1436