1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * xhci-dbgcap.c - xHCI debug capability support
4 *
5 * Copyright (C) 2017 Intel Corporation
6 *
7 * Author: Lu Baolu <baolu.lu@linux.intel.com>
8 */
9 #include <linux/bug.h>
10 #include <linux/device.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/errno.h>
13 #include <linux/kstrtox.h>
14 #include <linux/list.h>
15 #include <linux/nls.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/string.h>
20 #include <linux/sysfs.h>
21 #include <linux/types.h>
22 #include <linux/workqueue.h>
23
24 #include <linux/io-64-nonatomic-lo-hi.h>
25
26 #include <asm/byteorder.h>
27
28 #include "xhci.h"
29 #include "xhci-trace.h"
30 #include "xhci-dbgcap.h"
31
/* Fallback USB string-descriptor contents used when no override is set. */
static const struct dbc_str dbc_str_default = {
	.manufacturer = "Linux Foundation",
	.product = "Linux USB Debug Target",
	.serial = "0001",
};
37
dbc_free_ctx(struct device * dev,struct xhci_container_ctx * ctx)38 static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
39 {
40 if (!ctx)
41 return;
42 dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
43 kfree(ctx);
44 }
45
46 /* we use only one segment for DbC rings */
dbc_ring_free(struct device * dev,struct xhci_ring * ring)47 static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
48 {
49 if (!ring)
50 return;
51
52 if (ring->first_seg) {
53 dma_free_coherent(dev, TRB_SEGMENT_SIZE,
54 ring->first_seg->trbs,
55 ring->first_seg->dma);
56 kfree(ring->first_seg);
57 }
58 kfree(ring);
59 }
60
/*
 * Program both DbC bulk endpoint contexts.
 *
 * Max burst is read from the DbC control register; max packet size is
 * hard-coded to 1024 bytes.  The dequeue pointer written to each context
 * has the ring's current cycle state OR-ed into its low bit.
 */
static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
{
	struct xhci_ep_ctx *ep_ctx;
	unsigned int max_burst;
	dma_addr_t deq;

	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
}
83
get_str_desc_len(const char * desc)84 static u8 get_str_desc_len(const char *desc)
85 {
86 return ((struct usb_string_descriptor *)desc)->bLength;
87 }
88
dbc_prepare_info_context_str_len(struct dbc_str_descs * descs)89 static u32 dbc_prepare_info_context_str_len(struct dbc_str_descs *descs)
90 {
91 u32 len;
92
93 len = get_str_desc_len(descs->serial);
94 len <<= 8;
95 len += get_str_desc_len(descs->product);
96 len <<= 8;
97 len += get_str_desc_len(descs->manufacturer);
98 len <<= 8;
99 len += get_str_desc_len(descs->string0);
100
101 return len;
102 }
103
xhci_dbc_populate_str_desc(char * desc,const char * src)104 static int xhci_dbc_populate_str_desc(char *desc, const char *src)
105 {
106 struct usb_string_descriptor *s_desc;
107 int len;
108
109 s_desc = (struct usb_string_descriptor *)desc;
110
111 /* len holds number of 2 byte UTF-16 characters */
112 len = utf8s_to_utf16s(src, strlen(src), UTF16_LITTLE_ENDIAN,
113 (wchar_t *)s_desc->wData, USB_MAX_STRING_LEN * 2);
114 if (len < 0)
115 return len;
116
117 s_desc->bLength = len * 2 + 2;
118 s_desc->bDescriptorType = USB_DT_STRING;
119
120 return s_desc->bLength;
121 }
122
/*
 * Fill the DMA-visible string-descriptor table with the serial, product,
 * manufacturer strings and the language-ID descriptor (string0).
 */
static void xhci_dbc_populate_str_descs(struct dbc_str_descs *str_descs,
					struct dbc_str *str)
{
	/* Serial string: */
	xhci_dbc_populate_str_desc(str_descs->serial, str->serial);

	/* Product string: */
	xhci_dbc_populate_str_desc(str_descs->product, str->product);

	/* Manufacturer string: */
	xhci_dbc_populate_str_desc(str_descs->manufacturer, str->manufacturer);

	/* String0: bLength 4, type STRING, LANGID 0x0409 (US English) */
	str_descs->string0[0] = 4;
	str_descs->string0[1] = USB_DT_STRING;
	str_descs->string0[2] = 0x09;
	str_descs->string0[3] = 0x04;
}
141
/*
 * Initialize the DbC info and endpoint contexts and point the hardware
 * at them.  Must run after the string table and rings are allocated,
 * since the info context embeds their DMA addresses.
 */
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc)
{
	struct dbc_info_context *info;
	u32 dev_info;
	dma_addr_t dma;

	if (!dbc)
		return;

	/* Populate info Context: four string descriptors, each at a fixed
	 * USB_MAX_STRING_DESC_LEN offset from the table base.
	 */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->str_descs_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + USB_MAX_STRING_DESC_LEN);
	info->product = cpu_to_le64(dma + USB_MAX_STRING_DESC_LEN * 2);
	info->serial = cpu_to_le64(dma + USB_MAX_STRING_DESC_LEN * 3);
	info->length = cpu_to_le32(dbc_prepare_info_context_str_len(dbc->str_descs));

	/* Populate bulk in and out endpoint contexts: */
	xhci_dbc_init_ep_contexts(dbc);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	/* DEVINFO1 carries idVendor (high 16) and bInterfaceProtocol (low) */
	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
	writel(dev_info, &dbc->regs->devinfo1);

	/* DEVINFO2 carries bcdDevice (high 16) and idProduct (low) */
	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
	writel(dev_info, &dbc->regs->devinfo2);
}
172
/*
 * Complete a request and hand it back to its owner.
 *
 * Caller holds dbc->lock; the lock is dropped around the ->complete()
 * callback (callbacks may requeue) and reacquired before returning.
 * A status already set on the request (i.e. not -EINPROGRESS) wins over
 * the @status argument.
 */
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}
199
trb_to_noop(union xhci_trb * trb)200 static void trb_to_noop(union xhci_trb *trb)
201 {
202 trb->generic.field[0] = 0;
203 trb->generic.field[1] = 0;
204 trb->generic.field[2] = 0;
205 trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
206 trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
207 }
208
/* No-Op the request's TRB so hardware skips it, then complete with -ESHUTDOWN. */
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	trb_to_noop(req->trb);
	xhci_dbc_giveback(req, -ESHUTDOWN);
}
214
/*
 * Cancel every pending request on one endpoint.  The _safe iterator is
 * required because giveback removes each request from the list.
 */
static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}
222
/* Cancel all pending transfers on both bulk endpoints (OUT first, then IN). */
static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}
228
229 struct dbc_request *
dbc_alloc_request(struct xhci_dbc * dbc,unsigned int direction,gfp_t flags)230 dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
231 {
232 struct dbc_request *req;
233
234 if (direction != BULK_IN &&
235 direction != BULK_OUT)
236 return NULL;
237
238 if (!dbc)
239 return NULL;
240
241 req = kzalloc_obj(*req, flags);
242 if (!req)
243 return NULL;
244
245 req->dbc = dbc;
246 INIT_LIST_HEAD(&req->list_pending);
247 INIT_LIST_HEAD(&req->list_pool);
248 req->direction = direction;
249
250 trace_xhci_dbc_alloc_request(req);
251
252 return req;
253 }
254
/* Free a request previously obtained from dbc_alloc_request(). */
void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}
262
/*
 * Write one TRB at the ring's enqueue pointer and advance the pointer.
 * If the next slot is the link TRB, toggle its cycle bit, wrap the
 * enqueue pointer back to the start of the (single) segment, and flip
 * the ring's cycle state.  Caller is responsible for setting the cycle
 * bit in @field4 and for ringing the doorbell.
 */
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic,
				       xhci_trb_virt_to_dma(ring->enq_seg,
							    ring->enqueue));
	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}
286
/*
 * Queue a single bulk transfer TRB for @req and ring the doorbell.
 *
 * The TRB is first written with the cycle bit set to the value hardware
 * will NOT own; after a write barrier the cycle bit is flipped to hand
 * the TRB to the controller.  Returns -EBUSY if the ring is full.
 */
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = req->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	/* DbC transfers are expected to fit in one TRB */
	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = req->dma;
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	/*
	 * Stage the TRB with the cycle bit hardware does not own yet.
	 * NOTE(review): control is a CPU-order u32 but is masked with
	 * cpu_to_le32() values — a distinction only visible on big-endian;
	 * confirm BE correctness.
	 */
	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	/* Hand the TRB over to the controller by flipping the cycle bit */
	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}
335
/*
 * DMA-map @req's buffer and queue it on its endpoint's ring.
 * Caller holds dbc->lock.  On success the request is appended to the
 * endpoint's pending list; on failure the mapping is undone.
 */
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int ret;
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}
373
/*
 * Public entry point for queuing a transfer request.
 *
 * Only accepted while DbC is in the configured state; otherwise returns
 * -ESHUTDOWN.  Kicks the event work immediately so completions are
 * picked up without waiting for the next poll interval.
 */
int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long flags;
	struct xhci_dbc *dbc = req->dbc;
	int ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_percpu_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}
398
/* Initialize one endpoint bookkeeping struct for the given direction. */
static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep *dep = &dbc->eps[direction];

	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;
	dep->dbc = dbc;
	INIT_LIST_HEAD(&dep->list_pending);
}
410
/* Set up both bulk endpoint bookkeeping structures. */
static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}
416
/* Wipe both endpoint bookkeeping structures (rings are freed elsewhere). */
static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
}
421
/*
 * Allocate a one-entry Event Ring Segment Table describing the single
 * event ring segment.  Returns 0 on success, -ENOMEM on failure.
 */
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	struct xhci_erst_entry *entry;

	erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	entry = &erst->entries[0];
	entry->seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	entry->rsvd = 0;

	erst->num_entries = 1;
	return 0;
}
436
/* Free the single-entry ERST allocated by dbc_erst_alloc(). */
static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
			  erst->erst_dma_addr);
	erst->entries = NULL;
}
443
444 static struct xhci_container_ctx *
dbc_alloc_ctx(struct device * dev,gfp_t flags)445 dbc_alloc_ctx(struct device *dev, gfp_t flags)
446 {
447 struct xhci_container_ctx *ctx;
448
449 ctx = kzalloc_obj(*ctx, flags);
450 if (!ctx)
451 return NULL;
452
453 /* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes*/
454 ctx->size = 3 * DBC_CONTEXT_SIZE;
455 ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
456 if (!ctx->bytes) {
457 kfree(ctx);
458 return NULL;
459 }
460 return ctx;
461 }
462
/*
 * (Re)initialize a single-segment ring: zero all TRBs, install the
 * link TRB for transfer rings, and reset enqueue/dequeue/cycle state.
 */
static void xhci_dbc_ring_init(struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	/* clear all trbs on ring in case of old ring */
	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);

	/* Only event ring does not use link TRB */
	if (ring->type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		/* link back to the segment's own start; toggle cycle on wrap */
		trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	xhci_initialize_ring_info(ring);
}
479
/*
 * Reset both transfer rings and the endpoint contexts to their initial
 * state, e.g. after a cable unplug or port reset.  Returns -ENODEV if
 * the rings or context were never allocated.
 */
static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc)
{
	struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring;
	struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring;

	if (!in_ring || !out_ring || !dbc->ctx) {
		dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n");
		return -ENODEV;
	}

	xhci_dbc_ring_init(in_ring);
	xhci_dbc_ring_init(out_ring);

	/* set ep context enqueue, dequeue, and cycle to initial values */
	xhci_dbc_init_ep_contexts(dbc);

	return 0;
}
498
499 static struct xhci_ring *
xhci_dbc_ring_alloc(struct device * dev,enum xhci_ring_type type,gfp_t flags)500 xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
501 {
502 struct xhci_ring *ring;
503 struct xhci_segment *seg;
504 dma_addr_t dma;
505
506 ring = kzalloc_obj(*ring, flags);
507 if (!ring)
508 return NULL;
509
510 ring->num_segs = 1;
511 ring->type = type;
512
513 seg = kzalloc_obj(*seg, flags);
514 if (!seg)
515 goto seg_fail;
516
517 ring->first_seg = seg;
518 ring->last_seg = seg;
519 seg->next = seg;
520
521 seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
522 if (!seg->trbs)
523 goto dma_fail;
524
525 seg->dma = dma;
526
527 INIT_LIST_HEAD(&ring->td_list);
528
529 xhci_dbc_ring_init(ring);
530
531 return ring;
532 dma_fail:
533 kfree(seg);
534 seg_fail:
535 kfree(ring);
536 return NULL;
537 }
538
/*
 * Allocate and wire up all DbC memory: event/in/out rings, the ERST,
 * the container context and the string-descriptor table, then program
 * the hardware registers.  On failure everything allocated so far is
 * unwound via the goto chain and -ENOMEM is returned.
 */
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	struct device *dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->str_descs_size = sizeof(*dbc->str_descs);
	dbc->str_descs = dma_alloc_coherent(dev, dbc->str_descs_size,
					    &dbc->str_descs_dma, flags);
	if (!dbc->str_descs)
		goto str_descs_fail;

	/* Setup ERST register: */
	writel(dbc->erst.num_entries, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup string descriptors and contexts: */
	xhci_dbc_populate_str_descs(dbc->str_descs, &dbc->str);
	xhci_dbc_init_contexts(dbc);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

	/* Unwind in reverse allocation order: */
str_descs_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}
609
/* Free everything allocated by xhci_dbc_mem_init(); safe on NULL dbc. */
static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	dma_free_coherent(dbc->dev, dbc->str_descs_size, dbc->str_descs, dbc->str_descs_dma);
	dbc->str_descs = NULL;

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}
631
/*
 * Enable the DbC hardware: wait for any previous enable bit to clear,
 * allocate all DbC memory, then set the enable bits and wait for the
 * controller to acknowledge.  Caller holds dbc->lock, hence GFP_ATOMIC.
 * Returns 0 on success, -EINVAL if not disabled, or a handshake error.
 */
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	u32 ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	/* Make sure the enable bit is fully cleared before re-enabling */
	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}
664
/*
 * Disable the DbC hardware and mark the state machine disabled.
 * Caller holds dbc->lock.  Returns -EINVAL if already disabled.
 */
static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}
675
/*
 * Bring DbC up: take a runtime-PM reference, enable the hardware, and
 * schedule the event-polling work.  The PM reference is dropped again
 * on failure and later by xhci_dbc_stop() on success.
 */
static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_percpu_wq, &dbc->event_work,
				msecs_to_jiffies(dbc->poll_interval));
}
697
/*
 * Tear DbC down: flush pending requests and notify the function driver
 * if configured, cancel the event work, disable the hardware, free all
 * DbC memory and drop the runtime-PM reference taken in xhci_dbc_start().
 */
static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
		/*
		 * NOTE(review): plain spin_lock here while other paths use
		 * spin_lock_irqsave — presumably safe in this call context;
		 * confirm this cannot race an interrupt-context lock user.
		 */
		spin_lock(&dbc->lock);
		xhci_dbc_flush_requests(dbc);
		spin_unlock(&dbc->lock);

		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);
	if (ret)
		return;

	xhci_dbc_mem_cleanup(dbc);
	pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
}
731
732 static void
handle_ep_halt_changes(struct xhci_dbc * dbc,struct dbc_ep * dep,bool halted)733 handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
734 {
735 if (halted) {
736 dev_info(dbc->dev, "DbC Endpoint halted\n");
737 dep->halted = 1;
738
739 } else if (dep->halted) {
740 dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
741 dep->halted = 0;
742
743 if (!list_empty(&dep->list_pending))
744 writel(DBC_DOOR_BELL_TARGET(dep->direction),
745 &dbc->regs->doorbell);
746 }
747 }
748
749 static void
dbc_handle_port_status(struct xhci_dbc * dbc,union xhci_trb * event)750 dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
751 {
752 u32 portsc;
753
754 portsc = readl(&dbc->regs->portsc);
755 if (portsc & DBC_PORTSC_CONN_CHANGE)
756 dev_info(dbc->dev, "DbC port connect change\n");
757
758 if (portsc & DBC_PORTSC_RESET_CHANGE)
759 dev_info(dbc->dev, "DbC port reset change\n");
760
761 if (portsc & DBC_PORTSC_LINK_CHANGE)
762 dev_info(dbc->dev, "DbC port link status change\n");
763
764 if (portsc & DBC_PORTSC_CONFIG_CHANGE)
765 dev_info(dbc->dev, "DbC config error change\n");
766
767 /* Port reset change bit will be cleared in other place: */
768 writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
769 }
770
/*
 * Handle a transfer completion event: match it to a pending request on
 * the endpoint, translate the completion code to a status, and give the
 * request back.  Stall events need special care — see the long comment
 * in the COMP_STALL_ERROR case below.
 */
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	struct xhci_ep_ctx *ep_ctx;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ?
	      get_out_ep(dbc) : get_in_ep(dbc);
	ep_ctx = (ep_id == EPID_OUT) ?
	      dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
	ring = dep->ring;

	/*
	 * Match the pending request by TRB address; while scanning, also
	 * retire any request previously parked by a spurious stall event.
	 */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
		if (r->status == -COMP_STALL_ERROR) {
			dev_warn(dbc->dev, "Give back stale stalled req\n");
			ring->num_trbs_free++;
			xhci_dbc_giveback(r, 0);
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic, req->trb_dma);

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
			 event->trans_event.buffer, remain_length, ep_ctx->deq);
		status = 0;
		dep->halted = 1;

		/*
		 * xHC DbC may trigger a STALL bulk xfer event when host sends a
		 * ClearFeature(ENDPOINT_HALT) request even if there wasn't an
		 * active bulk transfer.
		 *
		 * Don't give back this transfer request as hardware will later
		 * start processing TRBs starting from this 'STALLED' TRB,
		 * causing TRBs and requests to be out of sync.
		 *
		 * If STALL event shows some bytes were transferred then assume
		 * it's an actual transfer issue and give back the request.
		 * In this case mark the TRB as No-Op to avoid hw from using the
		 * TRB again.
		 */

		if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
			dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
			if (remain_length == req->length) {
				dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
				/* park the request; retired on a later pass */
				req->status = -COMP_STALL_ERROR;
				req->actual = 0;
				return;
			}
			dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
			trb_to_noop(req->trb);
		}
		break;

	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}
868
inc_evt_deq(struct xhci_ring * ring)869 static void inc_evt_deq(struct xhci_ring *ring)
870 {
871 /* If on the last TRB of the segment go back to the beginning */
872 if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
873 ring->cycle_state ^= 1;
874 ring->dequeue = ring->deq_seg->trbs;
875 return;
876 }
877 ring->dequeue++;
878 }
879
/*
 * Run one pass of the DbC state machine, then drain the event ring.
 * Caller holds dbc->lock.  The return value tells the work handler
 * which callback (if any) to invoke and how fast to re-poll.
 */
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	union xhci_trb *evt;
	enum evtreturn ret = EVT_DONE;
	u32 ctrl, portsc;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:

		return EVT_ERR;
	case DS_ENABLED:
		/* Wait for a host connection */
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		/* Wait for the controller to start running */
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			ret = EVT_GSER;
			break;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);
			xhci_dbc_reinit_ep_rings(dbc);
			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);
			xhci_dbc_reinit_ep_rings(dbc);
			return EVT_DISC;
		}

		/* Check and handle changes in endpoint halt status */
		ctrl = readl(&dbc->regs->control);
		handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
		handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}
		break;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic,
					    xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
								 dbc->ring_evt->dequeue));

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			/* don't let EVT_XFER_DONE override a pending EVT_GSER */
			if (ret != EVT_GSER)
				ret = EVT_XFER_DONE;
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return ret;
}
994
/*
 * Delayed-work handler: process DbC events, invoke function-driver
 * callbacks outside the lock, and reschedule itself.  The poll rate is
 * raised (interval 0) while transfers are pending or recently active.
 * On EVT_ERR the work is not rescheduled, stopping event handling.
 */
static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;
	unsigned int poll_interval;
	unsigned long busypoll_timelimit;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	poll_interval = dbc->poll_interval;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		/*
		 * Set fast poll rate if there are pending out transfers, or
		 * a transfer was recently processed
		 */
		busypoll_timelimit = dbc->xfer_timestamp +
			msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT);

		if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
		    time_is_after_jiffies(busypoll_timelimit))
			poll_interval = 0;
		break;
	case EVT_XFER_DONE:
		dbc->xfer_timestamp = jiffies;
		poll_interval = 0;
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_percpu_wq, &dbc->event_work,
			 msecs_to_jiffies(poll_interval));
}
1043
/* Human-readable names for each dbc_state value, shown via sysfs. */
static const char * const dbc_state_strings[DS_MAX] = {
	[DS_DISABLED] = "disabled",
	[DS_INITIALIZED] = "initialized",
	[DS_ENABLED] = "enabled",
	[DS_CONNECTED] = "connected",
	[DS_CONFIGURED] = "configured",
};
1051
dbc_show(struct device * dev,struct device_attribute * attr,char * buf)1052 static ssize_t dbc_show(struct device *dev,
1053 struct device_attribute *attr,
1054 char *buf)
1055 {
1056 struct xhci_dbc *dbc;
1057 struct xhci_hcd *xhci;
1058
1059 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1060 dbc = xhci->dbc;
1061
1062 if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
1063 return sysfs_emit(buf, "unknown\n");
1064
1065 return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);
1066 }
1067
dbc_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1068 static ssize_t dbc_store(struct device *dev,
1069 struct device_attribute *attr,
1070 const char *buf, size_t count)
1071 {
1072 struct xhci_hcd *xhci;
1073 struct xhci_dbc *dbc;
1074
1075 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1076 dbc = xhci->dbc;
1077
1078 if (sysfs_streq(buf, "enable"))
1079 xhci_dbc_start(dbc);
1080 else if (sysfs_streq(buf, "disable"))
1081 xhci_dbc_stop(dbc);
1082 else
1083 return -EINVAL;
1084
1085 return count;
1086 }
1087
dbc_idVendor_show(struct device * dev,struct device_attribute * attr,char * buf)1088 static ssize_t dbc_idVendor_show(struct device *dev,
1089 struct device_attribute *attr,
1090 char *buf)
1091 {
1092 struct xhci_dbc *dbc;
1093 struct xhci_hcd *xhci;
1094
1095 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1096 dbc = xhci->dbc;
1097
1098 return sysfs_emit(buf, "%04x\n", dbc->idVendor);
1099 }
1100
dbc_idVendor_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1101 static ssize_t dbc_idVendor_store(struct device *dev,
1102 struct device_attribute *attr,
1103 const char *buf, size_t size)
1104 {
1105 struct xhci_dbc *dbc;
1106 struct xhci_hcd *xhci;
1107 void __iomem *ptr;
1108 u16 value;
1109 u32 dev_info;
1110 int ret;
1111
1112 ret = kstrtou16(buf, 0, &value);
1113 if (ret)
1114 return ret;
1115
1116 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1117 dbc = xhci->dbc;
1118 if (dbc->state != DS_DISABLED)
1119 return -EBUSY;
1120
1121 dbc->idVendor = value;
1122 ptr = &dbc->regs->devinfo1;
1123 dev_info = readl(ptr);
1124 dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
1125 writel(dev_info, ptr);
1126
1127 return size;
1128 }
1129
dbc_idProduct_show(struct device * dev,struct device_attribute * attr,char * buf)1130 static ssize_t dbc_idProduct_show(struct device *dev,
1131 struct device_attribute *attr,
1132 char *buf)
1133 {
1134 struct xhci_dbc *dbc;
1135 struct xhci_hcd *xhci;
1136
1137 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1138 dbc = xhci->dbc;
1139
1140 return sysfs_emit(buf, "%04x\n", dbc->idProduct);
1141 }
1142
dbc_idProduct_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1143 static ssize_t dbc_idProduct_store(struct device *dev,
1144 struct device_attribute *attr,
1145 const char *buf, size_t size)
1146 {
1147 struct xhci_dbc *dbc;
1148 struct xhci_hcd *xhci;
1149 void __iomem *ptr;
1150 u32 dev_info;
1151 u16 value;
1152 int ret;
1153
1154 ret = kstrtou16(buf, 0, &value);
1155 if (ret)
1156 return ret;
1157
1158 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1159 dbc = xhci->dbc;
1160 if (dbc->state != DS_DISABLED)
1161 return -EBUSY;
1162
1163 dbc->idProduct = value;
1164 ptr = &dbc->regs->devinfo2;
1165 dev_info = readl(ptr);
1166 dev_info = (dev_info & ~(0xffffu)) | value;
1167 writel(dev_info, ptr);
1168 return size;
1169 }
1170
dbc_bcdDevice_show(struct device * dev,struct device_attribute * attr,char * buf)1171 static ssize_t dbc_bcdDevice_show(struct device *dev,
1172 struct device_attribute *attr,
1173 char *buf)
1174 {
1175 struct xhci_dbc *dbc;
1176 struct xhci_hcd *xhci;
1177
1178 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1179 dbc = xhci->dbc;
1180
1181 return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);
1182 }
1183
dbc_bcdDevice_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1184 static ssize_t dbc_bcdDevice_store(struct device *dev,
1185 struct device_attribute *attr,
1186 const char *buf, size_t size)
1187 {
1188 struct xhci_dbc *dbc;
1189 struct xhci_hcd *xhci;
1190 void __iomem *ptr;
1191 u32 dev_info;
1192 u16 value;
1193 int ret;
1194
1195 ret = kstrtou16(buf, 0, &value);
1196 if (ret)
1197 return ret;
1198
1199 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1200 dbc = xhci->dbc;
1201 if (dbc->state != DS_DISABLED)
1202 return -EBUSY;
1203
1204 dbc->bcdDevice = value;
1205 ptr = &dbc->regs->devinfo2;
1206 dev_info = readl(ptr);
1207 dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
1208 writel(dev_info, ptr);
1209
1210 return size;
1211 }
1212
dbc_manufacturer_show(struct device * dev,struct device_attribute * attr,char * buf)1213 static ssize_t dbc_manufacturer_show(struct device *dev,
1214 struct device_attribute *attr,
1215 char *buf)
1216 {
1217 struct xhci_hcd *xhci = hcd_to_xhci(dev_get_drvdata(dev));
1218 struct xhci_dbc *dbc = xhci->dbc;
1219
1220 return sysfs_emit(buf, "%s\n", dbc->str.manufacturer);
1221 }
1222
dbc_manufacturer_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1223 static ssize_t dbc_manufacturer_store(struct device *dev,
1224 struct device_attribute *attr,
1225 const char *buf, size_t size)
1226 {
1227 struct xhci_hcd *xhci = hcd_to_xhci(dev_get_drvdata(dev));
1228 struct xhci_dbc *dbc = xhci->dbc;
1229 size_t len;
1230
1231 if (dbc->state != DS_DISABLED)
1232 return -EBUSY;
1233
1234 len = strcspn(buf, "\n");
1235 if (!len)
1236 return -EINVAL;
1237
1238 if (len > USB_MAX_STRING_LEN)
1239 return -E2BIG;
1240
1241 memcpy(dbc->str.manufacturer, buf, len);
1242 dbc->str.manufacturer[len] = '\0';
1243
1244 return size;
1245 }
1246
dbc_product_show(struct device * dev,struct device_attribute * attr,char * buf)1247 static ssize_t dbc_product_show(struct device *dev,
1248 struct device_attribute *attr,
1249 char *buf)
1250 {
1251 struct xhci_hcd *xhci = hcd_to_xhci(dev_get_drvdata(dev));
1252 struct xhci_dbc *dbc = xhci->dbc;
1253
1254 return sysfs_emit(buf, "%s\n", dbc->str.product);
1255 }
1256
dbc_product_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1257 static ssize_t dbc_product_store(struct device *dev,
1258 struct device_attribute *attr,
1259 const char *buf, size_t size)
1260 {
1261 struct xhci_hcd *xhci = hcd_to_xhci(dev_get_drvdata(dev));
1262 struct xhci_dbc *dbc = xhci->dbc;
1263 size_t len;
1264
1265 if (dbc->state != DS_DISABLED)
1266 return -EBUSY;
1267
1268 len = strcspn(buf, "\n");
1269 if (!len)
1270 return -EINVAL;
1271
1272 if (len > USB_MAX_STRING_LEN)
1273 return -E2BIG;
1274
1275 memcpy(dbc->str.product, buf, len);
1276 dbc->str.product[len] = '\0';
1277
1278 return size;
1279 }
1280
dbc_serial_show(struct device * dev,struct device_attribute * attr,char * buf)1281 static ssize_t dbc_serial_show(struct device *dev,
1282 struct device_attribute *attr,
1283 char *buf)
1284 {
1285 struct xhci_hcd *xhci = hcd_to_xhci(dev_get_drvdata(dev));
1286 struct xhci_dbc *dbc = xhci->dbc;
1287
1288 return sysfs_emit(buf, "%s\n", dbc->str.serial);
1289 }
1290
dbc_serial_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1291 static ssize_t dbc_serial_store(struct device *dev,
1292 struct device_attribute *attr,
1293 const char *buf, size_t size)
1294 {
1295 struct xhci_hcd *xhci = hcd_to_xhci(dev_get_drvdata(dev));
1296 struct xhci_dbc *dbc = xhci->dbc;
1297 size_t len;
1298
1299 if (dbc->state != DS_DISABLED)
1300 return -EBUSY;
1301
1302 len = strcspn(buf, "\n");
1303 if (!len)
1304 return -EINVAL;
1305
1306 if (len > USB_MAX_STRING_LEN)
1307 return -E2BIG;
1308
1309 memcpy(dbc->str.serial, buf, len);
1310 dbc->str.serial[len] = '\0';
1311
1312 return size;
1313 }
1314
dbc_bInterfaceProtocol_show(struct device * dev,struct device_attribute * attr,char * buf)1315 static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
1316 struct device_attribute *attr,
1317 char *buf)
1318 {
1319 struct xhci_dbc *dbc;
1320 struct xhci_hcd *xhci;
1321
1322 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1323 dbc = xhci->dbc;
1324
1325 return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);
1326 }
1327
dbc_bInterfaceProtocol_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1328 static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
1329 struct device_attribute *attr,
1330 const char *buf, size_t size)
1331 {
1332 struct xhci_dbc *dbc;
1333 struct xhci_hcd *xhci;
1334 void __iomem *ptr;
1335 u32 dev_info;
1336 u8 value;
1337 int ret;
1338
1339 /* bInterfaceProtocol is 8 bit, but... */
1340 ret = kstrtou8(buf, 0, &value);
1341 if (ret)
1342 return ret;
1343
1344 /* ...xhci only supports values 0 and 1 */
1345 if (value > 1)
1346 return -EINVAL;
1347
1348 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1349 dbc = xhci->dbc;
1350 if (dbc->state != DS_DISABLED)
1351 return -EBUSY;
1352
1353 dbc->bInterfaceProtocol = value;
1354 ptr = &dbc->regs->devinfo1;
1355 dev_info = readl(ptr);
1356 dev_info = (dev_info & ~(0xffu)) | value;
1357 writel(dev_info, ptr);
1358
1359 return size;
1360 }
1361
dbc_poll_interval_ms_show(struct device * dev,struct device_attribute * attr,char * buf)1362 static ssize_t dbc_poll_interval_ms_show(struct device *dev,
1363 struct device_attribute *attr,
1364 char *buf)
1365 {
1366 struct xhci_dbc *dbc;
1367 struct xhci_hcd *xhci;
1368
1369 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1370 dbc = xhci->dbc;
1371
1372 return sysfs_emit(buf, "%u\n", dbc->poll_interval);
1373 }
1374
dbc_poll_interval_ms_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1375 static ssize_t dbc_poll_interval_ms_store(struct device *dev,
1376 struct device_attribute *attr,
1377 const char *buf, size_t size)
1378 {
1379 struct xhci_dbc *dbc;
1380 struct xhci_hcd *xhci;
1381 u32 value;
1382 int ret;
1383
1384 ret = kstrtou32(buf, 0, &value);
1385 if (ret || value > DBC_POLL_INTERVAL_MAX)
1386 return -EINVAL;
1387
1388 xhci = hcd_to_xhci(dev_get_drvdata(dev));
1389 dbc = xhci->dbc;
1390
1391 dbc->poll_interval = value;
1392
1393 mod_delayed_work(system_percpu_wq, &dbc->event_work, 0);
1394
1395 return size;
1396 }
1397
/* sysfs attributes exposed under the xHCI host controller device */
static DEVICE_ATTR_RW(dbc);
static DEVICE_ATTR_RW(dbc_idVendor);
static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_serial);
static DEVICE_ATTR_RW(dbc_product);
static DEVICE_ATTR_RW(dbc_manufacturer);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);
static DEVICE_ATTR_RW(dbc_poll_interval_ms);

static struct attribute *dbc_dev_attrs[] = {
	&dev_attr_dbc.attr,
	&dev_attr_dbc_idVendor.attr,
	&dev_attr_dbc_idProduct.attr,
	&dev_attr_dbc_bcdDevice.attr,
	&dev_attr_dbc_serial.attr,
	&dev_attr_dbc_product.attr,
	&dev_attr_dbc_manufacturer.attr,
	&dev_attr_dbc_bInterfaceProtocol.attr,
	&dev_attr_dbc_poll_interval_ms.attr,
	NULL
};
/* Generates dbc_dev_groups used by sysfs_create_groups()/sysfs_remove_groups() */
ATTRIBUTE_GROUPS(dbc_dev);
1421
1422 struct xhci_dbc *
xhci_alloc_dbc(struct device * dev,void __iomem * base,const struct dbc_driver * driver)1423 xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
1424 {
1425 struct xhci_dbc *dbc;
1426 int ret;
1427
1428 dbc = kzalloc_obj(*dbc);
1429 if (!dbc)
1430 return NULL;
1431
1432 dbc->regs = base;
1433 dbc->dev = dev;
1434 dbc->driver = driver;
1435 dbc->idProduct = DBC_PRODUCT_ID;
1436 dbc->idVendor = DBC_VENDOR_ID;
1437 dbc->bcdDevice = DBC_DEVICE_REV;
1438 dbc->bInterfaceProtocol = DBC_PROTOCOL;
1439 dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;
1440
1441 /* initialize serial, product and manufacturer with default values */
1442 dbc->str = dbc_str_default;
1443
1444 if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
1445 goto err;
1446
1447 INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
1448 spin_lock_init(&dbc->lock);
1449
1450 ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
1451 if (ret)
1452 goto err;
1453
1454 return dbc;
1455 err:
1456 kfree(dbc);
1457 return NULL;
1458 }
1459
1460 /* undo what xhci_alloc_dbc() did */
xhci_dbc_remove(struct xhci_dbc * dbc)1461 void xhci_dbc_remove(struct xhci_dbc *dbc)
1462 {
1463 if (!dbc)
1464 return;
1465 /* stop hw, stop wq and call dbc->ops->stop() */
1466 xhci_dbc_stop(dbc);
1467
1468 /* remove sysfs files */
1469 sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);
1470
1471 kfree(dbc);
1472 }
1473
1474
xhci_create_dbc_dev(struct xhci_hcd * xhci)1475 int xhci_create_dbc_dev(struct xhci_hcd *xhci)
1476 {
1477 struct device *dev;
1478 void __iomem *base;
1479 int ret;
1480 int dbc_cap_offs;
1481
1482 /* create all parameters needed resembling a dbc device */
1483 dev = xhci_to_hcd(xhci)->self.controller;
1484 base = &xhci->cap_regs->hc_capbase;
1485
1486 dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
1487 if (!dbc_cap_offs)
1488 return -ENODEV;
1489
1490 /* already allocated and in use */
1491 if (xhci->dbc)
1492 return -EBUSY;
1493
1494 ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);
1495
1496 return ret;
1497 }
1498
xhci_remove_dbc_dev(struct xhci_hcd * xhci)1499 void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
1500 {
1501 unsigned long flags;
1502
1503 if (!xhci->dbc)
1504 return;
1505
1506 xhci_dbc_tty_remove(xhci->dbc);
1507 spin_lock_irqsave(&xhci->lock, flags);
1508 xhci->dbc = NULL;
1509 spin_unlock_irqrestore(&xhci->lock, flags);
1510 }
1511
1512 #ifdef CONFIG_PM
xhci_dbc_suspend(struct xhci_hcd * xhci)1513 int xhci_dbc_suspend(struct xhci_hcd *xhci)
1514 {
1515 struct xhci_dbc *dbc = xhci->dbc;
1516
1517 if (!dbc)
1518 return 0;
1519
1520 switch (dbc->state) {
1521 case DS_ENABLED:
1522 case DS_CONNECTED:
1523 case DS_CONFIGURED:
1524 dbc->resume_required = 1;
1525 break;
1526 default:
1527 break;
1528 }
1529
1530 xhci_dbc_stop(dbc);
1531
1532 return 0;
1533 }
1534
xhci_dbc_resume(struct xhci_hcd * xhci)1535 int xhci_dbc_resume(struct xhci_hcd *xhci)
1536 {
1537 int ret = 0;
1538 struct xhci_dbc *dbc = xhci->dbc;
1539
1540 if (!dbc)
1541 return 0;
1542
1543 if (dbc->resume_required) {
1544 dbc->resume_required = 0;
1545 xhci_dbc_start(dbc);
1546 }
1547
1548 return ret;
1549 }
1550 #endif /* CONFIG_PM */
1551
/* Module-wide init: register the DbC TTY driver */
int xhci_dbc_init(void)
{
	return dbc_tty_init();
}
1556
/* Module-wide teardown: unregister the DbC TTY driver */
void xhci_dbc_exit(void)
{
	dbc_tty_exit();
}
1561