Lines matching "+full:generic +full:-xhci" in xhci-dbgcap.c (excerpt; non-contiguous fragments, grouped below by the function they appear in)
// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 */

#include <linux/dma-mapping.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"
/* in dbc_free_ctx() */
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);

/* in dbc_ring_free() */
	if (ring->first_seg) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
/* in xhci_dbc_populate_strings() */
	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length = s_desc->bLength;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;

	/* String0 descriptor (LANGID table): */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;
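/*
 * Illustration only - a minimal userspace sketch, not part of
 * xhci-dbgcap.c. It shows the arithmetic used above: a USB string
 * descriptor's bLength is the 2-byte header plus 2 bytes per UTF-16
 * code unit, which is what (strlen(s) + 1) * 2 computes for an ASCII
 * string, and string0's bytes 0x09,0x04 form LANGID 0x0409 (US English)
 * little-endian. The serial value here is a made-up stand-in.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *serial = "0001";	/* stand-in for DBC_STRING_SERIAL */
	unsigned int blength = (strlen(serial) + 1) * 2;

	/* 2-byte header (bLength + bDescriptorType) + UTF-16LE payload */
	printf("bLength = %u (2 header + %zu*2 payload)\n",
	       blength, strlen(serial));
	printf("string0 LANGID = 0x%04x\n", (0x04 << 8) | 0x09);
	return 0;
}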
/* in xhci_dbc_init_ep_contexts() */
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));

	/* bulk OUT endpoint context */
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* bulk IN endpoint context */
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
/* in xhci_dbc_init_contexts() */
	/* Populate the info context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Set the DbC context pointer and device info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
	writel(dev_info, &dbc->regs->devinfo2);
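/*
 * Illustration only - a userspace sketch, not driver code. It shows how
 * the two 32-bit DbC device-info registers are packed above (DEVINFO1:
 * idVendor in bits 31:16, bInterfaceProtocol in the low bits; DEVINFO2:
 * bcdDevice high, idProduct low) and how the fields unpack again. The
 * numeric values are example stand-ins, not the driver's defaults.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t idVendor = 0x1d6b, idProduct = 0x0010;	/* example values */
	uint16_t bcdDevice = 0x0010, bInterfaceProtocol = 1;

	unsigned int devinfo1 = ((unsigned int)idVendor << 16) | bInterfaceProtocol;
	unsigned int devinfo2 = ((unsigned int)bcdDevice << 16) | idProduct;

	printf("devinfo1 = 0x%08x (vendor 0x%04x, proto 0x%02x)\n",
	       devinfo1, devinfo1 >> 16, devinfo1 & 0xff);
	printf("devinfo2 = 0x%08x (bcd 0x%04x, product 0x%04x)\n",
	       devinfo2, devinfo2 >> 16, devinfo2 & 0xffff);
	return 0;
}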
/* in xhci_dbc_giveback() */
	__releases(&dbc->lock)
	__acquires(&dbc->lock)

	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
/* in trb_to_noop() */
	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	/* Preserve only the cycle bit of this TRB */
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

/* in xhci_dbc_flush_single_request() */
	trb_to_noop(req->trb);
	xhci_dbc_giveback(req, -ESHUTDOWN);
/* in xhci_dbc_flush_endpoint_requests() */
	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);

/* in xhci_dbc_flush_requests() */
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);

/* in dbc_alloc_request() */
	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;
/* in xhci_dbc_queue_trb() */
	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic,
				       xhci_trb_virt_to_dma(ring->enq_seg,
							    ring->enqueue));
	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
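/*
 * Illustration only - a userspace sketch, not driver code. It models the
 * cycle-bit protocol used by the enqueue path above: the producer flips
 * its cycle state each time it wraps past the link TRB, so a slot is
 * "owned" by the consumer only while slot.cycle matches the consumer's
 * cycle state. RING_SLOTS is a made-up size; as in the driver, the
 * cycle state starts at 1 and the last slot plays the role of the link
 * TRB and never carries data.
 */
#include <stdio.h>

#define RING_SLOTS 4

struct slot { int cycle; int data; };

int main(void)
{
	struct slot ring[RING_SLOTS] = {0};
	int enq = 0, producer_cycle = 1;

	for (int i = 0; i < 6; i++) {	/* enqueue 6 items, forcing a wrap */
		ring[enq].data = i;
		ring[enq].cycle = producer_cycle;
		printf("item %d -> slot %d, cycle %d\n", i, enq, producer_cycle);
		if (++enq == RING_SLOTS - 1) {	/* reached the link slot */
			enq = 0;		/* wrap to the first TRB */
			producer_cycle ^= 1;	/* and toggle the cycle bit */
		}
	}
	return 0;
}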
/* in xhci_dbc_queue_bulk_tx() */
	struct xhci_dbc *dbc = req->dbc;
	struct xhci_ring *ring = dep->ring;

	num_trbs = count_trbs(req->dma, req->length);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = req->dma;
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);

	/* all other TRB fields are written first; the cycle bit is flipped
	 * last so the controller never sees a half-written TRB as owned */
	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);
/* in dbc_ep_do_queue() */
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);
/* in dbc_ep_queue() */
	struct xhci_dbc *dbc = req->dbc;
	int ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);
/* in xhci_dbc_do_eps_init() */
	dep = &dbc->eps[direction];
	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);

/* in xhci_dbc_eps_exit() */
	memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
/* in dbc_erst_alloc() */
	erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
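/*
 * Illustration only - a userspace sketch of the shape of the one-entry
 * event ring segment table allocated above. Per the xHCI spec an ERST
 * entry is 16 bytes: a 64-bit segment base address, a 32-bit segment
 * size in TRBs, and 32 reserved bits. The address and size below are
 * demo stand-ins (the driver uses the segment's DMA address and
 * TRBS_PER_SEGMENT).
 */
#include <stdint.h>
#include <stdio.h>

struct erst_entry {
	uint64_t seg_addr;	/* DMA address of the event ring segment */
	uint32_t seg_size;	/* number of TRBs in the segment */
	uint32_t rsvd;
};

int main(void)
{
	struct erst_entry erst[1] = {
		{ .seg_addr = 0x1000, .seg_size = 256, .rsvd = 0 },
	};

	printf("entry size = %zu bytes, segment @0x%llx holds %u TRBs\n",
	       sizeof(erst[0]),
	       (unsigned long long)erst[0].seg_addr, erst[0].seg_size);
	return 0;
}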
/* in dbc_erst_free() */
	dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
			  erst->erst_dma_addr);
	erst->entries = NULL;

/* in dbc_alloc_ctx() */
	/* xHCI 7.6.9: all three contexts (info, ep-out and ep-in), 64 bytes each */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
/* in xhci_dbc_ring_init() */
	struct xhci_segment *seg = ring->first_seg;

	/* clear all TRBs on the ring in case of an old ring */
	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);

	/* only the event ring does not use a link TRB */
	if (ring->type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		/* single segment: the link TRB points back at the same segment */
		trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
/* in xhci_dbc_reinit_ep_rings() */
	struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring;
	struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring;

	if (!in_ring || !out_ring || !dbc->ctx) {
		dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n");
		return -ENODEV;
	}
/* in xhci_dbc_ring_alloc() */
	ring->num_segs = 1;
	ring->type = type;

	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;	/* the single segment points back at itself */

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	INIT_LIST_HEAD(&ring->td_list);
/* in xhci_dbc_mem_init() */
	struct device *dev = dbc->dev;

	/* Allocate the rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate the ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate the context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(*dbc->string);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Set up the ERST registers: */
	writel(dbc->erst.num_entries, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Set up strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
/* in xhci_dbc_mem_cleanup() */
	dma_free_coherent(dbc->dev, dbc->string_size, dbc->string, dbc->string_dma);
	dbc->string = NULL;

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
/* in xhci_do_dbc_start() */
	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE, 0, 1000);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;
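/*
 * Illustration only - a userspace sketch of the handshake pattern used
 * above: poll a register until the masked value equals the expected
 * value or a timeout expires. A plain variable stands in for the MMIO
 * register, the bit position is a stand-in, and the real xhci_handshake()
 * additionally delays between polls.
 */
#include <stdint.h>
#include <stdio.h>

static int handshake(volatile uint32_t *reg, uint32_t mask,
		     uint32_t done, int max_iters)
{
	while (max_iters--) {
		if ((*reg & mask) == done)
			return 0;	/* condition met */
		/* real code would udelay() here */
	}
	return -1;	/* timed out */
}

int main(void)
{
	volatile uint32_t control = 0;
	uint32_t enable_bit = 1u << 31;	/* stand-in for the enable flag */

	/* wait for the enable bit to read back as clear: done == 0 */
	printf("clear: %d\n", handshake(&control, enable_bit, 0, 1000));
	control |= enable_bit;	/* pretend the hardware set the bit */
	printf("set:   %d\n", handshake(&control, enable_bit, enable_bit, 1000));
	return 0;
}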
/* in xhci_do_dbc_stop() */
	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;
/* in xhci_dbc_start() */
	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work,
				msecs_to_jiffies(dbc->poll_interval));
/* in xhci_dbc_stop() */
	switch (dbc->state) {
	case DS_CONFIGURED:
		spin_lock(&dbc->lock);
		xhci_dbc_flush_requests(dbc);
		spin_unlock(&dbc->lock);

		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	xhci_dbc_mem_cleanup(dbc);
	pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
722 dev_info(dbc->dev, "DbC Endpoint halted\n"); in handle_ep_halt_changes()
723 dep->halted = 1; in handle_ep_halt_changes()
725 } else if (dep->halted) { in handle_ep_halt_changes()
726 dev_info(dbc->dev, "DbC Endpoint halt cleared\n"); in handle_ep_halt_changes()
727 dep->halted = 0; in handle_ep_halt_changes()
729 if (!list_empty(&dep->list_pending)) in handle_ep_halt_changes()
730 writel(DBC_DOOR_BELL_TARGET(dep->direction), in handle_ep_halt_changes()
731 &dbc->regs->doorbell); in handle_ep_halt_changes()
/* in dbc_handle_port_status() */
	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");
	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");
	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");
	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* Port reset change bit will be cleared in other place: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
/* in dbc_handle_xfer_event() */
	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	ring = dep->ring;

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
		if (r->status == -COMP_STALL_ERROR) {
			dev_warn(dbc->dev, "Give back stale stalled req\n");
			ring->num_trbs_free++;
			xhci_dbc_giveback(r, 0);
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic, req->trb_dma);

	switch (comp_code) {
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
			 event->trans_event.buffer, remain_length, ep_ctx->deq);
		dep->halted = 1;

		/*
		 * If the stall event shows bytes were transferred, give the
		 * request back. In this case mark the TRB as No-Op to avoid
		 * hw from using the TRB again.
		 */
		if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
			dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
			if (remain_length == req->length) {
				dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
				req->status = -COMP_STALL_ERROR;
				req->actual = 0;
				return;
			}
			dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
			trb_to_noop(req->trb);
		}
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
/* in inc_evt_deq() */
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
/* in xhci_dbc_do_handle_events() */
	/* DbC state machine: */
	switch (dbc->state) {
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}
		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}
		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			return EVT_DISC;
		}
		/* Handle debug port reset: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			return EVT_DISC;
		}
		ctrl = readl(&dbc->regs->control);
		/* Clear the DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}
		break;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
	       dbc->ring_evt->cycle_state) {
		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic,
					    xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
								 dbc->ring_evt->dequeue));

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		}

		inc_evt_deq(dbc->ring_evt);
		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update the event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}
/* in xhci_dbc_handle_events() */
	poll_interval = dbc->poll_interval;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		/* poll faster while OUT transfers are pending or were recent */
		busypoll_timelimit = dbc->xfer_timestamp +
			msecs_to_jiffies(DBC_QUEUE_BUSYPOLL_INTERVAL);
		if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
		    time_is_after_jiffies(busypoll_timelimit))
			poll_interval = 0;
		break;
	case EVT_XFER_DONE:
		dbc->xfer_timestamp = jiffies;
		poll_interval = 0;
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work,
			 msecs_to_jiffies(poll_interval));
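/*
 * Illustration only - a userspace sketch of the adaptive polling idea
 * used above: right after a transfer, or while OUT requests are pending,
 * the work is rescheduled immediately (interval 0); once a busy-poll
 * window has expired it falls back to the slower configured interval.
 * A plain millisecond counter stands in for jiffies, and the window
 * length is a made-up stand-in for DBC_QUEUE_BUSYPOLL_INTERVAL.
 */
#include <stdbool.h>
#include <stdio.h>

#define BUSYPOLL_WINDOW_MS 10

static unsigned int next_poll_ms(unsigned long now_ms,
				 unsigned long last_xfer_ms,
				 bool out_pending,
				 unsigned int slow_ms)
{
	if (out_pending || now_ms < last_xfer_ms + BUSYPOLL_WINDOW_MS)
		return 0;	/* busy: poll again right away */
	return slow_ms;		/* idle: fall back to slow polling */
}

int main(void)
{
	printf("just after xfer: %u ms\n", next_poll_ms(105, 100, false, 64));
	printf("idle:            %u ms\n", next_poll_ms(200, 100, false, 64));
	return 0;
}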
/* in dbc_show() */
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);

/* in dbc_store() */
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (sysfs_streq(buf, "enable"))
		xhci_dbc_start(dbc);
	else if (sysfs_streq(buf, "disable"))
		xhci_dbc_stop(dbc);
	else
		return -EINVAL;
/* in dbc_idVendor_show() */
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->idVendor);

/* in dbc_idVendor_store() */
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idVendor = value;
	ptr = &dbc->regs->devinfo1;	/* idVendor lives in DEVINFO1[31:16] */
/* in dbc_idProduct_show() */
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->idProduct);

/* in dbc_idProduct_store() */
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idProduct = value;
	ptr = &dbc->regs->devinfo2;	/* idProduct lives in DEVINFO2[15:0] */
/* in dbc_bcdDevice_show() */
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);

/* in dbc_bcdDevice_store() */
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bcdDevice = value;
	ptr = &dbc->regs->devinfo2;	/* bcdDevice lives in DEVINFO2[31:16] */
/* in dbc_bInterfaceProtocol_show() */
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);

/* in dbc_bInterfaceProtocol_store() */
	struct xhci_hcd *xhci;

	/* bInterfaceProtocol is 8 bit, but xhci only supports values 0 and 1 */
	if (ret || value > 1)
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bInterfaceProtocol = value;
	ptr = &dbc->regs->devinfo1;	/* protocol lives in DEVINFO1[7:0] */
/* in dbc_poll_interval_ms_show() */
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%u\n", dbc->poll_interval);

/* in dbc_poll_interval_ms_store() */
	struct xhci_hcd *xhci;

	ret = kstrtou32(buf, 0, &value);
	if (ret || value > DBC_POLL_INTERVAL_MAX)
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	dbc->poll_interval = value;

	mod_delayed_work(system_wq, &dbc->event_work, 0);
/* in xhci_alloc_dbc() */
	dbc->regs = base;
	dbc->dev = dev;
	dbc->driver = driver;
	dbc->idProduct = DBC_PRODUCT_ID;
	dbc->idVendor = DBC_VENDOR_ID;
	dbc->bcdDevice = DBC_DEVICE_REV;
	dbc->bInterfaceProtocol = DBC_PROTOCOL;
	dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;

	if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
		goto err;

	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
/* in xhci_dbc_remove() */
	/* stop hw, stop wq and call dbc->ops->stop() */
	xhci_dbc_stop(dbc);

	sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);
int xhci_create_dbc_dev(struct xhci_hcd *xhci)
{
	dev = xhci_to_hcd(xhci)->self.controller;
	base = &xhci->cap_regs->hc_capbase;

	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	/* already allocated and in use */
	if (xhci->dbc)
		return -EBUSY;

	ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);

	return ret;
}
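/*
 * Illustration only - a userspace sketch of how xhci_find_next_ext_cap()
 * conceptually walks the xHCI extended-capability list on which the DbC
 * is found. Per the xHCI spec, each capability dword carries its ID in
 * bits 7:0 and the offset (in dwords) of the next capability in bits
 * 15:8; ID 0x0a is the Debug capability. A small array fakes the MMIO
 * space, and the starting offset is a made-up stand-in (real hardware
 * advertises it in HCCPARAMS1).
 */
#include <stdint.h>
#include <stdio.h>

#define EXT_CAPS_DEBUG 0x0a

static int find_ext_cap(const uint32_t *regs, uint32_t cap_id)
{
	int off = 1;	/* pretend the first capability starts at dword 1 */

	while (off) {
		uint32_t cap = regs[off];

		if ((cap & 0xff) == cap_id)
			return off;		/* found: return dword offset */
		if (!((cap >> 8) & 0xff))
			break;			/* next pointer 0: end of list */
		off += (cap >> 8) & 0xff;
	}
	return 0;
}

int main(void)
{
	/* dword 1: ID 0x01, next at +2 dwords; dword 3: ID 0x0a (Debug), last */
	uint32_t regs[8] = { 0, 0x00000201, 0, 0x0000000a };

	printf("debug cap at dword %d\n", find_ext_cap(regs, EXT_CAPS_DEBUG));
	return 0;
}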
void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
{
	unsigned long flags;

	if (!xhci->dbc)
		return;

	xhci_dbc_tty_remove(xhci->dbc);
	spin_lock_irqsave(&xhci->lock, flags);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(dbc);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(dbc);
	}