// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static inline void *
dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
                       dma_addr_t *dma_handle, gfp_t flags)
{
        void *vaddr;

        vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
                                   size, dma_handle, flags);
        /* dma_alloc_coherent() may fail; only clear the buffer if we got one. */
        if (vaddr)
                memset(vaddr, 0, size);

        return vaddr;
}

static inline void
dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
                      void *cpu_addr, dma_addr_t dma_handle)
{
        if (cpu_addr)
                dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
                                  size, cpu_addr, dma_handle);
}

static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
        struct usb_string_descriptor *s_desc;
        u32 string_length;

        /* Serial string: */
        s_desc = (struct usb_string_descriptor *)strings->serial;
        utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length = s_desc->bLength;
        string_length <<= 8;

        /* Product string: */
        s_desc = (struct usb_string_descriptor *)strings->product;
        utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length += s_desc->bLength;
        string_length <<= 8;

        /* Manufacturer string: */
        s_desc = (struct usb_string_descriptor *)strings->manufacturer;
        utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
                        strlen(DBC_STRING_MANUFACTURER),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length += s_desc->bLength;
        string_length <<= 8;

        /* String0 descriptor: LANGID 0x0409 (US English) */
        strings->string0[0] = 4;
        strings->string0[1] = USB_DT_STRING;
        strings->string0[2] = 0x09;
        strings->string0[3] = 0x04;
        string_length += 4;

        return string_length;
}
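/*
 * Fill in the DbC context data structure: the info context points at the
 * four string descriptors populated above, and the two endpoint contexts
 * describe the bulk OUT/IN transfer rings. The context is then handed to
 * the controller through the DCCP register, together with the device info
 * (vendor/product/revision/protocol) registers.
 */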
static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
{
        struct xhci_dbc *dbc;
        struct dbc_info_context *info;
        struct xhci_ep_ctx *ep_ctx;
        u32 dev_info;
        dma_addr_t deq, dma;
        unsigned int max_burst;

        dbc = xhci->dbc;
        if (!dbc)
                return;

        /* Populate info Context: */
        info = (struct dbc_info_context *)dbc->ctx->bytes;
        dma = dbc->string_dma;
        info->string0 = cpu_to_le64(dma);
        info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
        info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
        info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
        info->length = cpu_to_le32(string_length);

        /* Populate bulk out endpoint context: */
        ep_ctx = dbc_bulkout_ctx(dbc);
        max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
        deq = dbc_bulkout_enq(dbc);
        ep_ctx->ep_info = 0;
        ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
        ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

        /* Populate bulk in endpoint context: */
        ep_ctx = dbc_bulkin_ctx(dbc);
        deq = dbc_bulkin_enq(dbc);
        ep_ctx->ep_info = 0;
        ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
        ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

        /* Set DbC context and info registers: */
        xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);

        /* writel() does the cpu-to-LE conversion itself: */
        dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
        writel(dev_info, &dbc->regs->devinfo1);

        dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
        writel(dev_info, &dbc->regs->devinfo2);
}

static void xhci_dbc_giveback(struct dbc_request *req, int status)
        __releases(&dbc->lock)
        __acquires(&dbc->lock)
{
        struct dbc_ep *dep = req->dep;
        struct xhci_dbc *dbc = dep->dbc;
        struct xhci_hcd *xhci = dbc->xhci;
        struct device *dev = xhci_to_hcd(dbc->xhci)->self.sysdev;

        list_del_init(&req->list_pending);
        req->trb_dma = 0;
        req->trb = NULL;

        if (req->status == -EINPROGRESS)
                req->status = status;

        trace_xhci_dbc_giveback_request(req);

        dma_unmap_single(dev,
                         req->dma,
                         req->length,
                         dbc_ep_dma_direction(dep));

        /* Give back the transfer request: */
        spin_unlock(&dbc->lock);
        req->complete(xhci, req);
        spin_lock(&dbc->lock);
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
        union xhci_trb *trb = req->trb;

        trb->generic.field[0] = 0;
        trb->generic.field[1] = 0;
        trb->generic.field[2] = 0;
        trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
        trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

        xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
        struct dbc_request *req, *tmp;

        list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
                xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
        struct dbc_request *req;

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
                return NULL;

        req->dep = dep;
        INIT_LIST_HEAD(&req->list_pending);
        INIT_LIST_HEAD(&req->list_pool);
        req->direction = dep->direction;

        trace_xhci_dbc_alloc_request(req);

        return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
        trace_xhci_dbc_free_request(req);

        kfree(req);
}

static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
                   u32 field2, u32 field3, u32 field4)
{
        union xhci_trb *trb, *next;

        trb = ring->enqueue;
        trb->generic.field[0] = cpu_to_le32(field1);
        trb->generic.field[1] = cpu_to_le32(field2);
        trb->generic.field[2] = cpu_to_le32(field3);
        trb->generic.field[3] = cpu_to_le32(field4);

        trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

        ring->num_trbs_free--;
        next = ++(ring->enqueue);
        if (TRB_TYPE_LINK_LE32(next->link.control)) {
                next->link.control ^= cpu_to_le32(TRB_CYCLE);
                ring->enqueue = ring->enq_seg->trbs;
                ring->cycle_state ^= 1;
        }
}
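/*
 * Queue a single normal TRB for a request on a bulk transfer ring. The TRB
 * is first written with an inverted cycle bit so the controller ignores it;
 * after a write barrier the cycle bit is flipped to hand the TRB to the
 * hardware, and the doorbell is rung for the endpoint's direction.
 */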
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
                                  struct dbc_request *req)
{
        u64 addr;
        union xhci_trb *trb;
        unsigned int num_trbs;
        struct xhci_dbc *dbc = dep->dbc;
        struct xhci_ring *ring = dep->ring;
        u32 length, control, cycle;

        num_trbs = count_trbs(req->dma, req->length);
        WARN_ON(num_trbs != 1);
        if (ring->num_trbs_free < num_trbs)
                return -EBUSY;

        addr = req->dma;
        trb = ring->enqueue;
        cycle = ring->cycle_state;
        length = TRB_LEN(req->length);
        control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

        if (cycle)
                control &= cpu_to_le32(~TRB_CYCLE);
        else
                control |= cpu_to_le32(TRB_CYCLE);

        req->trb = ring->enqueue;
        req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
        xhci_dbc_queue_trb(ring,
                           lower_32_bits(addr),
                           upper_32_bits(addr),
                           length, control);

        /*
         * Add a barrier between writes of trb fields and flipping
         * the cycle bit:
         */
        wmb();

        if (cycle)
                trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
        else
                trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

        writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

        return 0;
}

static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
        int ret;
        struct device *dev;
        struct xhci_dbc *dbc = dep->dbc;
        struct xhci_hcd *xhci = dbc->xhci;

        dev = xhci_to_hcd(xhci)->self.sysdev;

        if (!req->length || !req->buf)
                return -EINVAL;

        req->actual = 0;
        req->status = -EINPROGRESS;

        req->dma = dma_map_single(dev,
                                  req->buf,
                                  req->length,
                                  dbc_ep_dma_direction(dep));
        if (dma_mapping_error(dev, req->dma)) {
                xhci_err(xhci, "failed to map buffer\n");
                return -EFAULT;
        }

        ret = xhci_dbc_queue_bulk_tx(dep, req);
        if (ret) {
                xhci_err(xhci, "failed to queue trbs\n");
                dma_unmap_single(dev,
                                 req->dma,
                                 req->length,
                                 dbc_ep_dma_direction(dep));
                return -EFAULT;
        }

        list_add_tail(&req->list_pending, &dep->list_pending);

        return 0;
}

int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
                 gfp_t gfp_flags)
{
        unsigned long flags;
        struct xhci_dbc *dbc = dep->dbc;
        int ret = -ESHUTDOWN;

        spin_lock_irqsave(&dbc->lock, flags);
        if (dbc->state == DS_CONFIGURED)
                ret = dbc_ep_do_queue(dep, req);
        spin_unlock_irqrestore(&dbc->lock, flags);

        mod_delayed_work(system_wq, &dbc->event_work, 0);

        trace_xhci_dbc_queue_request(req);

        return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
{
        struct dbc_ep *dep;
        struct xhci_dbc *dbc = xhci->dbc;

        dep = &dbc->eps[direction];
        dep->dbc = dbc;
        dep->direction = direction;
        dep->ring = direction ? dbc->ring_in : dbc->ring_out;

        INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
{
        xhci_dbc_do_eps_init(xhci, BULK_OUT);
        xhci_dbc_do_eps_init(xhci, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
{
        struct xhci_dbc *dbc = xhci->dbc;

        memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}
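/*
 * Allocate and wire up all DbC memory: one event ring, one bulk IN and one
 * bulk OUT transfer ring, the event ring segment table, the context data
 * structure and the coherent string descriptor table, then program the
 * ERSTS/ERSTBA/ERDP registers accordingly.
 */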
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
        int ret;
        dma_addr_t deq;
        u32 string_length;
        struct xhci_dbc *dbc = xhci->dbc;

        /* Allocate various rings for events and transfers: */
        dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
        if (!dbc->ring_evt)
                goto evt_fail;

        dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_in)
                goto in_fail;

        dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_out)
                goto out_fail;

        /* Allocate and populate ERST: */
        ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
        if (ret)
                goto erst_fail;

        /* Allocate context data structure: */
        dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
        if (!dbc->ctx)
                goto ctx_fail;

        /* Allocate the string table: */
        dbc->string_size = sizeof(struct dbc_str_descs);
        dbc->string = dbc_dma_alloc_coherent(xhci,
                                             dbc->string_size,
                                             &dbc->string_dma,
                                             flags);
        if (!dbc->string)
                goto string_fail;

        /* Setup ERST register: */
        writel(dbc->erst.erst_size, &dbc->regs->ersts);
        xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
        deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                   dbc->ring_evt->dequeue);
        xhci_write_64(xhci, deq, &dbc->regs->erdp);

        /* Setup strings and contexts: */
        string_length = xhci_dbc_populate_strings(dbc->string);
        xhci_dbc_init_contexts(xhci, string_length);

        mmiowb();

        xhci_dbc_eps_init(xhci);
        dbc->state = DS_INITIALIZED;

        return 0;

string_fail:
        xhci_free_container_ctx(xhci, dbc->ctx);
        dbc->ctx = NULL;
ctx_fail:
        xhci_free_erst(xhci, &dbc->erst);
erst_fail:
        xhci_ring_free(xhci, dbc->ring_out);
        dbc->ring_out = NULL;
out_fail:
        xhci_ring_free(xhci, dbc->ring_in);
        dbc->ring_in = NULL;
in_fail:
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_evt = NULL;
evt_fail:
        return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
        struct xhci_dbc *dbc = xhci->dbc;

        if (!dbc)
                return;

        xhci_dbc_eps_exit(xhci);

        if (dbc->string) {
                dbc_dma_free_coherent(xhci,
                                      dbc->string_size,
                                      dbc->string, dbc->string_dma);
                dbc->string = NULL;
        }

        xhci_free_container_ctx(xhci, dbc->ctx);
        dbc->ctx = NULL;

        xhci_free_erst(xhci, &dbc->erst);
        xhci_ring_free(xhci, dbc->ring_out);
        xhci_ring_free(xhci, dbc->ring_in);
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_in = NULL;
        dbc->ring_out = NULL;
        dbc->ring_evt = NULL;
}

static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
        int ret;
        u32 ctrl;
        struct xhci_dbc *dbc = xhci->dbc;

        if (dbc->state != DS_DISABLED)
                return -EINVAL;

        writel(0, &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             0, 1000);
        if (ret)
                return ret;

        ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
        if (ret)
                return ret;

        ctrl = readl(&dbc->regs->control);
        writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
               &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             DBC_CTRL_DBC_ENABLE, 1000);
        if (ret)
                return ret;

        dbc->state = DS_ENABLED;

        return 0;
}
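/*
 * Counterpart of xhci_do_dbc_start(): clear the control register, free all
 * DbC memory and return the debug capability to DS_DISABLED. Both helpers
 * are called with dbc->lock held.
 */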
static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
{
        struct xhci_dbc *dbc = xhci->dbc;

        if (dbc->state == DS_DISABLED)
                return -1;

        writel(0, &dbc->regs->control);
        xhci_dbc_mem_cleanup(xhci);
        dbc->state = DS_DISABLED;

        return 0;
}

static int xhci_dbc_start(struct xhci_hcd *xhci)
{
        int ret;
        unsigned long flags;
        struct xhci_dbc *dbc = xhci->dbc;

        WARN_ON(!dbc);

        pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

        spin_lock_irqsave(&dbc->lock, flags);
        ret = xhci_do_dbc_start(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);

        if (ret) {
                pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
                return ret;
        }

        return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
        int ret;
        unsigned long flags;
        struct xhci_dbc *dbc = xhci->dbc;
        struct dbc_port *port = &dbc->port;

        WARN_ON(!dbc);

        cancel_delayed_work_sync(&dbc->event_work);

        if (port->registered)
                xhci_dbc_tty_unregister_device(xhci);

        spin_lock_irqsave(&dbc->lock, flags);
        ret = xhci_do_dbc_stop(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);

        if (!ret)
                pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
}

static void
dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
{
        u32 portsc;
        struct xhci_dbc *dbc = xhci->dbc;

        portsc = readl(&dbc->regs->portsc);
        if (portsc & DBC_PORTSC_CONN_CHANGE)
                xhci_info(xhci, "DbC port connect change\n");

        if (portsc & DBC_PORTSC_RESET_CHANGE)
                xhci_info(xhci, "DbC port reset change\n");

        if (portsc & DBC_PORTSC_LINK_CHANGE)
                xhci_info(xhci, "DbC port link status change\n");

        if (portsc & DBC_PORTSC_CONFIG_CHANGE)
                xhci_info(xhci, "DbC config error change\n");

        /* The port reset change bit is cleared elsewhere: */
        writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
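/*
 * A transfer event TRB carries the completion code, the residual length and
 * the DMA address of the TRB it completes. Use that address to find the
 * matching pending request on the endpoint and give it back to its owner.
 */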
static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
{
        struct dbc_ep *dep;
        struct xhci_ring *ring;
        int ep_id;
        int status;
        u32 comp_code;
        size_t remain_length;
        struct dbc_request *req = NULL, *r;

        comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
        remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
        ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
        dep = (ep_id == EPID_OUT) ?
                        get_out_ep(xhci) : get_in_ep(xhci);
        ring = dep->ring;

        switch (comp_code) {
        case COMP_SUCCESS:
                remain_length = 0;
                /* FALLTHROUGH */
        case COMP_SHORT_PACKET:
                status = 0;
                break;
        case COMP_TRB_ERROR:
        case COMP_BABBLE_DETECTED_ERROR:
        case COMP_USB_TRANSACTION_ERROR:
        case COMP_STALL_ERROR:
                xhci_warn(xhci, "tx error %d detected\n", comp_code);
                status = -comp_code;
                break;
        default:
                xhci_err(xhci, "unknown tx error %d\n", comp_code);
                status = -comp_code;
                break;
        }

        /* Match the pending request: */
        list_for_each_entry(r, &dep->list_pending, list_pending) {
                if (r->trb_dma == event->trans_event.buffer) {
                        req = r;
                        break;
                }
        }

        if (!req) {
                xhci_warn(xhci, "no matched request\n");
                return;
        }

        trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

        ring->num_trbs_free++;
        req->actual = req->length - remain_length;
        xhci_dbc_giveback(req, status);
}
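/*
 * Poll-based event handling, called from the delayed work below. First run
 * the DbC state machine based on the PORTSC and CTRL registers, then consume
 * any pending TRBs on the event ring and update the dequeue pointer. The
 * return value tells the caller whether to register the tty device
 * (EVT_GSER), unregister it (EVT_DISC), keep polling (EVT_DONE) or stop
 * (EVT_ERR).
 */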
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
        dma_addr_t deq;
        struct dbc_ep *dep;
        union xhci_trb *evt;
        u32 ctrl, portsc;
        struct xhci_hcd *xhci = dbc->xhci;
        bool update_erdp = false;

        /* DbC state machine: */
        switch (dbc->state) {
        case DS_DISABLED:
        case DS_INITIALIZED:
                return EVT_ERR;
        case DS_ENABLED:
                portsc = readl(&dbc->regs->portsc);
                if (portsc & DBC_PORTSC_CONN_STATUS) {
                        dbc->state = DS_CONNECTED;
                        xhci_info(xhci, "DbC connected\n");
                }

                return EVT_DONE;
        case DS_CONNECTED:
                ctrl = readl(&dbc->regs->control);
                if (ctrl & DBC_CTRL_DBC_RUN) {
                        dbc->state = DS_CONFIGURED;
                        xhci_info(xhci, "DbC configured\n");
                        portsc = readl(&dbc->regs->portsc);
                        writel(portsc, &dbc->regs->portsc);
                        return EVT_GSER;
                }

                return EVT_DONE;
        case DS_CONFIGURED:
                /* Handle cable unplug event: */
                portsc = readl(&dbc->regs->portsc);
                if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
                    !(portsc & DBC_PORTSC_CONN_STATUS)) {
                        xhci_info(xhci, "DbC cable unplugged\n");
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle debug port reset event: */
                if (portsc & DBC_PORTSC_RESET_CHANGE) {
                        xhci_info(xhci, "DbC port reset\n");
                        writel(portsc, &dbc->regs->portsc);
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle endpoint stall event: */
                ctrl = readl(&dbc->regs->control);
                if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
                    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
                        xhci_info(xhci, "DbC Endpoint stall\n");
                        dbc->state = DS_STALLED;

                        if (ctrl & DBC_CTRL_HALT_IN_TR) {
                                dep = get_in_ep(xhci);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        if (ctrl & DBC_CTRL_HALT_OUT_TR) {
                                dep = get_out_ep(xhci);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        return EVT_DONE;
                }

                /* Clear DbC run change bit: */
                if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
                        writel(ctrl, &dbc->regs->control);
                        ctrl = readl(&dbc->regs->control);
                }

                break;
        case DS_STALLED:
                ctrl = readl(&dbc->regs->control);
                if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
                    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
                    (ctrl & DBC_CTRL_DBC_RUN)) {
                        dbc->state = DS_CONFIGURED;
                        break;
                }

                return EVT_DONE;
        default:
                xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
                break;
        }

        /* Handle the events in the event ring: */
        evt = dbc->ring_evt->dequeue;
        while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
                        dbc->ring_evt->cycle_state) {
                /*
                 * Add a barrier between reading the cycle flag and any
                 * reads of the event's flags/data below:
                 */
                rmb();

                trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

                switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
                case TRB_TYPE(TRB_PORT_STATUS):
                        dbc_handle_port_status(xhci, evt);
                        break;
                case TRB_TYPE(TRB_TRANSFER):
                        dbc_handle_xfer_event(xhci, evt);
                        break;
                default:
                        break;
                }

                inc_deq(xhci, dbc->ring_evt);
                evt = dbc->ring_evt->dequeue;
                update_erdp = true;
        }

        /* Update event ring dequeue pointer: */
        if (update_erdp) {
                deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                           dbc->ring_evt->dequeue);
                xhci_write_64(xhci, deq, &dbc->regs->erdp);
        }

        return EVT_DONE;
}

static void xhci_dbc_handle_events(struct work_struct *work)
{
        int ret;
        enum evtreturn evtr;
        struct xhci_dbc *dbc;
        unsigned long flags;
        struct xhci_hcd *xhci;

        dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
        xhci = dbc->xhci;

        spin_lock_irqsave(&dbc->lock, flags);
        evtr = xhci_dbc_do_handle_events(dbc);
        spin_unlock_irqrestore(&dbc->lock, flags);

        switch (evtr) {
        case EVT_GSER:
                ret = xhci_dbc_tty_register_device(xhci);
                if (ret) {
                        xhci_err(xhci, "failed to alloc tty device\n");
                        break;
                }

                xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
                break;
        case EVT_DISC:
                xhci_dbc_tty_unregister_device(xhci);
                break;
        case EVT_DONE:
                break;
        default:
                xhci_info(xhci, "stop handling dbc events\n");
                return;
        }

        mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
        unsigned long flags;

        spin_lock_irqsave(&xhci->lock, flags);
        kfree(xhci->dbc);
        xhci->dbc = NULL;
        spin_unlock_irqrestore(&xhci->lock, flags);
}
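/*
 * Look up the Debug Capability in the xHCI extended capability list and
 * allocate the per-controller xhci_dbc structure. Returns -ENODEV if the
 * controller has no DbC, and -EBUSY if DbC is already enabled or already
 * claimed for this controller.
 */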
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
        u32 reg;
        struct xhci_dbc *dbc;
        unsigned long flags;
        void __iomem *base;
        int dbc_cap_offs;

        base = &xhci->cap_regs->hc_capbase;
        dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
        if (!dbc_cap_offs)
                return -ENODEV;

        dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
        if (!dbc)
                return -ENOMEM;

        dbc->regs = base + dbc_cap_offs;

        /* We will avoid using DbC in xhci driver if it's in use. */
        reg = readl(&dbc->regs->control);
        if (reg & DBC_CTRL_DBC_ENABLE) {
                kfree(dbc);
                return -EBUSY;
        }

        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->dbc) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                kfree(dbc);
                return -EBUSY;
        }
        xhci->dbc = dbc;
        spin_unlock_irqrestore(&xhci->lock, flags);

        dbc->xhci = xhci;
        INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
        spin_lock_init(&dbc->lock);

        return 0;
}

static ssize_t dbc_show(struct device *dev,
                        struct device_attribute *attr,
                        char *buf)
{
        const char *p;
        struct xhci_dbc *dbc;
        struct xhci_hcd *xhci;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;

        switch (dbc->state) {
        case DS_DISABLED:
                p = "disabled";
                break;
        case DS_INITIALIZED:
                p = "initialized";
                break;
        case DS_ENABLED:
                p = "enabled";
                break;
        case DS_CONNECTED:
                p = "connected";
                break;
        case DS_CONFIGURED:
                p = "configured";
                break;
        case DS_STALLED:
                p = "stalled";
                break;
        default:
                p = "unknown";
        }

        return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
                         struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct xhci_hcd *xhci;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));

        if (!strncmp(buf, "enable", 6))
                xhci_dbc_start(xhci);
        else if (!strncmp(buf, "disable", 7))
                xhci_dbc_stop(xhci);
        else
                return -EINVAL;

        return count;
}

static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
        int ret;
        struct device *dev = xhci_to_hcd(xhci)->self.controller;

        ret = xhci_do_dbc_init(xhci);
        if (ret)
                goto init_err3;

        ret = xhci_dbc_tty_register_driver(xhci);
        if (ret)
                goto init_err2;

        ret = device_create_file(dev, &dev_attr_dbc);
        if (ret)
                goto init_err1;

        return 0;

init_err1:
        xhci_dbc_tty_unregister_driver();
init_err2:
        xhci_do_dbc_exit(xhci);
init_err3:
        return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
        struct device *dev = xhci_to_hcd(xhci)->self.controller;

        if (!xhci->dbc)
                return;

        device_remove_file(dev, &dev_attr_dbc);
        xhci_dbc_tty_unregister_driver();
        xhci_dbc_stop(xhci);
        xhci_do_dbc_exit(xhci);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
        struct xhci_dbc *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->state == DS_CONFIGURED)
                dbc->resume_required = 1;

        xhci_dbc_stop(xhci);

        return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
        int ret = 0;
        struct xhci_dbc *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->resume_required) {
                dbc->resume_required = 0;
                xhci_dbc_start(xhci);
        }

        return ret;
}
#endif /* CONFIG_PM */