1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2014 Leon Dang <ldang@nahannisys.com> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /* 29 XHCI options: 30 -s <n>,xhci,{devices} 31 32 devices: 33 tablet USB tablet mouse 34 */ 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include <sys/param.h> 39 #include <sys/uio.h> 40 #include <sys/types.h> 41 #include <sys/queue.h> 42 43 #include <stdio.h> 44 #include <stdlib.h> 45 #include <stdint.h> 46 #include <string.h> 47 #include <errno.h> 48 #include <pthread.h> 49 #include <unistd.h> 50 51 #include <machine/vmm_snapshot.h> 52 53 #include <dev/usb/usbdi.h> 54 #include <dev/usb/usb.h> 55 #include <dev/usb/usb_freebsd.h> 56 #include <xhcireg.h> 57 58 #include "bhyverun.h" 59 #include "config.h" 60 #include "debug.h" 61 #include "pci_emul.h" 62 #include "pci_xhci.h" 63 #include "usb_emul.h" 64 65 66 static int xhci_debug = 0; 67 #define DPRINTF(params) if (xhci_debug) PRINTLN params 68 #define WPRINTF(params) PRINTLN params 69 70 71 #define XHCI_NAME "xhci" 72 #define XHCI_MAX_DEVS 8 /* 4 USB3 + 4 USB2 devs */ 73 74 #define XHCI_MAX_SLOTS 64 /* min allowed by Windows drivers */ 75 76 /* 77 * XHCI data structures can be up to 64k, but limit paddr_guest2host mapping 78 * to 4k to avoid going over the guest physical memory barrier. 
79 */ 80 #define XHCI_PADDR_SZ 4096 /* paddr_guest2host max size */ 81 82 #define XHCI_ERST_MAX 0 /* max 2^entries event ring seg tbl */ 83 84 #define XHCI_CAPLEN (4*8) /* offset of op register space */ 85 #define XHCI_HCCPRAMS2 0x1C /* offset of HCCPARAMS2 register */ 86 #define XHCI_PORTREGS_START 0x400 87 #define XHCI_DOORBELL_MAX 256 88 89 #define XHCI_STREAMS_MAX 1 /* 4-15 in XHCI spec */ 90 91 /* caplength and hci-version registers */ 92 #define XHCI_SET_CAPLEN(x) ((x) & 0xFF) 93 #define XHCI_SET_HCIVERSION(x) (((x) & 0xFFFF) << 16) 94 #define XHCI_GET_HCIVERSION(x) (((x) >> 16) & 0xFFFF) 95 96 /* hcsparams1 register */ 97 #define XHCI_SET_HCSP1_MAXSLOTS(x) ((x) & 0xFF) 98 #define XHCI_SET_HCSP1_MAXINTR(x) (((x) & 0x7FF) << 8) 99 #define XHCI_SET_HCSP1_MAXPORTS(x) (((x) & 0xFF) << 24) 100 101 /* hcsparams2 register */ 102 #define XHCI_SET_HCSP2_IST(x) ((x) & 0x0F) 103 #define XHCI_SET_HCSP2_ERSTMAX(x) (((x) & 0x0F) << 4) 104 #define XHCI_SET_HCSP2_MAXSCRATCH_HI(x) (((x) & 0x1F) << 21) 105 #define XHCI_SET_HCSP2_MAXSCRATCH_LO(x) (((x) & 0x1F) << 27) 106 107 /* hcsparams3 register */ 108 #define XHCI_SET_HCSP3_U1EXITLATENCY(x) ((x) & 0xFF) 109 #define XHCI_SET_HCSP3_U2EXITLATENCY(x) (((x) & 0xFFFF) << 16) 110 111 /* hccparams1 register */ 112 #define XHCI_SET_HCCP1_AC64(x) ((x) & 0x01) 113 #define XHCI_SET_HCCP1_BNC(x) (((x) & 0x01) << 1) 114 #define XHCI_SET_HCCP1_CSZ(x) (((x) & 0x01) << 2) 115 #define XHCI_SET_HCCP1_PPC(x) (((x) & 0x01) << 3) 116 #define XHCI_SET_HCCP1_PIND(x) (((x) & 0x01) << 4) 117 #define XHCI_SET_HCCP1_LHRC(x) (((x) & 0x01) << 5) 118 #define XHCI_SET_HCCP1_LTC(x) (((x) & 0x01) << 6) 119 #define XHCI_SET_HCCP1_NSS(x) (((x) & 0x01) << 7) 120 #define XHCI_SET_HCCP1_PAE(x) (((x) & 0x01) << 8) 121 #define XHCI_SET_HCCP1_SPC(x) (((x) & 0x01) << 9) 122 #define XHCI_SET_HCCP1_SEC(x) (((x) & 0x01) << 10) 123 #define XHCI_SET_HCCP1_CFC(x) (((x) & 0x01) << 11) 124 #define XHCI_SET_HCCP1_MAXPSA(x) (((x) & 0x0F) << 12) 125 #define XHCI_SET_HCCP1_XECP(x) (((x) & 0xFFFF) << 16) 126 127 /* hccparams2 register */ 128 #define XHCI_SET_HCCP2_U3C(x) ((x) & 0x01) 129 #define XHCI_SET_HCCP2_CMC(x) (((x) & 0x01) << 1) 130 #define XHCI_SET_HCCP2_FSC(x) (((x) & 0x01) << 2) 131 #define XHCI_SET_HCCP2_CTC(x) (((x) & 0x01) << 3) 132 #define XHCI_SET_HCCP2_LEC(x) (((x) & 0x01) << 4) 133 #define XHCI_SET_HCCP2_CIC(x) (((x) & 0x01) << 5) 134 135 /* other registers */ 136 #define XHCI_SET_DOORBELL(x) ((x) & ~0x03) 137 #define XHCI_SET_RTSOFFSET(x) ((x) & ~0x0F) 138 139 /* register masks */ 140 #define XHCI_PS_PLS_MASK (0xF << 5) /* port link state */ 141 #define XHCI_PS_SPEED_MASK (0xF << 10) /* port speed */ 142 #define XHCI_PS_PIC_MASK (0x3 << 14) /* port indicator */ 143 144 /* port register set */ 145 #define XHCI_PORTREGS_BASE 0x400 /* base offset */ 146 #define XHCI_PORTREGS_PORT0 0x3F0 147 #define XHCI_PORTREGS_SETSZ 0x10 /* size of a set */ 148 149 #define MASK_64_HI(x) ((x) & ~0xFFFFFFFFULL) 150 #define MASK_64_LO(x) ((x) & 0xFFFFFFFFULL) 151 152 #define FIELD_REPLACE(a,b,m,s) (((a) & ~((m) << (s))) | \ 153 (((b) & (m)) << (s))) 154 #define FIELD_COPY(a,b,m,s) (((a) & ~((m) << (s))) | \ 155 (((b) & ((m) << (s))))) 156 157 #define SNAP_DEV_NAME_LEN 128 158 159 struct pci_xhci_trb_ring { 160 uint64_t ringaddr; /* current dequeue guest address */ 161 uint32_t ccs; /* consumer cycle state */ 162 }; 163 164 /* device endpoint transfer/stream rings */ 165 struct pci_xhci_dev_ep { 166 union { 167 struct xhci_trb *_epu_tr; 168 struct xhci_stream_ctx *_epu_sctx; 169 } _ep_trbsctx; 170 #define 
ep_tr _ep_trbsctx._epu_tr 171 #define ep_sctx _ep_trbsctx._epu_sctx 172 173 /* 174 * Caches the value of MaxPStreams from the endpoint context 175 * when an endpoint is initialized and is used to validate the 176 * use of ep_ringaddr vs ep_sctx_trbs[] as well as the length 177 * of ep_sctx_trbs[]. 178 */ 179 uint32_t ep_MaxPStreams; 180 union { 181 struct pci_xhci_trb_ring _epu_trb; 182 struct pci_xhci_trb_ring *_epu_sctx_trbs; 183 } _ep_trb_rings; 184 #define ep_ringaddr _ep_trb_rings._epu_trb.ringaddr 185 #define ep_ccs _ep_trb_rings._epu_trb.ccs 186 #define ep_sctx_trbs _ep_trb_rings._epu_sctx_trbs 187 188 struct usb_data_xfer *ep_xfer; /* transfer chain */ 189 }; 190 191 /* device context base address array: maps slot->device context */ 192 struct xhci_dcbaa { 193 uint64_t dcba[USB_MAX_DEVICES+1]; /* xhci_dev_ctx ptrs */ 194 }; 195 196 /* port status registers */ 197 struct pci_xhci_portregs { 198 uint32_t portsc; /* port status and control */ 199 uint32_t portpmsc; /* port pwr mgmt status & control */ 200 uint32_t portli; /* port link info */ 201 uint32_t porthlpmc; /* port hardware LPM control */ 202 } __packed; 203 #define XHCI_PS_SPEED_SET(x) (((x) & 0xF) << 10) 204 205 /* xHC operational registers */ 206 struct pci_xhci_opregs { 207 uint32_t usbcmd; /* usb command */ 208 uint32_t usbsts; /* usb status */ 209 uint32_t pgsz; /* page size */ 210 uint32_t dnctrl; /* device notification control */ 211 uint64_t crcr; /* command ring control */ 212 uint64_t dcbaap; /* device ctx base addr array ptr */ 213 uint32_t config; /* configure */ 214 215 /* guest mapped addresses: */ 216 struct xhci_trb *cr_p; /* crcr dequeue */ 217 struct xhci_dcbaa *dcbaa_p; /* dev ctx array ptr */ 218 }; 219 220 /* xHC runtime registers */ 221 struct pci_xhci_rtsregs { 222 uint32_t mfindex; /* microframe index */ 223 struct { /* interrupter register set */ 224 uint32_t iman; /* interrupter management */ 225 uint32_t imod; /* interrupter moderation */ 226 uint32_t erstsz; /* event ring segment table size */ 227 uint32_t rsvd; 228 uint64_t erstba; /* event ring seg-tbl base addr */ 229 uint64_t erdp; /* event ring dequeue ptr */ 230 } intrreg __packed; 231 232 /* guest mapped addresses */ 233 struct xhci_event_ring_seg *erstba_p; 234 struct xhci_trb *erst_p; /* event ring segment tbl */ 235 int er_deq_seg; /* event ring dequeue segment */ 236 int er_enq_idx; /* event ring enqueue index - xHCI */ 237 int er_enq_seg; /* event ring enqueue segment */ 238 uint32_t er_events_cnt; /* number of events in ER */ 239 uint32_t event_pcs; /* producer cycle state flag */ 240 }; 241 242 243 struct pci_xhci_softc; 244 245 246 /* 247 * USB device emulation container. 248 * This is referenced from usb_hci->hci_sc; 1 pci_xhci_dev_emu for each 249 * emulated device instance. 
250 */ 251 struct pci_xhci_dev_emu { 252 struct pci_xhci_softc *xsc; 253 254 /* XHCI contexts */ 255 struct xhci_dev_ctx *dev_ctx; 256 struct pci_xhci_dev_ep eps[XHCI_MAX_ENDPOINTS]; 257 int dev_slotstate; 258 259 struct usb_devemu *dev_ue; /* USB emulated dev */ 260 void *dev_sc; /* device's softc */ 261 262 struct usb_hci hci; 263 }; 264 265 struct pci_xhci_softc { 266 struct pci_devinst *xsc_pi; 267 268 pthread_mutex_t mtx; 269 270 uint32_t caplength; /* caplen & hciversion */ 271 uint32_t hcsparams1; /* structural parameters 1 */ 272 uint32_t hcsparams2; /* structural parameters 2 */ 273 uint32_t hcsparams3; /* structural parameters 3 */ 274 uint32_t hccparams1; /* capability parameters 1 */ 275 uint32_t dboff; /* doorbell offset */ 276 uint32_t rtsoff; /* runtime register space offset */ 277 uint32_t hccparams2; /* capability parameters 2 */ 278 279 uint32_t regsend; /* end of configuration registers */ 280 281 struct pci_xhci_opregs opregs; 282 struct pci_xhci_rtsregs rtsregs; 283 284 struct pci_xhci_portregs *portregs; 285 struct pci_xhci_dev_emu **devices; /* XHCI[port] = device */ 286 struct pci_xhci_dev_emu **slots; /* slots assigned from 1 */ 287 288 int usb2_port_start; 289 int usb3_port_start; 290 }; 291 292 293 /* port and slot numbering start from 1 */ 294 #define XHCI_PORTREG_PTR(x,n) &((x)->portregs[(n) - 1]) 295 #define XHCI_DEVINST_PTR(x,n) ((x)->devices[(n) - 1]) 296 #define XHCI_SLOTDEV_PTR(x,n) ((x)->slots[(n) - 1]) 297 298 #define XHCI_HALTED(sc) ((sc)->opregs.usbsts & XHCI_STS_HCH) 299 300 #define XHCI_GADDR_SIZE(a) (XHCI_PADDR_SZ - \ 301 (((uint64_t) (a)) & (XHCI_PADDR_SZ - 1))) 302 #define XHCI_GADDR(sc,a) paddr_guest2host((sc)->xsc_pi->pi_vmctx, \ 303 (a), XHCI_GADDR_SIZE(a)) 304 305 static int xhci_in_use; 306 307 /* map USB errors to XHCI */ 308 static const int xhci_usb_errors[USB_ERR_MAX] = { 309 [USB_ERR_NORMAL_COMPLETION] = XHCI_TRB_ERROR_SUCCESS, 310 [USB_ERR_PENDING_REQUESTS] = XHCI_TRB_ERROR_RESOURCE, 311 [USB_ERR_NOT_STARTED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 312 [USB_ERR_INVAL] = XHCI_TRB_ERROR_INVALID, 313 [USB_ERR_NOMEM] = XHCI_TRB_ERROR_RESOURCE, 314 [USB_ERR_CANCELLED] = XHCI_TRB_ERROR_STOPPED, 315 [USB_ERR_BAD_ADDRESS] = XHCI_TRB_ERROR_PARAMETER, 316 [USB_ERR_BAD_BUFSIZE] = XHCI_TRB_ERROR_PARAMETER, 317 [USB_ERR_BAD_FLAG] = XHCI_TRB_ERROR_PARAMETER, 318 [USB_ERR_NO_CALLBACK] = XHCI_TRB_ERROR_STALL, 319 [USB_ERR_IN_USE] = XHCI_TRB_ERROR_RESOURCE, 320 [USB_ERR_NO_ADDR] = XHCI_TRB_ERROR_RESOURCE, 321 [USB_ERR_NO_PIPE] = XHCI_TRB_ERROR_RESOURCE, 322 [USB_ERR_ZERO_NFRAMES] = XHCI_TRB_ERROR_UNDEFINED, 323 [USB_ERR_ZERO_MAXP] = XHCI_TRB_ERROR_UNDEFINED, 324 [USB_ERR_SET_ADDR_FAILED] = XHCI_TRB_ERROR_RESOURCE, 325 [USB_ERR_NO_POWER] = XHCI_TRB_ERROR_ENDP_NOT_ON, 326 [USB_ERR_TOO_DEEP] = XHCI_TRB_ERROR_RESOURCE, 327 [USB_ERR_IOERROR] = XHCI_TRB_ERROR_TRB, 328 [USB_ERR_NOT_CONFIGURED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 329 [USB_ERR_TIMEOUT] = XHCI_TRB_ERROR_CMD_ABORTED, 330 [USB_ERR_SHORT_XFER] = XHCI_TRB_ERROR_SHORT_PKT, 331 [USB_ERR_STALLED] = XHCI_TRB_ERROR_STALL, 332 [USB_ERR_INTERRUPTED] = XHCI_TRB_ERROR_CMD_ABORTED, 333 [USB_ERR_DMA_LOAD_FAILED] = XHCI_TRB_ERROR_DATA_BUF, 334 [USB_ERR_BAD_CONTEXT] = XHCI_TRB_ERROR_TRB, 335 [USB_ERR_NO_ROOT_HUB] = XHCI_TRB_ERROR_UNDEFINED, 336 [USB_ERR_NO_INTR_THREAD] = XHCI_TRB_ERROR_UNDEFINED, 337 [USB_ERR_NOT_LOCKED] = XHCI_TRB_ERROR_UNDEFINED, 338 }; 339 #define USB_TO_XHCI_ERR(e) ((e) < USB_ERR_MAX ? 
xhci_usb_errors[(e)] : \ 340 XHCI_TRB_ERROR_INVALID) 341 342 static int pci_xhci_insert_event(struct pci_xhci_softc *sc, 343 struct xhci_trb *evtrb, int do_intr); 344 static void pci_xhci_dump_trb(struct xhci_trb *trb); 345 static void pci_xhci_assert_interrupt(struct pci_xhci_softc *sc); 346 static void pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot); 347 static void pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm); 348 static void pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, 349 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 350 struct xhci_endp_ctx *ep_ctx, uint32_t streamid, 351 uint64_t ringaddr, int ccs); 352 353 static void 354 pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode, 355 uint32_t evtype) 356 { 357 evtrb->qwTrb0 = port << 24; 358 evtrb->dwTrb2 = XHCI_TRB_2_ERROR_SET(errcode); 359 evtrb->dwTrb3 = XHCI_TRB_3_TYPE_SET(evtype); 360 } 361 362 363 /* controller reset */ 364 static void 365 pci_xhci_reset(struct pci_xhci_softc *sc) 366 { 367 int i; 368 369 sc->rtsregs.er_enq_idx = 0; 370 sc->rtsregs.er_events_cnt = 0; 371 sc->rtsregs.event_pcs = 1; 372 373 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 374 pci_xhci_reset_slot(sc, i); 375 } 376 } 377 378 static uint32_t 379 pci_xhci_usbcmd_write(struct pci_xhci_softc *sc, uint32_t cmd) 380 { 381 int do_intr = 0; 382 int i; 383 384 if (cmd & XHCI_CMD_RS) { 385 do_intr = (sc->opregs.usbcmd & XHCI_CMD_RS) == 0; 386 387 sc->opregs.usbcmd |= XHCI_CMD_RS; 388 sc->opregs.usbsts &= ~XHCI_STS_HCH; 389 sc->opregs.usbsts |= XHCI_STS_PCD; 390 391 /* Queue port change event on controller run from stop */ 392 if (do_intr) 393 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 394 struct pci_xhci_dev_emu *dev; 395 struct pci_xhci_portregs *port; 396 struct xhci_trb evtrb; 397 398 if ((dev = XHCI_DEVINST_PTR(sc, i)) == NULL) 399 continue; 400 401 port = XHCI_PORTREG_PTR(sc, i); 402 port->portsc |= XHCI_PS_CSC | XHCI_PS_CCS; 403 port->portsc &= ~XHCI_PS_PLS_MASK; 404 405 /* 406 * XHCI 4.19.3 USB2 RxDetect->Polling, 407 * USB3 Polling->U0 408 */ 409 if (dev->dev_ue->ue_usbver == 2) 410 port->portsc |= 411 XHCI_PS_PLS_SET(UPS_PORT_LS_POLL); 412 else 413 port->portsc |= 414 XHCI_PS_PLS_SET(UPS_PORT_LS_U0); 415 416 pci_xhci_set_evtrb(&evtrb, i, 417 XHCI_TRB_ERROR_SUCCESS, 418 XHCI_TRB_EVENT_PORT_STS_CHANGE); 419 420 if (pci_xhci_insert_event(sc, &evtrb, 0) != 421 XHCI_TRB_ERROR_SUCCESS) 422 break; 423 } 424 } else { 425 sc->opregs.usbcmd &= ~XHCI_CMD_RS; 426 sc->opregs.usbsts |= XHCI_STS_HCH; 427 sc->opregs.usbsts &= ~XHCI_STS_PCD; 428 } 429 430 /* start execution of schedule; stop when set to 0 */ 431 cmd |= sc->opregs.usbcmd & XHCI_CMD_RS; 432 433 if (cmd & XHCI_CMD_HCRST) { 434 /* reset controller */ 435 pci_xhci_reset(sc); 436 cmd &= ~XHCI_CMD_HCRST; 437 } 438 439 cmd &= ~(XHCI_CMD_CSS | XHCI_CMD_CRS); 440 441 if (do_intr) 442 pci_xhci_assert_interrupt(sc); 443 444 return (cmd); 445 } 446 447 static void 448 pci_xhci_portregs_write(struct pci_xhci_softc *sc, uint64_t offset, 449 uint64_t value) 450 { 451 struct xhci_trb evtrb; 452 struct pci_xhci_portregs *p; 453 int port; 454 uint32_t oldpls, newpls; 455 456 if (sc->portregs == NULL) 457 return; 458 459 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ; 460 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ; 461 462 DPRINTF(("pci_xhci: portregs wr offset 0x%lx, port %u: 0x%lx", 463 offset, port, value)); 464 465 assert(port >= 0); 466 467 if (port > XHCI_MAX_DEVS) { 468 DPRINTF(("pci_xhci: portregs_write port %d > 
ndevices", 469 port)); 470 return; 471 } 472 473 if (XHCI_DEVINST_PTR(sc, port) == NULL) { 474 DPRINTF(("pci_xhci: portregs_write to unattached port %d", 475 port)); 476 } 477 478 p = XHCI_PORTREG_PTR(sc, port); 479 switch (offset) { 480 case 0: 481 /* port reset or warm reset */ 482 if (value & (XHCI_PS_PR | XHCI_PS_WPR)) { 483 pci_xhci_reset_port(sc, port, value & XHCI_PS_WPR); 484 break; 485 } 486 487 if ((p->portsc & XHCI_PS_PP) == 0) { 488 WPRINTF(("pci_xhci: portregs_write to unpowered " 489 "port %d", port)); 490 break; 491 } 492 493 /* Port status and control register */ 494 oldpls = XHCI_PS_PLS_GET(p->portsc); 495 newpls = XHCI_PS_PLS_GET(value); 496 497 p->portsc &= XHCI_PS_PED | XHCI_PS_PLS_MASK | 498 XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK; 499 500 if (XHCI_DEVINST_PTR(sc, port)) 501 p->portsc |= XHCI_PS_CCS; 502 503 p->portsc |= (value & 504 ~(XHCI_PS_OCA | 505 XHCI_PS_PR | 506 XHCI_PS_PED | 507 XHCI_PS_PLS_MASK | /* link state */ 508 XHCI_PS_SPEED_MASK | 509 XHCI_PS_PIC_MASK | /* port indicator */ 510 XHCI_PS_LWS | XHCI_PS_DR | XHCI_PS_WPR)); 511 512 /* clear control bits */ 513 p->portsc &= ~(value & 514 (XHCI_PS_CSC | 515 XHCI_PS_PEC | 516 XHCI_PS_WRC | 517 XHCI_PS_OCC | 518 XHCI_PS_PRC | 519 XHCI_PS_PLC | 520 XHCI_PS_CEC | 521 XHCI_PS_CAS)); 522 523 /* port disable request; for USB3, don't care */ 524 if (value & XHCI_PS_PED) 525 DPRINTF(("Disable port %d request", port)); 526 527 if (!(value & XHCI_PS_LWS)) 528 break; 529 530 DPRINTF(("Port new PLS: %d", newpls)); 531 switch (newpls) { 532 case 0: /* U0 */ 533 case 3: /* U3 */ 534 if (oldpls != newpls) { 535 p->portsc &= ~XHCI_PS_PLS_MASK; 536 p->portsc |= XHCI_PS_PLS_SET(newpls) | 537 XHCI_PS_PLC; 538 539 if (oldpls != 0 && newpls == 0) { 540 pci_xhci_set_evtrb(&evtrb, port, 541 XHCI_TRB_ERROR_SUCCESS, 542 XHCI_TRB_EVENT_PORT_STS_CHANGE); 543 544 pci_xhci_insert_event(sc, &evtrb, 1); 545 } 546 } 547 break; 548 549 default: 550 DPRINTF(("Unhandled change port %d PLS %u", 551 port, newpls)); 552 break; 553 } 554 break; 555 case 4: 556 /* Port power management status and control register */ 557 p->portpmsc = value; 558 break; 559 case 8: 560 /* Port link information register */ 561 DPRINTF(("pci_xhci attempted write to PORTLI, port %d", 562 port)); 563 break; 564 case 12: 565 /* 566 * Port hardware LPM control register. 567 * For USB3, this register is reserved. 
568 */ 569 p->porthlpmc = value; 570 break; 571 default: 572 DPRINTF(("pci_xhci: unaligned portreg write offset %#lx", 573 offset)); 574 break; 575 } 576 } 577 578 static struct xhci_dev_ctx * 579 pci_xhci_get_dev_ctx(struct pci_xhci_softc *sc, uint32_t slot) 580 { 581 uint64_t devctx_addr; 582 struct xhci_dev_ctx *devctx; 583 584 assert(slot > 0 && slot <= XHCI_MAX_DEVS); 585 assert(XHCI_SLOTDEV_PTR(sc, slot) != NULL); 586 assert(sc->opregs.dcbaa_p != NULL); 587 588 devctx_addr = sc->opregs.dcbaa_p->dcba[slot]; 589 590 if (devctx_addr == 0) { 591 DPRINTF(("get_dev_ctx devctx_addr == 0")); 592 return (NULL); 593 } 594 595 DPRINTF(("pci_xhci: get dev ctx, slot %u devctx addr %016lx", 596 slot, devctx_addr)); 597 devctx = XHCI_GADDR(sc, devctx_addr & ~0x3FUL); 598 599 return (devctx); 600 } 601 602 static struct xhci_trb * 603 pci_xhci_trb_next(struct pci_xhci_softc *sc, struct xhci_trb *curtrb, 604 uint64_t *guestaddr) 605 { 606 struct xhci_trb *next; 607 608 assert(curtrb != NULL); 609 610 if (XHCI_TRB_3_TYPE_GET(curtrb->dwTrb3) == XHCI_TRB_TYPE_LINK) { 611 if (guestaddr) 612 *guestaddr = curtrb->qwTrb0 & ~0xFUL; 613 614 next = XHCI_GADDR(sc, curtrb->qwTrb0 & ~0xFUL); 615 } else { 616 if (guestaddr) 617 *guestaddr += sizeof(struct xhci_trb) & ~0xFUL; 618 619 next = curtrb + 1; 620 } 621 622 return (next); 623 } 624 625 static void 626 pci_xhci_assert_interrupt(struct pci_xhci_softc *sc) 627 { 628 629 sc->rtsregs.intrreg.erdp |= XHCI_ERDP_LO_BUSY; 630 sc->rtsregs.intrreg.iman |= XHCI_IMAN_INTR_PEND; 631 sc->opregs.usbsts |= XHCI_STS_EINT; 632 633 /* only trigger interrupt if permitted */ 634 if ((sc->opregs.usbcmd & XHCI_CMD_INTE) && 635 (sc->rtsregs.intrreg.iman & XHCI_IMAN_INTR_ENA)) { 636 if (pci_msi_enabled(sc->xsc_pi)) 637 pci_generate_msi(sc->xsc_pi, 0); 638 else 639 pci_lintr_assert(sc->xsc_pi); 640 } 641 } 642 643 static void 644 pci_xhci_deassert_interrupt(struct pci_xhci_softc *sc) 645 { 646 647 if (!pci_msi_enabled(sc->xsc_pi)) 648 pci_lintr_assert(sc->xsc_pi); 649 } 650 651 static void 652 pci_xhci_init_ep(struct pci_xhci_dev_emu *dev, int epid) 653 { 654 struct xhci_dev_ctx *dev_ctx; 655 struct pci_xhci_dev_ep *devep; 656 struct xhci_endp_ctx *ep_ctx; 657 uint32_t i, pstreams; 658 659 dev_ctx = dev->dev_ctx; 660 ep_ctx = &dev_ctx->ctx_ep[epid]; 661 devep = &dev->eps[epid]; 662 pstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0); 663 if (pstreams > 0) { 664 DPRINTF(("init_ep %d with pstreams %d", epid, pstreams)); 665 assert(devep->ep_sctx_trbs == NULL); 666 667 devep->ep_sctx = XHCI_GADDR(dev->xsc, ep_ctx->qwEpCtx2 & 668 XHCI_EPCTX_2_TR_DQ_PTR_MASK); 669 devep->ep_sctx_trbs = calloc(pstreams, 670 sizeof(struct pci_xhci_trb_ring)); 671 for (i = 0; i < pstreams; i++) { 672 devep->ep_sctx_trbs[i].ringaddr = 673 devep->ep_sctx[i].qwSctx0 & 674 XHCI_SCTX_0_TR_DQ_PTR_MASK; 675 devep->ep_sctx_trbs[i].ccs = 676 XHCI_SCTX_0_DCS_GET(devep->ep_sctx[i].qwSctx0); 677 } 678 } else { 679 DPRINTF(("init_ep %d with no pstreams", epid)); 680 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & 681 XHCI_EPCTX_2_TR_DQ_PTR_MASK; 682 devep->ep_ccs = XHCI_EPCTX_2_DCS_GET(ep_ctx->qwEpCtx2); 683 devep->ep_tr = XHCI_GADDR(dev->xsc, devep->ep_ringaddr); 684 DPRINTF(("init_ep tr DCS %x", devep->ep_ccs)); 685 } 686 devep->ep_MaxPStreams = pstreams; 687 688 if (devep->ep_xfer == NULL) { 689 devep->ep_xfer = malloc(sizeof(struct usb_data_xfer)); 690 USB_DATA_XFER_INIT(devep->ep_xfer); 691 } 692 } 693 694 static void 695 pci_xhci_disable_ep(struct pci_xhci_dev_emu *dev, int epid) 696 { 697 struct xhci_dev_ctx 
*dev_ctx; 698 struct pci_xhci_dev_ep *devep; 699 struct xhci_endp_ctx *ep_ctx; 700 701 DPRINTF(("pci_xhci disable_ep %d", epid)); 702 703 dev_ctx = dev->dev_ctx; 704 ep_ctx = &dev_ctx->ctx_ep[epid]; 705 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_DISABLED; 706 707 devep = &dev->eps[epid]; 708 if (devep->ep_MaxPStreams > 0) 709 free(devep->ep_sctx_trbs); 710 711 if (devep->ep_xfer != NULL) { 712 free(devep->ep_xfer); 713 devep->ep_xfer = NULL; 714 } 715 716 memset(devep, 0, sizeof(struct pci_xhci_dev_ep)); 717 } 718 719 720 /* reset device at slot and data structures related to it */ 721 static void 722 pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot) 723 { 724 struct pci_xhci_dev_emu *dev; 725 726 dev = XHCI_SLOTDEV_PTR(sc, slot); 727 728 if (!dev) { 729 DPRINTF(("xhci reset unassigned slot (%d)?", slot)); 730 } else { 731 dev->dev_slotstate = XHCI_ST_DISABLED; 732 } 733 734 /* TODO: reset ring buffer pointers */ 735 } 736 737 static int 738 pci_xhci_insert_event(struct pci_xhci_softc *sc, struct xhci_trb *evtrb, 739 int do_intr) 740 { 741 struct pci_xhci_rtsregs *rts; 742 uint64_t erdp; 743 int erdp_idx; 744 int err; 745 struct xhci_trb *evtrbptr; 746 747 err = XHCI_TRB_ERROR_SUCCESS; 748 749 rts = &sc->rtsregs; 750 751 erdp = rts->intrreg.erdp & ~0xF; 752 erdp_idx = (erdp - rts->erstba_p[rts->er_deq_seg].qwEvrsTablePtr) / 753 sizeof(struct xhci_trb); 754 755 DPRINTF(("pci_xhci: insert event 0[%lx] 2[%x] 3[%x]", 756 evtrb->qwTrb0, evtrb->dwTrb2, evtrb->dwTrb3)); 757 DPRINTF(("\terdp idx %d/seg %d, enq idx %d/seg %d, pcs %u", 758 erdp_idx, rts->er_deq_seg, rts->er_enq_idx, 759 rts->er_enq_seg, rts->event_pcs)); 760 DPRINTF(("\t(erdp=0x%lx, erst=0x%lx, tblsz=%u, do_intr %d)", 761 erdp, rts->erstba_p->qwEvrsTablePtr, 762 rts->erstba_p->dwEvrsTableSize, do_intr)); 763 764 evtrbptr = &rts->erst_p[rts->er_enq_idx]; 765 766 /* TODO: multi-segment table */ 767 if (rts->er_events_cnt >= rts->erstba_p->dwEvrsTableSize) { 768 DPRINTF(("pci_xhci[%d] cannot insert event; ring full", 769 __LINE__)); 770 err = XHCI_TRB_ERROR_EV_RING_FULL; 771 goto done; 772 } 773 774 if (rts->er_events_cnt == rts->erstba_p->dwEvrsTableSize - 1) { 775 struct xhci_trb errev; 776 777 if ((evtrbptr->dwTrb3 & 0x1) == (rts->event_pcs & 0x1)) { 778 779 DPRINTF(("pci_xhci[%d] insert evt err: ring full", 780 __LINE__)); 781 782 errev.qwTrb0 = 0; 783 errev.dwTrb2 = XHCI_TRB_2_ERROR_SET( 784 XHCI_TRB_ERROR_EV_RING_FULL); 785 errev.dwTrb3 = XHCI_TRB_3_TYPE_SET( 786 XHCI_TRB_EVENT_HOST_CTRL) | 787 rts->event_pcs; 788 rts->er_events_cnt++; 789 memcpy(&rts->erst_p[rts->er_enq_idx], &errev, 790 sizeof(struct xhci_trb)); 791 rts->er_enq_idx = (rts->er_enq_idx + 1) % 792 rts->erstba_p->dwEvrsTableSize; 793 err = XHCI_TRB_ERROR_EV_RING_FULL; 794 do_intr = 1; 795 796 goto done; 797 } 798 } else { 799 rts->er_events_cnt++; 800 } 801 802 evtrb->dwTrb3 &= ~XHCI_TRB_3_CYCLE_BIT; 803 evtrb->dwTrb3 |= rts->event_pcs; 804 805 memcpy(&rts->erst_p[rts->er_enq_idx], evtrb, sizeof(struct xhci_trb)); 806 rts->er_enq_idx = (rts->er_enq_idx + 1) % 807 rts->erstba_p->dwEvrsTableSize; 808 809 if (rts->er_enq_idx == 0) 810 rts->event_pcs ^= 1; 811 812 done: 813 if (do_intr) 814 pci_xhci_assert_interrupt(sc); 815 816 return (err); 817 } 818 819 static uint32_t 820 pci_xhci_cmd_enable_slot(struct pci_xhci_softc *sc, uint32_t *slot) 821 { 822 struct pci_xhci_dev_emu *dev; 823 uint32_t cmderr; 824 int i; 825 826 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 827 if (sc->portregs != NULL) 828 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 829 dev = 
XHCI_SLOTDEV_PTR(sc, i); 830 if (dev && dev->dev_slotstate == XHCI_ST_DISABLED) { 831 *slot = i; 832 dev->dev_slotstate = XHCI_ST_ENABLED; 833 cmderr = XHCI_TRB_ERROR_SUCCESS; 834 dev->hci.hci_address = i; 835 break; 836 } 837 } 838 839 DPRINTF(("pci_xhci enable slot (error=%d) slot %u", 840 cmderr != XHCI_TRB_ERROR_SUCCESS, *slot)); 841 842 return (cmderr); 843 } 844 845 static uint32_t 846 pci_xhci_cmd_disable_slot(struct pci_xhci_softc *sc, uint32_t slot) 847 { 848 struct pci_xhci_dev_emu *dev; 849 uint32_t cmderr; 850 851 DPRINTF(("pci_xhci disable slot %u", slot)); 852 853 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 854 if (sc->portregs == NULL) 855 goto done; 856 857 if (slot > XHCI_MAX_SLOTS) { 858 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 859 goto done; 860 } 861 862 dev = XHCI_SLOTDEV_PTR(sc, slot); 863 if (dev) { 864 if (dev->dev_slotstate == XHCI_ST_DISABLED) { 865 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 866 } else { 867 dev->dev_slotstate = XHCI_ST_DISABLED; 868 cmderr = XHCI_TRB_ERROR_SUCCESS; 869 /* TODO: reset events and endpoints */ 870 } 871 } else 872 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 873 874 done: 875 return (cmderr); 876 } 877 878 static uint32_t 879 pci_xhci_cmd_reset_device(struct pci_xhci_softc *sc, uint32_t slot) 880 { 881 struct pci_xhci_dev_emu *dev; 882 struct xhci_dev_ctx *dev_ctx; 883 struct xhci_endp_ctx *ep_ctx; 884 uint32_t cmderr; 885 int i; 886 887 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 888 if (sc->portregs == NULL) 889 goto done; 890 891 DPRINTF(("pci_xhci reset device slot %u", slot)); 892 893 dev = XHCI_SLOTDEV_PTR(sc, slot); 894 if (!dev || dev->dev_slotstate == XHCI_ST_DISABLED) 895 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 896 else { 897 dev->dev_slotstate = XHCI_ST_DEFAULT; 898 899 dev->hci.hci_address = 0; 900 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 901 902 /* slot state */ 903 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 904 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_DEFAULT, 905 0x1F, 27); 906 907 /* number of contexts */ 908 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE( 909 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); 910 911 /* reset all eps other than ep-0 */ 912 for (i = 2; i <= 31; i++) { 913 ep_ctx = &dev_ctx->ctx_ep[i]; 914 ep_ctx->dwEpCtx0 = FIELD_REPLACE( ep_ctx->dwEpCtx0, 915 XHCI_ST_EPCTX_DISABLED, 0x7, 0); 916 } 917 918 cmderr = XHCI_TRB_ERROR_SUCCESS; 919 } 920 921 pci_xhci_reset_slot(sc, slot); 922 923 done: 924 return (cmderr); 925 } 926 927 static uint32_t 928 pci_xhci_cmd_address_device(struct pci_xhci_softc *sc, uint32_t slot, 929 struct xhci_trb *trb) 930 { 931 struct pci_xhci_dev_emu *dev; 932 struct xhci_input_dev_ctx *input_ctx; 933 struct xhci_slot_ctx *islot_ctx; 934 struct xhci_dev_ctx *dev_ctx; 935 struct xhci_endp_ctx *ep0_ctx; 936 uint32_t cmderr; 937 938 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 939 islot_ctx = &input_ctx->ctx_slot; 940 ep0_ctx = &input_ctx->ctx_ep[1]; 941 942 cmderr = XHCI_TRB_ERROR_SUCCESS; 943 944 DPRINTF(("pci_xhci: address device, input ctl: D 0x%08x A 0x%08x,", 945 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1)); 946 DPRINTF((" slot %08x %08x %08x %08x", 947 islot_ctx->dwSctx0, islot_ctx->dwSctx1, 948 islot_ctx->dwSctx2, islot_ctx->dwSctx3)); 949 DPRINTF((" ep0 %08x %08x %016lx %08x", 950 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 951 ep0_ctx->dwEpCtx4)); 952 953 /* when setting address: drop-ctx=0, add-ctx=slot+ep0 */ 954 if ((input_ctx->ctx_input.dwInCtx0 != 0) || 955 (input_ctx->ctx_input.dwInCtx1 & 0x03) != 0x03) { 956 DPRINTF(("pci_xhci: address device, input ctl invalid")); 957 cmderr = 
XHCI_TRB_ERROR_TRB; 958 goto done; 959 } 960 961 /* assign address to slot */ 962 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 963 964 DPRINTF(("pci_xhci: address device, dev ctx")); 965 DPRINTF((" slot %08x %08x %08x %08x", 966 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 967 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 968 969 dev = XHCI_SLOTDEV_PTR(sc, slot); 970 assert(dev != NULL); 971 972 dev->hci.hci_address = slot; 973 dev->dev_ctx = dev_ctx; 974 975 if (dev->dev_ue->ue_reset == NULL || 976 dev->dev_ue->ue_reset(dev->dev_sc) < 0) { 977 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 978 goto done; 979 } 980 981 memcpy(&dev_ctx->ctx_slot, islot_ctx, sizeof(struct xhci_slot_ctx)); 982 983 dev_ctx->ctx_slot.dwSctx3 = 984 XHCI_SCTX_3_SLOT_STATE_SET(XHCI_ST_SLCTX_ADDRESSED) | 985 XHCI_SCTX_3_DEV_ADDR_SET(slot); 986 987 memcpy(&dev_ctx->ctx_ep[1], ep0_ctx, sizeof(struct xhci_endp_ctx)); 988 ep0_ctx = &dev_ctx->ctx_ep[1]; 989 ep0_ctx->dwEpCtx0 = (ep0_ctx->dwEpCtx0 & ~0x7) | 990 XHCI_EPCTX_0_EPSTATE_SET(XHCI_ST_EPCTX_RUNNING); 991 992 pci_xhci_init_ep(dev, 1); 993 994 dev->dev_slotstate = XHCI_ST_ADDRESSED; 995 996 DPRINTF(("pci_xhci: address device, output ctx")); 997 DPRINTF((" slot %08x %08x %08x %08x", 998 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 999 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1000 DPRINTF((" ep0 %08x %08x %016lx %08x", 1001 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1002 ep0_ctx->dwEpCtx4)); 1003 1004 done: 1005 return (cmderr); 1006 } 1007 1008 static uint32_t 1009 pci_xhci_cmd_config_ep(struct pci_xhci_softc *sc, uint32_t slot, 1010 struct xhci_trb *trb) 1011 { 1012 struct xhci_input_dev_ctx *input_ctx; 1013 struct pci_xhci_dev_emu *dev; 1014 struct xhci_dev_ctx *dev_ctx; 1015 struct xhci_endp_ctx *ep_ctx, *iep_ctx; 1016 uint32_t cmderr; 1017 int i; 1018 1019 cmderr = XHCI_TRB_ERROR_SUCCESS; 1020 1021 DPRINTF(("pci_xhci config_ep slot %u", slot)); 1022 1023 dev = XHCI_SLOTDEV_PTR(sc, slot); 1024 assert(dev != NULL); 1025 1026 if ((trb->dwTrb3 & XHCI_TRB_3_DCEP_BIT) != 0) { 1027 DPRINTF(("pci_xhci config_ep - deconfigure ep slot %u", 1028 slot)); 1029 if (dev->dev_ue->ue_stop != NULL) 1030 dev->dev_ue->ue_stop(dev->dev_sc); 1031 1032 dev->dev_slotstate = XHCI_ST_ADDRESSED; 1033 1034 dev->hci.hci_address = 0; 1035 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1036 1037 /* number of contexts */ 1038 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE( 1039 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); 1040 1041 /* slot state */ 1042 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1043 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_ADDRESSED, 1044 0x1F, 27); 1045 1046 /* disable endpoints */ 1047 for (i = 2; i < 32; i++) 1048 pci_xhci_disable_ep(dev, i); 1049 1050 cmderr = XHCI_TRB_ERROR_SUCCESS; 1051 1052 goto done; 1053 } 1054 1055 if (dev->dev_slotstate < XHCI_ST_ADDRESSED) { 1056 DPRINTF(("pci_xhci: config_ep slotstate x%x != addressed", 1057 dev->dev_slotstate)); 1058 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 1059 goto done; 1060 } 1061 1062 /* In addressed/configured state; 1063 * for each drop endpoint ctx flag: 1064 * ep->state = DISABLED 1065 * for each add endpoint ctx flag: 1066 * cp(ep-in, ep-out) 1067 * ep->state = RUNNING 1068 * for each drop+add endpoint flag: 1069 * reset ep resources 1070 * cp(ep-in, ep-out) 1071 * ep->state = RUNNING 1072 * if input->DisabledCtx[2-31] < 30: (at least 1 ep not disabled) 1073 * slot->state = configured 1074 */ 1075 1076 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1077 dev_ctx = dev->dev_ctx; 1078 
DPRINTF(("pci_xhci: config_ep inputctx: D:x%08x A:x%08x 7:x%08x", 1079 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1, 1080 input_ctx->ctx_input.dwInCtx7)); 1081 1082 for (i = 2; i <= 31; i++) { 1083 ep_ctx = &dev_ctx->ctx_ep[i]; 1084 1085 if (input_ctx->ctx_input.dwInCtx0 & 1086 XHCI_INCTX_0_DROP_MASK(i)) { 1087 DPRINTF((" config ep - dropping ep %d", i)); 1088 pci_xhci_disable_ep(dev, i); 1089 } 1090 1091 if (input_ctx->ctx_input.dwInCtx1 & 1092 XHCI_INCTX_1_ADD_MASK(i)) { 1093 iep_ctx = &input_ctx->ctx_ep[i]; 1094 1095 DPRINTF((" enable ep[%d] %08x %08x %016lx %08x", 1096 i, iep_ctx->dwEpCtx0, iep_ctx->dwEpCtx1, 1097 iep_ctx->qwEpCtx2, iep_ctx->dwEpCtx4)); 1098 1099 memcpy(ep_ctx, iep_ctx, sizeof(struct xhci_endp_ctx)); 1100 1101 pci_xhci_init_ep(dev, i); 1102 1103 /* ep state */ 1104 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1105 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1106 } 1107 } 1108 1109 /* slot state to configured */ 1110 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1111 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_CONFIGURED, 0x1F, 27); 1112 dev_ctx->ctx_slot.dwSctx0 = FIELD_COPY( 1113 dev_ctx->ctx_slot.dwSctx0, input_ctx->ctx_slot.dwSctx0, 0x1F, 27); 1114 dev->dev_slotstate = XHCI_ST_CONFIGURED; 1115 1116 DPRINTF(("EP configured; slot %u [0]=0x%08x [1]=0x%08x [2]=0x%08x " 1117 "[3]=0x%08x", 1118 slot, dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1119 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1120 1121 done: 1122 return (cmderr); 1123 } 1124 1125 static uint32_t 1126 pci_xhci_cmd_reset_ep(struct pci_xhci_softc *sc, uint32_t slot, 1127 struct xhci_trb *trb) 1128 { 1129 struct pci_xhci_dev_emu *dev; 1130 struct pci_xhci_dev_ep *devep; 1131 struct xhci_dev_ctx *dev_ctx; 1132 struct xhci_endp_ctx *ep_ctx; 1133 uint32_t cmderr, epid; 1134 uint32_t type; 1135 1136 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1137 1138 DPRINTF(("pci_xhci: reset ep %u: slot %u", epid, slot)); 1139 1140 cmderr = XHCI_TRB_ERROR_SUCCESS; 1141 1142 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1143 1144 dev = XHCI_SLOTDEV_PTR(sc, slot); 1145 assert(dev != NULL); 1146 1147 if (type == XHCI_TRB_TYPE_STOP_EP && 1148 (trb->dwTrb3 & XHCI_TRB_3_SUSP_EP_BIT) != 0) { 1149 /* XXX suspend endpoint for 10ms */ 1150 } 1151 1152 if (epid < 1 || epid > 31) { 1153 DPRINTF(("pci_xhci: reset ep: invalid epid %u", epid)); 1154 cmderr = XHCI_TRB_ERROR_TRB; 1155 goto done; 1156 } 1157 1158 devep = &dev->eps[epid]; 1159 if (devep->ep_xfer != NULL) 1160 USB_DATA_XFER_RESET(devep->ep_xfer); 1161 1162 dev_ctx = dev->dev_ctx; 1163 assert(dev_ctx != NULL); 1164 1165 ep_ctx = &dev_ctx->ctx_ep[epid]; 1166 1167 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1168 1169 if (devep->ep_MaxPStreams == 0) 1170 ep_ctx->qwEpCtx2 = devep->ep_ringaddr | devep->ep_ccs; 1171 1172 DPRINTF(("pci_xhci: reset ep[%u] %08x %08x %016lx %08x", 1173 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1174 ep_ctx->dwEpCtx4)); 1175 1176 if (type == XHCI_TRB_TYPE_RESET_EP && 1177 (dev->dev_ue->ue_reset == NULL || 1178 dev->dev_ue->ue_reset(dev->dev_sc) < 0)) { 1179 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 1180 goto done; 1181 } 1182 1183 done: 1184 return (cmderr); 1185 } 1186 1187 1188 static uint32_t 1189 pci_xhci_find_stream(struct pci_xhci_softc *sc, struct xhci_endp_ctx *ep, 1190 struct pci_xhci_dev_ep *devep, uint32_t streamid) 1191 { 1192 struct xhci_stream_ctx *sctx; 1193 1194 if (devep->ep_MaxPStreams == 0) 1195 return (XHCI_TRB_ERROR_TRB); 1196 1197 if (devep->ep_MaxPStreams > XHCI_STREAMS_MAX) 1198 
return (XHCI_TRB_ERROR_INVALID_SID); 1199 1200 if (XHCI_EPCTX_0_LSA_GET(ep->dwEpCtx0) == 0) { 1201 DPRINTF(("pci_xhci: find_stream; LSA bit not set")); 1202 return (XHCI_TRB_ERROR_INVALID_SID); 1203 } 1204 1205 /* only support primary stream */ 1206 if (streamid > devep->ep_MaxPStreams) 1207 return (XHCI_TRB_ERROR_STREAM_TYPE); 1208 1209 sctx = (struct xhci_stream_ctx *)XHCI_GADDR(sc, ep->qwEpCtx2 & ~0xFUL) + 1210 streamid; 1211 if (!XHCI_SCTX_0_SCT_GET(sctx->qwSctx0)) 1212 return (XHCI_TRB_ERROR_STREAM_TYPE); 1213 1214 return (XHCI_TRB_ERROR_SUCCESS); 1215 } 1216 1217 1218 static uint32_t 1219 pci_xhci_cmd_set_tr(struct pci_xhci_softc *sc, uint32_t slot, 1220 struct xhci_trb *trb) 1221 { 1222 struct pci_xhci_dev_emu *dev; 1223 struct pci_xhci_dev_ep *devep; 1224 struct xhci_dev_ctx *dev_ctx; 1225 struct xhci_endp_ctx *ep_ctx; 1226 uint32_t cmderr, epid; 1227 uint32_t streamid; 1228 1229 cmderr = XHCI_TRB_ERROR_SUCCESS; 1230 1231 dev = XHCI_SLOTDEV_PTR(sc, slot); 1232 assert(dev != NULL); 1233 1234 DPRINTF(("pci_xhci set_tr: new-tr x%016lx, SCT %u DCS %u", 1235 (trb->qwTrb0 & ~0xF), (uint32_t)((trb->qwTrb0 >> 1) & 0x7), 1236 (uint32_t)(trb->qwTrb0 & 0x1))); 1237 DPRINTF((" stream-id %u, slot %u, epid %u, C %u", 1238 (trb->dwTrb2 >> 16) & 0xFFFF, 1239 XHCI_TRB_3_SLOT_GET(trb->dwTrb3), 1240 XHCI_TRB_3_EP_GET(trb->dwTrb3), trb->dwTrb3 & 0x1)); 1241 1242 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1243 if (epid < 1 || epid > 31) { 1244 DPRINTF(("pci_xhci: set_tr_deq: invalid epid %u", epid)); 1245 cmderr = XHCI_TRB_ERROR_TRB; 1246 goto done; 1247 } 1248 1249 dev_ctx = dev->dev_ctx; 1250 assert(dev_ctx != NULL); 1251 1252 ep_ctx = &dev_ctx->ctx_ep[epid]; 1253 devep = &dev->eps[epid]; 1254 1255 switch (XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)) { 1256 case XHCI_ST_EPCTX_STOPPED: 1257 case XHCI_ST_EPCTX_ERROR: 1258 break; 1259 default: 1260 DPRINTF(("pci_xhci cmd set_tr invalid state %x", 1261 XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0))); 1262 cmderr = XHCI_TRB_ERROR_CONTEXT_STATE; 1263 goto done; 1264 } 1265 1266 streamid = XHCI_TRB_2_STREAM_GET(trb->dwTrb2); 1267 if (devep->ep_MaxPStreams > 0) { 1268 cmderr = pci_xhci_find_stream(sc, ep_ctx, devep, streamid); 1269 if (cmderr == XHCI_TRB_ERROR_SUCCESS) { 1270 assert(devep->ep_sctx != NULL); 1271 1272 devep->ep_sctx[streamid].qwSctx0 = trb->qwTrb0; 1273 devep->ep_sctx_trbs[streamid].ringaddr = 1274 trb->qwTrb0 & ~0xF; 1275 devep->ep_sctx_trbs[streamid].ccs = 1276 XHCI_EPCTX_2_DCS_GET(trb->qwTrb0); 1277 } 1278 } else { 1279 if (streamid != 0) { 1280 DPRINTF(("pci_xhci cmd set_tr streamid %x != 0", 1281 streamid)); 1282 } 1283 ep_ctx->qwEpCtx2 = trb->qwTrb0 & ~0xFUL; 1284 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & ~0xFUL; 1285 devep->ep_ccs = trb->qwTrb0 & 0x1; 1286 devep->ep_tr = XHCI_GADDR(sc, devep->ep_ringaddr); 1287 1288 DPRINTF(("pci_xhci set_tr first TRB:")); 1289 pci_xhci_dump_trb(devep->ep_tr); 1290 } 1291 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1292 1293 done: 1294 return (cmderr); 1295 } 1296 1297 static uint32_t 1298 pci_xhci_cmd_eval_ctx(struct pci_xhci_softc *sc, uint32_t slot, 1299 struct xhci_trb *trb) 1300 { 1301 struct xhci_input_dev_ctx *input_ctx; 1302 struct xhci_slot_ctx *islot_ctx; 1303 struct xhci_dev_ctx *dev_ctx; 1304 struct xhci_endp_ctx *ep0_ctx; 1305 uint32_t cmderr; 1306 1307 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1308 islot_ctx = &input_ctx->ctx_slot; 1309 ep0_ctx = &input_ctx->ctx_ep[1]; 1310 1311 cmderr = XHCI_TRB_ERROR_SUCCESS; 1312 DPRINTF(("pci_xhci: eval ctx, input ctl: D 0x%08x 
A 0x%08x,", 1313 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1)); 1314 DPRINTF((" slot %08x %08x %08x %08x", 1315 islot_ctx->dwSctx0, islot_ctx->dwSctx1, 1316 islot_ctx->dwSctx2, islot_ctx->dwSctx3)); 1317 DPRINTF((" ep0 %08x %08x %016lx %08x", 1318 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1319 ep0_ctx->dwEpCtx4)); 1320 1321 /* this command expects drop-ctx=0 & add-ctx=slot+ep0 */ 1322 if ((input_ctx->ctx_input.dwInCtx0 != 0) || 1323 (input_ctx->ctx_input.dwInCtx1 & 0x03) == 0) { 1324 DPRINTF(("pci_xhci: eval ctx, input ctl invalid")); 1325 cmderr = XHCI_TRB_ERROR_TRB; 1326 goto done; 1327 } 1328 1329 /* assign address to slot; in this emulation, slot_id = address */ 1330 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1331 1332 DPRINTF(("pci_xhci: eval ctx, dev ctx")); 1333 DPRINTF((" slot %08x %08x %08x %08x", 1334 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1335 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1336 1337 if (input_ctx->ctx_input.dwInCtx1 & 0x01) { /* slot ctx */ 1338 /* set max exit latency */ 1339 dev_ctx->ctx_slot.dwSctx1 = FIELD_COPY( 1340 dev_ctx->ctx_slot.dwSctx1, input_ctx->ctx_slot.dwSctx1, 1341 0xFFFF, 0); 1342 1343 /* set interrupter target */ 1344 dev_ctx->ctx_slot.dwSctx2 = FIELD_COPY( 1345 dev_ctx->ctx_slot.dwSctx2, input_ctx->ctx_slot.dwSctx2, 1346 0x3FF, 22); 1347 } 1348 if (input_ctx->ctx_input.dwInCtx1 & 0x02) { /* control ctx */ 1349 /* set max packet size */ 1350 dev_ctx->ctx_ep[1].dwEpCtx1 = FIELD_COPY( 1351 dev_ctx->ctx_ep[1].dwEpCtx1, ep0_ctx->dwEpCtx1, 1352 0xFFFF, 16); 1353 1354 ep0_ctx = &dev_ctx->ctx_ep[1]; 1355 } 1356 1357 DPRINTF(("pci_xhci: eval ctx, output ctx")); 1358 DPRINTF((" slot %08x %08x %08x %08x", 1359 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1360 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1361 DPRINTF((" ep0 %08x %08x %016lx %08x", 1362 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1363 ep0_ctx->dwEpCtx4)); 1364 1365 done: 1366 return (cmderr); 1367 } 1368 1369 static int 1370 pci_xhci_complete_commands(struct pci_xhci_softc *sc) 1371 { 1372 struct xhci_trb evtrb; 1373 struct xhci_trb *trb; 1374 uint64_t crcr; 1375 uint32_t ccs; /* cycle state (XHCI 4.9.2) */ 1376 uint32_t type; 1377 uint32_t slot; 1378 uint32_t cmderr; 1379 int error; 1380 1381 error = 0; 1382 sc->opregs.crcr |= XHCI_CRCR_LO_CRR; 1383 1384 trb = sc->opregs.cr_p; 1385 ccs = sc->opregs.crcr & XHCI_CRCR_LO_RCS; 1386 crcr = sc->opregs.crcr & ~0xF; 1387 1388 while (1) { 1389 sc->opregs.cr_p = trb; 1390 1391 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1392 1393 if ((trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT) != 1394 (ccs & XHCI_TRB_3_CYCLE_BIT)) 1395 break; 1396 1397 DPRINTF(("pci_xhci: cmd type 0x%x, Trb0 x%016lx dwTrb2 x%08x" 1398 " dwTrb3 x%08x, TRB_CYCLE %u/ccs %u", 1399 type, trb->qwTrb0, trb->dwTrb2, trb->dwTrb3, 1400 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT, ccs)); 1401 1402 cmderr = XHCI_TRB_ERROR_SUCCESS; 1403 evtrb.dwTrb2 = 0; 1404 evtrb.dwTrb3 = (ccs & XHCI_TRB_3_CYCLE_BIT) | 1405 XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_CMD_COMPLETE); 1406 slot = 0; 1407 1408 switch (type) { 1409 case XHCI_TRB_TYPE_LINK: /* 0x06 */ 1410 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1411 ccs ^= XHCI_CRCR_LO_RCS; 1412 break; 1413 1414 case XHCI_TRB_TYPE_ENABLE_SLOT: /* 0x09 */ 1415 cmderr = pci_xhci_cmd_enable_slot(sc, &slot); 1416 break; 1417 1418 case XHCI_TRB_TYPE_DISABLE_SLOT: /* 0x0A */ 1419 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1420 cmderr = pci_xhci_cmd_disable_slot(sc, slot); 1421 break; 1422 1423 case 
XHCI_TRB_TYPE_ADDRESS_DEVICE: /* 0x0B */ 1424 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1425 cmderr = pci_xhci_cmd_address_device(sc, slot, trb); 1426 break; 1427 1428 case XHCI_TRB_TYPE_CONFIGURE_EP: /* 0x0C */ 1429 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1430 cmderr = pci_xhci_cmd_config_ep(sc, slot, trb); 1431 break; 1432 1433 case XHCI_TRB_TYPE_EVALUATE_CTX: /* 0x0D */ 1434 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1435 cmderr = pci_xhci_cmd_eval_ctx(sc, slot, trb); 1436 break; 1437 1438 case XHCI_TRB_TYPE_RESET_EP: /* 0x0E */ 1439 DPRINTF(("Reset Endpoint on slot %d", slot)); 1440 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1441 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1442 break; 1443 1444 case XHCI_TRB_TYPE_STOP_EP: /* 0x0F */ 1445 DPRINTF(("Stop Endpoint on slot %d", slot)); 1446 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1447 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1448 break; 1449 1450 case XHCI_TRB_TYPE_SET_TR_DEQUEUE: /* 0x10 */ 1451 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1452 cmderr = pci_xhci_cmd_set_tr(sc, slot, trb); 1453 break; 1454 1455 case XHCI_TRB_TYPE_RESET_DEVICE: /* 0x11 */ 1456 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1457 cmderr = pci_xhci_cmd_reset_device(sc, slot); 1458 break; 1459 1460 case XHCI_TRB_TYPE_FORCE_EVENT: /* 0x12 */ 1461 /* TODO: */ 1462 break; 1463 1464 case XHCI_TRB_TYPE_NEGOTIATE_BW: /* 0x13 */ 1465 break; 1466 1467 case XHCI_TRB_TYPE_SET_LATENCY_TOL: /* 0x14 */ 1468 break; 1469 1470 case XHCI_TRB_TYPE_GET_PORT_BW: /* 0x15 */ 1471 break; 1472 1473 case XHCI_TRB_TYPE_FORCE_HEADER: /* 0x16 */ 1474 break; 1475 1476 case XHCI_TRB_TYPE_NOOP_CMD: /* 0x17 */ 1477 break; 1478 1479 default: 1480 DPRINTF(("pci_xhci: unsupported cmd %x", type)); 1481 break; 1482 } 1483 1484 if (type != XHCI_TRB_TYPE_LINK) { 1485 /* 1486 * insert command completion event and assert intr 1487 */ 1488 evtrb.qwTrb0 = crcr; 1489 evtrb.dwTrb2 |= XHCI_TRB_2_ERROR_SET(cmderr); 1490 evtrb.dwTrb3 |= XHCI_TRB_3_SLOT_SET(slot); 1491 DPRINTF(("pci_xhci: command 0x%x result: 0x%x", 1492 type, cmderr)); 1493 pci_xhci_insert_event(sc, &evtrb, 1); 1494 } 1495 1496 trb = pci_xhci_trb_next(sc, trb, &crcr); 1497 } 1498 1499 sc->opregs.crcr = crcr | (sc->opregs.crcr & XHCI_CRCR_LO_CA) | ccs; 1500 sc->opregs.crcr &= ~XHCI_CRCR_LO_CRR; 1501 return (error); 1502 } 1503 1504 static void 1505 pci_xhci_dump_trb(struct xhci_trb *trb) 1506 { 1507 static const char *trbtypes[] = { 1508 "RESERVED", 1509 "NORMAL", 1510 "SETUP_STAGE", 1511 "DATA_STAGE", 1512 "STATUS_STAGE", 1513 "ISOCH", 1514 "LINK", 1515 "EVENT_DATA", 1516 "NOOP", 1517 "ENABLE_SLOT", 1518 "DISABLE_SLOT", 1519 "ADDRESS_DEVICE", 1520 "CONFIGURE_EP", 1521 "EVALUATE_CTX", 1522 "RESET_EP", 1523 "STOP_EP", 1524 "SET_TR_DEQUEUE", 1525 "RESET_DEVICE", 1526 "FORCE_EVENT", 1527 "NEGOTIATE_BW", 1528 "SET_LATENCY_TOL", 1529 "GET_PORT_BW", 1530 "FORCE_HEADER", 1531 "NOOP_CMD" 1532 }; 1533 uint32_t type; 1534 1535 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1536 DPRINTF(("pci_xhci: trb[@%p] type x%02x %s 0:x%016lx 2:x%08x 3:x%08x", 1537 trb, type, 1538 type <= XHCI_TRB_TYPE_NOOP_CMD ? 
trbtypes[type] : "INVALID", 1539 trb->qwTrb0, trb->dwTrb2, trb->dwTrb3)); 1540 } 1541 1542 static int 1543 pci_xhci_xfer_complete(struct pci_xhci_softc *sc, struct usb_data_xfer *xfer, 1544 uint32_t slot, uint32_t epid, int *do_intr) 1545 { 1546 struct pci_xhci_dev_emu *dev; 1547 struct pci_xhci_dev_ep *devep; 1548 struct xhci_dev_ctx *dev_ctx; 1549 struct xhci_endp_ctx *ep_ctx; 1550 struct xhci_trb *trb; 1551 struct xhci_trb evtrb; 1552 uint32_t trbflags; 1553 uint32_t edtla; 1554 int i, err; 1555 1556 dev = XHCI_SLOTDEV_PTR(sc, slot); 1557 devep = &dev->eps[epid]; 1558 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1559 1560 assert(dev_ctx != NULL); 1561 1562 ep_ctx = &dev_ctx->ctx_ep[epid]; 1563 1564 err = XHCI_TRB_ERROR_SUCCESS; 1565 *do_intr = 0; 1566 edtla = 0; 1567 1568 /* go through list of TRBs and insert event(s) */ 1569 for (i = xfer->head; xfer->ndata > 0; ) { 1570 evtrb.qwTrb0 = (uint64_t)xfer->data[i].hci_data; 1571 trb = XHCI_GADDR(sc, evtrb.qwTrb0); 1572 trbflags = trb->dwTrb3; 1573 1574 DPRINTF(("pci_xhci: xfer[%d] done?%u:%d trb %x %016lx %x " 1575 "(err %d) IOC?%d", 1576 i, xfer->data[i].processed, xfer->data[i].blen, 1577 XHCI_TRB_3_TYPE_GET(trbflags), evtrb.qwTrb0, 1578 trbflags, err, 1579 trb->dwTrb3 & XHCI_TRB_3_IOC_BIT ? 1 : 0)); 1580 1581 if (!xfer->data[i].processed) { 1582 xfer->head = i; 1583 break; 1584 } 1585 1586 xfer->ndata--; 1587 edtla += xfer->data[i].bdone; 1588 1589 trb->dwTrb3 = (trb->dwTrb3 & ~0x1) | (xfer->data[i].ccs); 1590 1591 pci_xhci_update_ep_ring(sc, dev, devep, ep_ctx, 1592 xfer->data[i].streamid, xfer->data[i].trbnext, 1593 xfer->data[i].ccs); 1594 1595 /* Only interrupt if IOC or short packet */ 1596 if (!(trb->dwTrb3 & XHCI_TRB_3_IOC_BIT) && 1597 !((err == XHCI_TRB_ERROR_SHORT_PKT) && 1598 (trb->dwTrb3 & XHCI_TRB_3_ISP_BIT))) { 1599 1600 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1601 continue; 1602 } 1603 1604 evtrb.dwTrb2 = XHCI_TRB_2_ERROR_SET(err) | 1605 XHCI_TRB_2_REM_SET(xfer->data[i].blen); 1606 1607 evtrb.dwTrb3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_TRANSFER) | 1608 XHCI_TRB_3_SLOT_SET(slot) | XHCI_TRB_3_EP_SET(epid); 1609 1610 if (XHCI_TRB_3_TYPE_GET(trbflags) == XHCI_TRB_TYPE_EVENT_DATA) { 1611 DPRINTF(("pci_xhci EVENT_DATA edtla %u", edtla)); 1612 evtrb.qwTrb0 = trb->qwTrb0; 1613 evtrb.dwTrb2 = (edtla & 0xFFFFF) | 1614 XHCI_TRB_2_ERROR_SET(err); 1615 evtrb.dwTrb3 |= XHCI_TRB_3_ED_BIT; 1616 edtla = 0; 1617 } 1618 1619 *do_intr = 1; 1620 1621 err = pci_xhci_insert_event(sc, &evtrb, 0); 1622 if (err != XHCI_TRB_ERROR_SUCCESS) { 1623 break; 1624 } 1625 1626 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1627 } 1628 1629 return (err); 1630 } 1631 1632 static void 1633 pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, 1634 struct pci_xhci_dev_emu *dev __unused, struct pci_xhci_dev_ep *devep, 1635 struct xhci_endp_ctx *ep_ctx, uint32_t streamid, uint64_t ringaddr, int ccs) 1636 { 1637 1638 if (devep->ep_MaxPStreams != 0) { 1639 devep->ep_sctx[streamid].qwSctx0 = (ringaddr & ~0xFUL) | 1640 (ccs & 0x1); 1641 1642 devep->ep_sctx_trbs[streamid].ringaddr = ringaddr & ~0xFUL; 1643 devep->ep_sctx_trbs[streamid].ccs = ccs & 0x1; 1644 ep_ctx->qwEpCtx2 = (ep_ctx->qwEpCtx2 & ~0x1) | (ccs & 0x1); 1645 1646 DPRINTF(("xhci update ep-ring stream %d, addr %lx", 1647 streamid, devep->ep_sctx[streamid].qwSctx0)); 1648 } else { 1649 devep->ep_ringaddr = ringaddr & ~0xFUL; 1650 devep->ep_ccs = ccs & 0x1; 1651 devep->ep_tr = XHCI_GADDR(sc, ringaddr & ~0xFUL); 1652 ep_ctx->qwEpCtx2 = (ringaddr & ~0xFUL) | (ccs & 0x1); 1653 1654 DPRINTF(("xhci update ep-ring, addr %lx", 1655 
(devep->ep_ringaddr | devep->ep_ccs))); 1656 } 1657 } 1658 1659 /* 1660 * Outstanding transfer still in progress (device NAK'd earlier) so retry 1661 * the transfer again to see if it succeeds. 1662 */ 1663 static int 1664 pci_xhci_try_usb_xfer(struct pci_xhci_softc *sc, 1665 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1666 struct xhci_endp_ctx *ep_ctx, uint32_t slot, uint32_t epid) 1667 { 1668 struct usb_data_xfer *xfer; 1669 int err; 1670 int do_intr; 1671 1672 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1673 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1674 1675 err = 0; 1676 do_intr = 0; 1677 1678 xfer = devep->ep_xfer; 1679 USB_DATA_XFER_LOCK(xfer); 1680 1681 /* outstanding requests queued up */ 1682 if (dev->dev_ue->ue_data != NULL) { 1683 err = dev->dev_ue->ue_data(dev->dev_sc, xfer, 1684 epid & 0x1 ? USB_XFER_IN : USB_XFER_OUT, epid/2); 1685 if (err == USB_ERR_CANCELLED) { 1686 if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) == 1687 USB_NAK) 1688 err = XHCI_TRB_ERROR_SUCCESS; 1689 } else { 1690 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, 1691 &do_intr); 1692 if (err == XHCI_TRB_ERROR_SUCCESS && do_intr) { 1693 pci_xhci_assert_interrupt(sc); 1694 } 1695 1696 1697 /* XXX should not do it if error? */ 1698 USB_DATA_XFER_RESET(xfer); 1699 } 1700 } 1701 1702 USB_DATA_XFER_UNLOCK(xfer); 1703 1704 1705 return (err); 1706 } 1707 1708 1709 static int 1710 pci_xhci_handle_transfer(struct pci_xhci_softc *sc, 1711 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1712 struct xhci_endp_ctx *ep_ctx, struct xhci_trb *trb, uint32_t slot, 1713 uint32_t epid, uint64_t addr, uint32_t ccs, uint32_t streamid) 1714 { 1715 struct xhci_trb *setup_trb; 1716 struct usb_data_xfer *xfer; 1717 struct usb_data_xfer_block *xfer_block; 1718 uint64_t val; 1719 uint32_t trbflags; 1720 int do_intr, err; 1721 int do_retry; 1722 1723 ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0, 1724 XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1725 1726 xfer = devep->ep_xfer; 1727 USB_DATA_XFER_LOCK(xfer); 1728 1729 DPRINTF(("pci_xhci handle_transfer slot %u", slot)); 1730 1731 retry: 1732 err = XHCI_TRB_ERROR_INVALID; 1733 do_retry = 0; 1734 do_intr = 0; 1735 setup_trb = NULL; 1736 1737 while (1) { 1738 pci_xhci_dump_trb(trb); 1739 1740 trbflags = trb->dwTrb3; 1741 1742 if (XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK && 1743 (trbflags & XHCI_TRB_3_CYCLE_BIT) != 1744 (ccs & XHCI_TRB_3_CYCLE_BIT)) { 1745 DPRINTF(("Cycle-bit changed trbflags %x, ccs %x", 1746 trbflags & XHCI_TRB_3_CYCLE_BIT, ccs)); 1747 break; 1748 } 1749 1750 xfer_block = NULL; 1751 1752 switch (XHCI_TRB_3_TYPE_GET(trbflags)) { 1753 case XHCI_TRB_TYPE_LINK: 1754 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1755 ccs ^= 0x1; 1756 1757 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1758 (void *)addr, ccs); 1759 xfer_block->processed = 1; 1760 break; 1761 1762 case XHCI_TRB_TYPE_SETUP_STAGE: 1763 if ((trbflags & XHCI_TRB_3_IDT_BIT) == 0 || 1764 XHCI_TRB_2_BYTES_GET(trb->dwTrb2) != 8) { 1765 DPRINTF(("pci_xhci: invalid setup trb")); 1766 err = XHCI_TRB_ERROR_TRB; 1767 goto errout; 1768 } 1769 setup_trb = trb; 1770 1771 val = trb->qwTrb0; 1772 if (!xfer->ureq) 1773 xfer->ureq = malloc( 1774 sizeof(struct usb_device_request)); 1775 memcpy(xfer->ureq, &val, 1776 sizeof(struct usb_device_request)); 1777 1778 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1779 (void *)addr, ccs); 1780 xfer_block->processed = 1; 1781 break; 1782 1783 case XHCI_TRB_TYPE_NORMAL: 1784 case XHCI_TRB_TYPE_ISOCH: 1785 if (setup_trb != NULL) { 1786 DPRINTF(("pci_xhci: trb 
not supposed to be in " 1787 "ctl scope")); 1788 err = XHCI_TRB_ERROR_TRB; 1789 goto errout; 1790 } 1791 /* fall through */ 1792 1793 case XHCI_TRB_TYPE_DATA_STAGE: 1794 xfer_block = usb_data_xfer_append(xfer, 1795 (void *)(trbflags & XHCI_TRB_3_IDT_BIT ? 1796 &trb->qwTrb0 : XHCI_GADDR(sc, trb->qwTrb0)), 1797 trb->dwTrb2 & 0x1FFFF, (void *)addr, ccs); 1798 break; 1799 1800 case XHCI_TRB_TYPE_STATUS_STAGE: 1801 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1802 (void *)addr, ccs); 1803 break; 1804 1805 case XHCI_TRB_TYPE_NOOP: 1806 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1807 (void *)addr, ccs); 1808 xfer_block->processed = 1; 1809 break; 1810 1811 case XHCI_TRB_TYPE_EVENT_DATA: 1812 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1813 (void *)addr, ccs); 1814 if ((epid > 1) && (trbflags & XHCI_TRB_3_IOC_BIT)) { 1815 xfer_block->processed = 1; 1816 } 1817 break; 1818 1819 default: 1820 DPRINTF(("pci_xhci: handle xfer unexpected trb type " 1821 "0x%x", 1822 XHCI_TRB_3_TYPE_GET(trbflags))); 1823 err = XHCI_TRB_ERROR_TRB; 1824 goto errout; 1825 } 1826 1827 trb = pci_xhci_trb_next(sc, trb, &addr); 1828 1829 DPRINTF(("pci_xhci: next trb: 0x%lx", (uint64_t)trb)); 1830 1831 if (xfer_block) { 1832 xfer_block->trbnext = addr; 1833 xfer_block->streamid = streamid; 1834 } 1835 1836 if (!setup_trb && !(trbflags & XHCI_TRB_3_CHAIN_BIT) && 1837 XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK) { 1838 break; 1839 } 1840 1841 /* handle current batch that requires interrupt on complete */ 1842 if (trbflags & XHCI_TRB_3_IOC_BIT) { 1843 DPRINTF(("pci_xhci: trb IOC bit set")); 1844 if (epid == 1) 1845 do_retry = 1; 1846 break; 1847 } 1848 } 1849 1850 DPRINTF(("pci_xhci[%d]: xfer->ndata %u", __LINE__, xfer->ndata)); 1851 1852 if (xfer->ndata <= 0) 1853 goto errout; 1854 1855 if (epid == 1) { 1856 int usberr; 1857 1858 if (dev->dev_ue->ue_request != NULL) 1859 usberr = dev->dev_ue->ue_request(dev->dev_sc, xfer); 1860 else 1861 usberr = USB_ERR_NOT_STARTED; 1862 err = USB_TO_XHCI_ERR(usberr); 1863 if (err == XHCI_TRB_ERROR_SUCCESS || 1864 err == XHCI_TRB_ERROR_STALL || 1865 err == XHCI_TRB_ERROR_SHORT_PKT) { 1866 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, 1867 &do_intr); 1868 if (err != XHCI_TRB_ERROR_SUCCESS) 1869 do_retry = 0; 1870 } 1871 1872 } else { 1873 /* handle data transfer */ 1874 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1875 err = XHCI_TRB_ERROR_SUCCESS; 1876 } 1877 1878 errout: 1879 if (err == XHCI_TRB_ERROR_EV_RING_FULL) 1880 DPRINTF(("pci_xhci[%d]: event ring full", __LINE__)); 1881 1882 if (!do_retry) 1883 USB_DATA_XFER_UNLOCK(xfer); 1884 1885 if (do_intr) 1886 pci_xhci_assert_interrupt(sc); 1887 1888 if (do_retry) { 1889 USB_DATA_XFER_RESET(xfer); 1890 DPRINTF(("pci_xhci[%d]: retry:continuing with next TRBs", 1891 __LINE__)); 1892 goto retry; 1893 } 1894 1895 if (epid == 1) 1896 USB_DATA_XFER_RESET(xfer); 1897 1898 return (err); 1899 } 1900 1901 static void 1902 pci_xhci_device_doorbell(struct pci_xhci_softc *sc, uint32_t slot, 1903 uint32_t epid, uint32_t streamid) 1904 { 1905 struct pci_xhci_dev_emu *dev; 1906 struct pci_xhci_dev_ep *devep; 1907 struct xhci_dev_ctx *dev_ctx; 1908 struct xhci_endp_ctx *ep_ctx; 1909 struct pci_xhci_trb_ring *sctx_tr; 1910 struct xhci_trb *trb; 1911 uint64_t ringaddr; 1912 uint32_t ccs; 1913 int error; 1914 1915 DPRINTF(("pci_xhci doorbell slot %u epid %u stream %u", 1916 slot, epid, streamid)); 1917 1918 if (slot == 0 || slot > XHCI_MAX_SLOTS) { 1919 DPRINTF(("pci_xhci: invalid doorbell slot %u", slot)); 1920 return; 1921 
} 1922 1923 if (epid == 0 || epid >= XHCI_MAX_ENDPOINTS) { 1924 DPRINTF(("pci_xhci: invalid endpoint %u", epid)); 1925 return; 1926 } 1927 1928 dev = XHCI_SLOTDEV_PTR(sc, slot); 1929 devep = &dev->eps[epid]; 1930 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1931 if (!dev_ctx) { 1932 return; 1933 } 1934 ep_ctx = &dev_ctx->ctx_ep[epid]; 1935 1936 sctx_tr = NULL; 1937 1938 DPRINTF(("pci_xhci: device doorbell ep[%u] %08x %08x %016lx %08x", 1939 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1940 ep_ctx->dwEpCtx4)); 1941 1942 if (ep_ctx->qwEpCtx2 == 0) 1943 return; 1944 1945 /* handle pending transfers */ 1946 if (devep->ep_xfer->ndata > 0) { 1947 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1948 return; 1949 } 1950 1951 /* get next trb work item */ 1952 if (devep->ep_MaxPStreams != 0) { 1953 /* 1954 * Stream IDs of 0, 65535 (any stream), and 65534 1955 * (prime) are invalid. 1956 */ 1957 if (streamid == 0 || streamid == 65534 || streamid == 65535) { 1958 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1959 return; 1960 } 1961 1962 error = pci_xhci_find_stream(sc, ep_ctx, devep, streamid); 1963 if (error != XHCI_TRB_ERROR_SUCCESS) { 1964 DPRINTF(("pci_xhci: invalid stream %u: %d", 1965 streamid, error)); 1966 return; 1967 } 1968 sctx_tr = &devep->ep_sctx_trbs[streamid]; 1969 ringaddr = sctx_tr->ringaddr; 1970 ccs = sctx_tr->ccs; 1971 trb = XHCI_GADDR(sc, sctx_tr->ringaddr & ~0xFUL); 1972 DPRINTF(("doorbell, stream %u, ccs %lx, trb ccs %x", 1973 streamid, ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1974 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1975 } else { 1976 if (streamid != 0) { 1977 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1978 return; 1979 } 1980 ringaddr = devep->ep_ringaddr; 1981 ccs = devep->ep_ccs; 1982 trb = devep->ep_tr; 1983 DPRINTF(("doorbell, ccs %lx, trb ccs %x", 1984 ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1985 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1986 } 1987 1988 if (XHCI_TRB_3_TYPE_GET(trb->dwTrb3) == 0) { 1989 DPRINTF(("pci_xhci: ring %lx trb[%lx] EP %u is RESERVED?", 1990 ep_ctx->qwEpCtx2, devep->ep_ringaddr, epid)); 1991 return; 1992 } 1993 1994 pci_xhci_handle_transfer(sc, dev, devep, ep_ctx, trb, slot, epid, 1995 ringaddr, ccs, streamid); 1996 } 1997 1998 static void 1999 pci_xhci_dbregs_write(struct pci_xhci_softc *sc, uint64_t offset, 2000 uint64_t value) 2001 { 2002 2003 offset = (offset - sc->dboff) / sizeof(uint32_t); 2004 2005 DPRINTF(("pci_xhci: doorbell write offset 0x%lx: 0x%lx", 2006 offset, value)); 2007 2008 if (XHCI_HALTED(sc)) { 2009 DPRINTF(("pci_xhci: controller halted")); 2010 return; 2011 } 2012 2013 if (offset == 0) 2014 pci_xhci_complete_commands(sc); 2015 else if (sc->portregs != NULL) 2016 pci_xhci_device_doorbell(sc, offset, 2017 XHCI_DB_TARGET_GET(value), XHCI_DB_SID_GET(value)); 2018 } 2019 2020 static void 2021 pci_xhci_rtsregs_write(struct pci_xhci_softc *sc, uint64_t offset, 2022 uint64_t value) 2023 { 2024 struct pci_xhci_rtsregs *rts; 2025 2026 offset -= sc->rtsoff; 2027 2028 if (offset == 0) { 2029 DPRINTF(("pci_xhci attempted write to MFINDEX")); 2030 return; 2031 } 2032 2033 DPRINTF(("pci_xhci: runtime regs write offset 0x%lx: 0x%lx", 2034 offset, value)); 2035 2036 offset -= 0x20; /* start of intrreg */ 2037 2038 rts = &sc->rtsregs; 2039 2040 switch (offset) { 2041 case 0x00: 2042 if (value & XHCI_IMAN_INTR_PEND) 2043 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2044 rts->intrreg.iman = (value & XHCI_IMAN_INTR_ENA) | 2045 (rts->intrreg.iman & XHCI_IMAN_INTR_PEND); 2046 2047 if (!(value & 
XHCI_IMAN_INTR_ENA)) 2048 pci_xhci_deassert_interrupt(sc); 2049 2050 break; 2051 2052 case 0x04: 2053 rts->intrreg.imod = value; 2054 break; 2055 2056 case 0x08: 2057 rts->intrreg.erstsz = value & 0xFFFF; 2058 break; 2059 2060 case 0x10: 2061 /* ERSTBA low bits */ 2062 rts->intrreg.erstba = MASK_64_HI(sc->rtsregs.intrreg.erstba) | 2063 (value & ~0x3F); 2064 break; 2065 2066 case 0x14: 2067 /* ERSTBA high bits */ 2068 rts->intrreg.erstba = (value << 32) | 2069 MASK_64_LO(sc->rtsregs.intrreg.erstba); 2070 2071 rts->erstba_p = XHCI_GADDR(sc, 2072 sc->rtsregs.intrreg.erstba & ~0x3FUL); 2073 2074 rts->erst_p = XHCI_GADDR(sc, 2075 sc->rtsregs.erstba_p->qwEvrsTablePtr & ~0x3FUL); 2076 2077 rts->er_enq_idx = 0; 2078 rts->er_events_cnt = 0; 2079 2080 DPRINTF(("pci_xhci: wr erstba erst (%p) ptr 0x%lx, sz %u", 2081 rts->erstba_p, 2082 rts->erstba_p->qwEvrsTablePtr, 2083 rts->erstba_p->dwEvrsTableSize)); 2084 break; 2085 2086 case 0x18: 2087 /* ERDP low bits */ 2088 rts->intrreg.erdp = 2089 MASK_64_HI(sc->rtsregs.intrreg.erdp) | 2090 (rts->intrreg.erdp & XHCI_ERDP_LO_BUSY) | 2091 (value & ~0xF); 2092 if (value & XHCI_ERDP_LO_BUSY) { 2093 rts->intrreg.erdp &= ~XHCI_ERDP_LO_BUSY; 2094 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2095 } 2096 2097 rts->er_deq_seg = XHCI_ERDP_LO_SINDEX(value); 2098 2099 break; 2100 2101 case 0x1C: 2102 /* ERDP high bits */ 2103 rts->intrreg.erdp = (value << 32) | 2104 MASK_64_LO(sc->rtsregs.intrreg.erdp); 2105 2106 if (rts->er_events_cnt > 0) { 2107 uint64_t erdp; 2108 int erdp_i; 2109 2110 erdp = rts->intrreg.erdp & ~0xF; 2111 erdp_i = (erdp - rts->erstba_p->qwEvrsTablePtr) / 2112 sizeof(struct xhci_trb); 2113 2114 if (erdp_i <= rts->er_enq_idx) 2115 rts->er_events_cnt = rts->er_enq_idx - erdp_i; 2116 else 2117 rts->er_events_cnt = 2118 rts->erstba_p->dwEvrsTableSize - 2119 (erdp_i - rts->er_enq_idx); 2120 2121 DPRINTF(("pci_xhci: erdp 0x%lx, events cnt %u", 2122 erdp, rts->er_events_cnt)); 2123 } 2124 2125 break; 2126 2127 default: 2128 DPRINTF(("pci_xhci attempted write to RTS offset 0x%lx", 2129 offset)); 2130 break; 2131 } 2132 } 2133 2134 static uint64_t 2135 pci_xhci_portregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2136 { 2137 struct pci_xhci_portregs *portregs; 2138 int port; 2139 uint32_t reg; 2140 2141 if (sc->portregs == NULL) 2142 return (0); 2143 2144 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ; 2145 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ; 2146 2147 if (port > XHCI_MAX_DEVS) { 2148 DPRINTF(("pci_xhci: portregs_read port %d >= XHCI_MAX_DEVS", 2149 port)); 2150 2151 /* return default value for unused port */ 2152 return (XHCI_PS_SPEED_SET(3)); 2153 } 2154 2155 portregs = XHCI_PORTREG_PTR(sc, port); 2156 switch (offset) { 2157 case 0: 2158 reg = portregs->portsc; 2159 break; 2160 case 4: 2161 reg = portregs->portpmsc; 2162 break; 2163 case 8: 2164 reg = portregs->portli; 2165 break; 2166 case 12: 2167 reg = portregs->porthlpmc; 2168 break; 2169 default: 2170 DPRINTF(("pci_xhci: unaligned portregs read offset %#lx", 2171 offset)); 2172 reg = 0xffffffff; 2173 break; 2174 } 2175 2176 DPRINTF(("pci_xhci: portregs read offset 0x%lx port %u -> 0x%x", 2177 offset, port, reg)); 2178 2179 return (reg); 2180 } 2181 2182 static void 2183 pci_xhci_hostop_write(struct pci_xhci_softc *sc, uint64_t offset, 2184 uint64_t value) 2185 { 2186 offset -= XHCI_CAPLEN; 2187 2188 if (offset < 0x400) 2189 DPRINTF(("pci_xhci: hostop write offset 0x%lx: 0x%lx", 2190 offset, value)); 2191 2192 switch (offset) { 2193 case XHCI_USBCMD: 2194 
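		/*
		 * The write is masked to the USBCMD bits this emulation
		 * implements before pci_xhci_usbcmd_write() acts on the
		 * command.
		 */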
sc->opregs.usbcmd = pci_xhci_usbcmd_write(sc, value & 0x3F0F); 2195 break; 2196 2197 case XHCI_USBSTS: 2198 /* clear bits on write */ 2199 sc->opregs.usbsts &= ~(value & 2200 (XHCI_STS_HSE|XHCI_STS_EINT|XHCI_STS_PCD|XHCI_STS_SSS| 2201 XHCI_STS_RSS|XHCI_STS_SRE|XHCI_STS_CNR)); 2202 break; 2203 2204 case XHCI_PAGESIZE: 2205 /* read only */ 2206 break; 2207 2208 case XHCI_DNCTRL: 2209 sc->opregs.dnctrl = value & 0xFFFF; 2210 break; 2211 2212 case XHCI_CRCR_LO: 2213 if (sc->opregs.crcr & XHCI_CRCR_LO_CRR) { 2214 sc->opregs.crcr &= ~(XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2215 sc->opregs.crcr |= value & 2216 (XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2217 } else { 2218 sc->opregs.crcr = MASK_64_HI(sc->opregs.crcr) | 2219 (value & (0xFFFFFFC0 | XHCI_CRCR_LO_RCS)); 2220 } 2221 break; 2222 2223 case XHCI_CRCR_HI: 2224 if (!(sc->opregs.crcr & XHCI_CRCR_LO_CRR)) { 2225 sc->opregs.crcr = MASK_64_LO(sc->opregs.crcr) | 2226 (value << 32); 2227 2228 sc->opregs.cr_p = XHCI_GADDR(sc, 2229 sc->opregs.crcr & ~0xF); 2230 } 2231 2232 if (sc->opregs.crcr & XHCI_CRCR_LO_CS) { 2233 /* Stop operation of Command Ring */ 2234 } 2235 2236 if (sc->opregs.crcr & XHCI_CRCR_LO_CA) { 2237 /* Abort command */ 2238 } 2239 2240 break; 2241 2242 case XHCI_DCBAAP_LO: 2243 sc->opregs.dcbaap = MASK_64_HI(sc->opregs.dcbaap) | 2244 (value & 0xFFFFFFC0); 2245 break; 2246 2247 case XHCI_DCBAAP_HI: 2248 sc->opregs.dcbaap = MASK_64_LO(sc->opregs.dcbaap) | 2249 (value << 32); 2250 sc->opregs.dcbaa_p = XHCI_GADDR(sc, sc->opregs.dcbaap & ~0x3FUL); 2251 2252 DPRINTF(("pci_xhci: opregs dcbaap = 0x%lx (vaddr 0x%lx)", 2253 sc->opregs.dcbaap, (uint64_t)sc->opregs.dcbaa_p)); 2254 break; 2255 2256 case XHCI_CONFIG: 2257 sc->opregs.config = value & 0x03FF; 2258 break; 2259 2260 default: 2261 if (offset >= 0x400) 2262 pci_xhci_portregs_write(sc, offset, value); 2263 2264 break; 2265 } 2266 } 2267 2268 2269 static void 2270 pci_xhci_write(struct pci_devinst *pi, int baridx, uint64_t offset, 2271 int size __unused, uint64_t value) 2272 { 2273 struct pci_xhci_softc *sc; 2274 2275 sc = pi->pi_arg; 2276 2277 assert(baridx == 0); 2278 2279 pthread_mutex_lock(&sc->mtx); 2280 if (offset < XHCI_CAPLEN) /* read only registers */ 2281 WPRINTF(("pci_xhci: write RO-CAPs offset %ld", offset)); 2282 else if (offset < sc->dboff) 2283 pci_xhci_hostop_write(sc, offset, value); 2284 else if (offset < sc->rtsoff) 2285 pci_xhci_dbregs_write(sc, offset, value); 2286 else if (offset < sc->regsend) 2287 pci_xhci_rtsregs_write(sc, offset, value); 2288 else 2289 WPRINTF(("pci_xhci: write invalid offset %ld", offset)); 2290 2291 pthread_mutex_unlock(&sc->mtx); 2292 } 2293 2294 static uint64_t 2295 pci_xhci_hostcap_read(struct pci_xhci_softc *sc, uint64_t offset) 2296 { 2297 uint64_t value; 2298 2299 switch (offset) { 2300 case XHCI_CAPLENGTH: /* 0x00 */ 2301 value = sc->caplength; 2302 break; 2303 2304 case XHCI_HCSPARAMS1: /* 0x04 */ 2305 value = sc->hcsparams1; 2306 break; 2307 2308 case XHCI_HCSPARAMS2: /* 0x08 */ 2309 value = sc->hcsparams2; 2310 break; 2311 2312 case XHCI_HCSPARAMS3: /* 0x0C */ 2313 value = sc->hcsparams3; 2314 break; 2315 2316 case XHCI_HCSPARAMS0: /* 0x10 */ 2317 value = sc->hccparams1; 2318 break; 2319 2320 case XHCI_DBOFF: /* 0x14 */ 2321 value = sc->dboff; 2322 break; 2323 2324 case XHCI_RTSOFF: /* 0x18 */ 2325 value = sc->rtsoff; 2326 break; 2327 2328 case XHCI_HCCPRAMS2: /* 0x1C */ 2329 value = sc->hccparams2; 2330 break; 2331 2332 default: 2333 value = 0; 2334 break; 2335 } 2336 2337 DPRINTF(("pci_xhci: hostcap read offset 0x%lx -> 0x%lx", 2338 offset, 
value)); 2339 2340 return (value); 2341 } 2342 2343 static uint64_t 2344 pci_xhci_hostop_read(struct pci_xhci_softc *sc, uint64_t offset) 2345 { 2346 uint64_t value; 2347 2348 offset = (offset - XHCI_CAPLEN); 2349 2350 switch (offset) { 2351 case XHCI_USBCMD: /* 0x00 */ 2352 value = sc->opregs.usbcmd; 2353 break; 2354 2355 case XHCI_USBSTS: /* 0x04 */ 2356 value = sc->opregs.usbsts; 2357 break; 2358 2359 case XHCI_PAGESIZE: /* 0x08 */ 2360 value = sc->opregs.pgsz; 2361 break; 2362 2363 case XHCI_DNCTRL: /* 0x14 */ 2364 value = sc->opregs.dnctrl; 2365 break; 2366 2367 case XHCI_CRCR_LO: /* 0x18 */ 2368 value = sc->opregs.crcr & XHCI_CRCR_LO_CRR; 2369 break; 2370 2371 case XHCI_CRCR_HI: /* 0x1C */ 2372 value = 0; 2373 break; 2374 2375 case XHCI_DCBAAP_LO: /* 0x30 */ 2376 value = sc->opregs.dcbaap & 0xFFFFFFFF; 2377 break; 2378 2379 case XHCI_DCBAAP_HI: /* 0x34 */ 2380 value = (sc->opregs.dcbaap >> 32) & 0xFFFFFFFF; 2381 break; 2382 2383 case XHCI_CONFIG: /* 0x38 */ 2384 value = sc->opregs.config; 2385 break; 2386 2387 default: 2388 if (offset >= 0x400) 2389 value = pci_xhci_portregs_read(sc, offset); 2390 else 2391 value = 0; 2392 2393 break; 2394 } 2395 2396 if (offset < 0x400) 2397 DPRINTF(("pci_xhci: hostop read offset 0x%lx -> 0x%lx", 2398 offset, value)); 2399 2400 return (value); 2401 } 2402 2403 static uint64_t 2404 pci_xhci_dbregs_read(struct pci_xhci_softc *sc __unused, 2405 uint64_t offset __unused) 2406 { 2407 /* read doorbell always returns 0 */ 2408 return (0); 2409 } 2410 2411 static uint64_t 2412 pci_xhci_rtsregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2413 { 2414 uint32_t value; 2415 2416 offset -= sc->rtsoff; 2417 value = 0; 2418 2419 if (offset == XHCI_MFINDEX) { 2420 value = sc->rtsregs.mfindex; 2421 } else if (offset >= 0x20) { 2422 int item; 2423 uint32_t *p; 2424 2425 offset -= 0x20; 2426 item = offset % 32; 2427 2428 assert(offset < sizeof(sc->rtsregs.intrreg)); 2429 2430 p = &sc->rtsregs.intrreg.iman; 2431 p += item / sizeof(uint32_t); 2432 value = *p; 2433 } 2434 2435 DPRINTF(("pci_xhci: rtsregs read offset 0x%lx -> 0x%x", 2436 offset, value)); 2437 2438 return (value); 2439 } 2440 2441 static uint64_t 2442 pci_xhci_xecp_read(struct pci_xhci_softc *sc, uint64_t offset) 2443 { 2444 uint32_t value; 2445 2446 offset -= sc->regsend; 2447 value = 0; 2448 2449 switch (offset) { 2450 case 0: 2451 /* rev major | rev minor | next-cap | cap-id */ 2452 value = (0x02 << 24) | (4 << 8) | XHCI_ID_PROTOCOLS; 2453 break; 2454 case 4: 2455 /* name string = "USB" */ 2456 value = 0x20425355; 2457 break; 2458 case 8: 2459 /* psic | proto-defined | compat # | compat offset */ 2460 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb2_port_start; 2461 break; 2462 case 12: 2463 break; 2464 case 16: 2465 /* rev major | rev minor | next-cap | cap-id */ 2466 value = (0x03 << 24) | XHCI_ID_PROTOCOLS; 2467 break; 2468 case 20: 2469 /* name string = "USB" */ 2470 value = 0x20425355; 2471 break; 2472 case 24: 2473 /* psic | proto-defined | compat # | compat offset */ 2474 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb3_port_start; 2475 break; 2476 case 28: 2477 break; 2478 default: 2479 DPRINTF(("pci_xhci: xecp invalid offset 0x%lx", offset)); 2480 break; 2481 } 2482 2483 DPRINTF(("pci_xhci: xecp read offset 0x%lx -> 0x%x", 2484 offset, value)); 2485 2486 return (value); 2487 } 2488 2489 2490 static uint64_t 2491 pci_xhci_read(struct pci_devinst *pi, int baridx, uint64_t offset, int size) 2492 { 2493 struct pci_xhci_softc *sc; 2494 uint32_t value; 2495 2496 sc = pi->pi_arg; 2497 2498 assert(baridx == 
0); 2499 2500 pthread_mutex_lock(&sc->mtx); 2501 if (offset < XHCI_CAPLEN) 2502 value = pci_xhci_hostcap_read(sc, offset); 2503 else if (offset < sc->dboff) 2504 value = pci_xhci_hostop_read(sc, offset); 2505 else if (offset < sc->rtsoff) 2506 value = pci_xhci_dbregs_read(sc, offset); 2507 else if (offset < sc->regsend) 2508 value = pci_xhci_rtsregs_read(sc, offset); 2509 else if (offset < (sc->regsend + 4*32)) 2510 value = pci_xhci_xecp_read(sc, offset); 2511 else { 2512 value = 0; 2513 WPRINTF(("pci_xhci: read invalid offset %ld", offset)); 2514 } 2515 2516 pthread_mutex_unlock(&sc->mtx); 2517 2518 switch (size) { 2519 case 1: 2520 value &= 0xFF; 2521 break; 2522 case 2: 2523 value &= 0xFFFF; 2524 break; 2525 case 4: 2526 value &= 0xFFFFFFFF; 2527 break; 2528 } 2529 2530 return (value); 2531 } 2532 2533 static void 2534 pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm) 2535 { 2536 struct pci_xhci_portregs *port; 2537 struct pci_xhci_dev_emu *dev; 2538 struct xhci_trb evtrb; 2539 int error; 2540 2541 assert(portn <= XHCI_MAX_DEVS); 2542 2543 DPRINTF(("xhci reset port %d", portn)); 2544 2545 port = XHCI_PORTREG_PTR(sc, portn); 2546 dev = XHCI_DEVINST_PTR(sc, portn); 2547 if (dev) { 2548 port->portsc &= ~(XHCI_PS_PLS_MASK | XHCI_PS_PR | XHCI_PS_PRC); 2549 port->portsc |= XHCI_PS_PED | 2550 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2551 2552 if (warm && dev->dev_ue->ue_usbver == 3) { 2553 port->portsc |= XHCI_PS_WRC; 2554 } 2555 2556 if ((port->portsc & XHCI_PS_PRC) == 0) { 2557 port->portsc |= XHCI_PS_PRC; 2558 2559 pci_xhci_set_evtrb(&evtrb, portn, 2560 XHCI_TRB_ERROR_SUCCESS, 2561 XHCI_TRB_EVENT_PORT_STS_CHANGE); 2562 error = pci_xhci_insert_event(sc, &evtrb, 1); 2563 if (error != XHCI_TRB_ERROR_SUCCESS) 2564 DPRINTF(("xhci reset port insert event " 2565 "failed")); 2566 } 2567 } 2568 } 2569 2570 static void 2571 pci_xhci_init_port(struct pci_xhci_softc *sc, int portn) 2572 { 2573 struct pci_xhci_portregs *port; 2574 struct pci_xhci_dev_emu *dev; 2575 2576 port = XHCI_PORTREG_PTR(sc, portn); 2577 dev = XHCI_DEVINST_PTR(sc, portn); 2578 if (dev) { 2579 port->portsc = XHCI_PS_CCS | /* connected */ 2580 XHCI_PS_PP; /* port power */ 2581 2582 if (dev->dev_ue->ue_usbver == 2) { 2583 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL) | 2584 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2585 } else { 2586 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0) | 2587 XHCI_PS_PED | /* enabled */ 2588 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2589 } 2590 2591 DPRINTF(("Init port %d 0x%x", portn, port->portsc)); 2592 } else { 2593 port->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP; 2594 DPRINTF(("Init empty port %d 0x%x", portn, port->portsc)); 2595 } 2596 } 2597 2598 static int 2599 pci_xhci_dev_intr(struct usb_hci *hci, int epctx) 2600 { 2601 struct pci_xhci_dev_emu *dev; 2602 struct xhci_dev_ctx *dev_ctx; 2603 struct xhci_trb evtrb; 2604 struct pci_xhci_softc *sc; 2605 struct pci_xhci_portregs *p; 2606 struct xhci_endp_ctx *ep_ctx; 2607 int error = 0; 2608 int dir_in; 2609 int epid; 2610 2611 dir_in = epctx & 0x80; 2612 epid = epctx & ~0x80; 2613 2614 /* HW endpoint contexts are 0-15; convert to epid based on dir */ 2615 epid = (epid * 2) + (dir_in ? 
1 : 0); 2616 2617 assert(epid >= 1 && epid <= 31); 2618 2619 dev = hci->hci_sc; 2620 sc = dev->xsc; 2621 2622 /* check if device is ready; OS has to initialise it */ 2623 if (sc->rtsregs.erstba_p == NULL || 2624 (sc->opregs.usbcmd & XHCI_CMD_RS) == 0 || 2625 dev->dev_ctx == NULL) 2626 return (0); 2627 2628 p = XHCI_PORTREG_PTR(sc, hci->hci_port); 2629 2630 /* raise event if link U3 (suspended) state */ 2631 if (XHCI_PS_PLS_GET(p->portsc) == 3) { 2632 p->portsc &= ~XHCI_PS_PLS_MASK; 2633 p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME); 2634 if ((p->portsc & XHCI_PS_PLC) != 0) 2635 return (0); 2636 2637 p->portsc |= XHCI_PS_PLC; 2638 2639 pci_xhci_set_evtrb(&evtrb, hci->hci_port, 2640 XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE); 2641 error = pci_xhci_insert_event(sc, &evtrb, 0); 2642 if (error != XHCI_TRB_ERROR_SUCCESS) 2643 goto done; 2644 } 2645 2646 dev_ctx = dev->dev_ctx; 2647 ep_ctx = &dev_ctx->ctx_ep[epid]; 2648 if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) { 2649 DPRINTF(("xhci device interrupt on disabled endpoint %d", 2650 epid)); 2651 return (0); 2652 } 2653 2654 DPRINTF(("xhci device interrupt on endpoint %d", epid)); 2655 2656 pci_xhci_device_doorbell(sc, hci->hci_port, epid, 0); 2657 2658 done: 2659 return (error); 2660 } 2661 2662 static int 2663 pci_xhci_dev_event(struct usb_hci *hci, enum hci_usbev evid __unused, 2664 void *param __unused) 2665 { 2666 DPRINTF(("xhci device event port %d", hci->hci_port)); 2667 return (0); 2668 } 2669 2670 /* 2671 * Each controller contains a "slot" node which contains a list of 2672 * child nodes each of which is a device. Each slot node's name 2673 * corresponds to a specific controller slot. These nodes 2674 * contain a "device" variable identifying the device model of the 2675 * USB device. For example: 2676 * 2677 * pci.0.1.0 2678 * .device="xhci" 2679 * .slot 2680 * .1 2681 * .device="tablet" 2682 */ 2683 static int 2684 pci_xhci_legacy_config(nvlist_t *nvl, const char *opts) 2685 { 2686 char node_name[16]; 2687 nvlist_t *slots_nvl, *slot_nvl; 2688 char *cp, *opt, *str, *tofree; 2689 int slot; 2690 2691 if (opts == NULL) 2692 return (0); 2693 2694 slots_nvl = create_relative_config_node(nvl, "slot"); 2695 slot = 1; 2696 tofree = str = strdup(opts); 2697 while ((opt = strsep(&str, ",")) != NULL) { 2698 /* device[=<config>] */ 2699 cp = strchr(opt, '='); 2700 if (cp != NULL) { 2701 *cp = '\0'; 2702 cp++; 2703 } 2704 2705 snprintf(node_name, sizeof(node_name), "%d", slot); 2706 slot++; 2707 slot_nvl = create_relative_config_node(slots_nvl, node_name); 2708 set_config_value_node(slot_nvl, "device", opt); 2709 2710 /* 2711 * NB: Given that we split on commas above, the legacy 2712 * format only supports a single option. 
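		 * (Whatever follows '=' is passed through to
		 * pci_parse_legacy_config() and, with the commas already
		 * consumed, holds at most one key=value pair.)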
2713 */ 2714 if (cp != NULL && *cp != '\0') 2715 pci_parse_legacy_config(slot_nvl, cp); 2716 } 2717 free(tofree); 2718 return (0); 2719 } 2720 2721 static int 2722 pci_xhci_parse_devices(struct pci_xhci_softc *sc, nvlist_t *nvl) 2723 { 2724 struct pci_xhci_dev_emu *dev; 2725 struct usb_devemu *ue; 2726 const nvlist_t *slots_nvl, *slot_nvl; 2727 const char *name, *device; 2728 char *cp; 2729 void *devsc, *cookie; 2730 long slot; 2731 int type, usb3_port, usb2_port, i, ndevices; 2732 2733 usb3_port = sc->usb3_port_start; 2734 usb2_port = sc->usb2_port_start; 2735 2736 sc->devices = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_dev_emu *)); 2737 sc->slots = calloc(XHCI_MAX_SLOTS, sizeof(struct pci_xhci_dev_emu *)); 2738 2739 ndevices = 0; 2740 2741 slots_nvl = find_relative_config_node(nvl, "slot"); 2742 if (slots_nvl == NULL) 2743 goto portsfinal; 2744 2745 cookie = NULL; 2746 while ((name = nvlist_next(slots_nvl, &type, &cookie)) != NULL) { 2747 if (usb2_port == ((sc->usb2_port_start) + XHCI_MAX_DEVS/2) || 2748 usb3_port == ((sc->usb3_port_start) + XHCI_MAX_DEVS/2)) { 2749 WPRINTF(("pci_xhci max number of USB 2 or 3 " 2750 "devices reached, max %d", XHCI_MAX_DEVS/2)); 2751 goto bad; 2752 } 2753 2754 if (type != NV_TYPE_NVLIST) { 2755 EPRINTLN( 2756 "pci_xhci: config variable '%s' under slot node", 2757 name); 2758 goto bad; 2759 } 2760 2761 slot = strtol(name, &cp, 0); 2762 if (*cp != '\0' || slot <= 0 || slot > XHCI_MAX_SLOTS) { 2763 EPRINTLN("pci_xhci: invalid slot '%s'", name); 2764 goto bad; 2765 } 2766 2767 if (XHCI_SLOTDEV_PTR(sc, slot) != NULL) { 2768 EPRINTLN("pci_xhci: duplicate slot '%s'", name); 2769 goto bad; 2770 } 2771 2772 slot_nvl = nvlist_get_nvlist(slots_nvl, name); 2773 device = get_config_value_node(slot_nvl, "device"); 2774 if (device == NULL) { 2775 EPRINTLN( 2776 "pci_xhci: missing \"device\" value for slot '%s'", 2777 name); 2778 goto bad; 2779 } 2780 2781 ue = usb_emu_finddev(device); 2782 if (ue == NULL) { 2783 EPRINTLN("pci_xhci: unknown device model \"%s\"", 2784 device); 2785 goto bad; 2786 } 2787 2788 DPRINTF(("pci_xhci adding device %s", device)); 2789 2790 dev = calloc(1, sizeof(struct pci_xhci_dev_emu)); 2791 dev->xsc = sc; 2792 dev->hci.hci_sc = dev; 2793 dev->hci.hci_intr = pci_xhci_dev_intr; 2794 dev->hci.hci_event = pci_xhci_dev_event; 2795 2796 if (ue->ue_usbver == 2) { 2797 if (usb2_port == sc->usb2_port_start + 2798 XHCI_MAX_DEVS / 2) { 2799 WPRINTF(("pci_xhci max number of USB 2 devices " 2800 "reached, max %d", XHCI_MAX_DEVS / 2)); 2801 goto bad; 2802 } 2803 dev->hci.hci_port = usb2_port; 2804 usb2_port++; 2805 } else { 2806 if (usb3_port == sc->usb3_port_start + 2807 XHCI_MAX_DEVS / 2) { 2808 WPRINTF(("pci_xhci max number of USB 3 devices " 2809 "reached, max %d", XHCI_MAX_DEVS / 2)); 2810 goto bad; 2811 } 2812 dev->hci.hci_port = usb3_port; 2813 usb3_port++; 2814 } 2815 XHCI_DEVINST_PTR(sc, dev->hci.hci_port) = dev; 2816 2817 dev->hci.hci_address = 0; 2818 devsc = ue->ue_init(&dev->hci, nvl); 2819 if (devsc == NULL) { 2820 goto bad; 2821 } 2822 2823 dev->dev_ue = ue; 2824 dev->dev_sc = devsc; 2825 2826 XHCI_SLOTDEV_PTR(sc, slot) = dev; 2827 ndevices++; 2828 } 2829 2830 portsfinal: 2831 sc->portregs = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_portregs)); 2832 2833 if (ndevices > 0) { 2834 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 2835 pci_xhci_init_port(sc, i); 2836 } 2837 } else { 2838 WPRINTF(("pci_xhci no USB devices configured")); 2839 } 2840 return (0); 2841 2842 bad: 2843 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 2844 free(XHCI_DEVINST_PTR(sc, 
i)); 2845 } 2846 2847 free(sc->devices); 2848 free(sc->slots); 2849 2850 return (-1); 2851 } 2852 2853 static int 2854 pci_xhci_init(struct pci_devinst *pi, nvlist_t *nvl) 2855 { 2856 struct pci_xhci_softc *sc; 2857 int error; 2858 2859 if (xhci_in_use) { 2860 WPRINTF(("pci_xhci controller already defined")); 2861 return (-1); 2862 } 2863 xhci_in_use = 1; 2864 2865 sc = calloc(1, sizeof(struct pci_xhci_softc)); 2866 pi->pi_arg = sc; 2867 sc->xsc_pi = pi; 2868 2869 sc->usb2_port_start = (XHCI_MAX_DEVS/2) + 1; 2870 sc->usb3_port_start = 1; 2871 2872 /* discover devices */ 2873 error = pci_xhci_parse_devices(sc, nvl); 2874 if (error < 0) 2875 goto done; 2876 else 2877 error = 0; 2878 2879 sc->caplength = XHCI_SET_CAPLEN(XHCI_CAPLEN) | 2880 XHCI_SET_HCIVERSION(0x0100); 2881 sc->hcsparams1 = XHCI_SET_HCSP1_MAXPORTS(XHCI_MAX_DEVS) | 2882 XHCI_SET_HCSP1_MAXINTR(1) | /* interrupters */ 2883 XHCI_SET_HCSP1_MAXSLOTS(XHCI_MAX_SLOTS); 2884 sc->hcsparams2 = XHCI_SET_HCSP2_ERSTMAX(XHCI_ERST_MAX) | 2885 XHCI_SET_HCSP2_IST(0x04); 2886 sc->hcsparams3 = 0; /* no latency */ 2887 sc->hccparams1 = XHCI_SET_HCCP1_AC64(1) | /* 64-bit addrs */ 2888 XHCI_SET_HCCP1_NSS(1) | /* no 2nd-streams */ 2889 XHCI_SET_HCCP1_SPC(1) | /* short packet */ 2890 XHCI_SET_HCCP1_MAXPSA(XHCI_STREAMS_MAX); 2891 sc->hccparams2 = XHCI_SET_HCCP2_LEC(1) | 2892 XHCI_SET_HCCP2_U3C(1); 2893 sc->dboff = XHCI_SET_DOORBELL(XHCI_CAPLEN + XHCI_PORTREGS_START + 2894 XHCI_MAX_DEVS * sizeof(struct pci_xhci_portregs)); 2895 2896 /* dboff must be 32-bit aligned */ 2897 if (sc->dboff & 0x3) 2898 sc->dboff = (sc->dboff + 0x3) & ~0x3; 2899 2900 /* rtsoff must be 32-bytes aligned */ 2901 sc->rtsoff = XHCI_SET_RTSOFFSET(sc->dboff + (XHCI_MAX_SLOTS+1) * 32); 2902 if (sc->rtsoff & 0x1F) 2903 sc->rtsoff = (sc->rtsoff + 0x1F) & ~0x1F; 2904 2905 DPRINTF(("pci_xhci dboff: 0x%x, rtsoff: 0x%x", sc->dboff, 2906 sc->rtsoff)); 2907 2908 sc->opregs.usbsts = XHCI_STS_HCH; 2909 sc->opregs.pgsz = XHCI_PAGESIZE_4K; 2910 2911 pci_xhci_reset(sc); 2912 2913 sc->regsend = sc->rtsoff + 0x20 + 32; /* only 1 intrpter */ 2914 2915 /* 2916 * Set extended capabilities pointer to be after regsend; 2917 * value of xecp field is 32-bit offset. 
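	 * (That is sc->regsend / 4 below: the byte offset expressed in
	 * 32-bit words, as the XECP field requires.)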
2918 */ 2919 sc->hccparams1 |= XHCI_SET_HCCP1_XECP(sc->regsend/4); 2920 2921 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1E31); 2922 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086); 2923 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SERIALBUS); 2924 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_SERIALBUS_USB); 2925 pci_set_cfgdata8(pi, PCIR_PROGIF,PCIP_SERIALBUS_USB_XHCI); 2926 pci_set_cfgdata8(pi, PCI_USBREV, PCI_USB_REV_3_0); 2927 2928 pci_emul_add_msicap(pi, 1); 2929 2930 /* regsend + xecp registers */ 2931 pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, sc->regsend + 4*32); 2932 DPRINTF(("pci_xhci pci_emu_alloc: %d", sc->regsend + 4*32)); 2933 2934 2935 pci_lintr_request(pi); 2936 2937 pthread_mutex_init(&sc->mtx, NULL); 2938 2939 done: 2940 if (error) { 2941 free(sc); 2942 } 2943 2944 return (error); 2945 } 2946 2947 #ifdef BHYVE_SNAPSHOT 2948 static void 2949 pci_xhci_map_devs_slots(struct pci_xhci_softc *sc, int maps[]) 2950 { 2951 int i, j; 2952 struct pci_xhci_dev_emu *dev, *slot; 2953 2954 memset(maps, 0, sizeof(maps[0]) * XHCI_MAX_SLOTS); 2955 2956 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 2957 for (j = 1; j <= XHCI_MAX_DEVS; j++) { 2958 slot = XHCI_SLOTDEV_PTR(sc, i); 2959 dev = XHCI_DEVINST_PTR(sc, j); 2960 2961 if (slot == dev) 2962 maps[i] = j; 2963 } 2964 } 2965 } 2966 2967 static int 2968 pci_xhci_snapshot_ep(struct pci_xhci_softc *sc __unused, 2969 struct pci_xhci_dev_emu *dev, int idx, struct vm_snapshot_meta *meta) 2970 { 2971 int k; 2972 int ret; 2973 struct usb_data_xfer *xfer; 2974 struct usb_data_xfer_block *xfer_block; 2975 2976 /* some sanity checks */ 2977 if (meta->op == VM_SNAPSHOT_SAVE) 2978 xfer = dev->eps[idx].ep_xfer; 2979 2980 SNAPSHOT_VAR_OR_LEAVE(xfer, meta, ret, done); 2981 if (xfer == NULL) { 2982 ret = 0; 2983 goto done; 2984 } 2985 2986 if (meta->op == VM_SNAPSHOT_RESTORE) { 2987 pci_xhci_init_ep(dev, idx); 2988 xfer = dev->eps[idx].ep_xfer; 2989 } 2990 2991 /* save / restore proper */ 2992 for (k = 0; k < USB_MAX_XFER_BLOCKS; k++) { 2993 xfer_block = &xfer->data[k]; 2994 2995 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(xfer_block->buf, 2996 XHCI_GADDR_SIZE(xfer_block->buf), true, meta, ret, 2997 done); 2998 SNAPSHOT_VAR_OR_LEAVE(xfer_block->blen, meta, ret, done); 2999 SNAPSHOT_VAR_OR_LEAVE(xfer_block->bdone, meta, ret, done); 3000 SNAPSHOT_VAR_OR_LEAVE(xfer_block->processed, meta, ret, done); 3001 SNAPSHOT_VAR_OR_LEAVE(xfer_block->hci_data, meta, ret, done); 3002 SNAPSHOT_VAR_OR_LEAVE(xfer_block->ccs, meta, ret, done); 3003 SNAPSHOT_VAR_OR_LEAVE(xfer_block->streamid, meta, ret, done); 3004 SNAPSHOT_VAR_OR_LEAVE(xfer_block->trbnext, meta, ret, done); 3005 } 3006 3007 SNAPSHOT_VAR_OR_LEAVE(xfer->ureq, meta, ret, done); 3008 if (xfer->ureq) { 3009 /* xfer->ureq is not allocated at restore time */ 3010 if (meta->op == VM_SNAPSHOT_RESTORE) 3011 xfer->ureq = malloc(sizeof(struct usb_device_request)); 3012 3013 SNAPSHOT_BUF_OR_LEAVE(xfer->ureq, 3014 sizeof(struct usb_device_request), 3015 meta, ret, done); 3016 } 3017 3018 SNAPSHOT_VAR_OR_LEAVE(xfer->ndata, meta, ret, done); 3019 SNAPSHOT_VAR_OR_LEAVE(xfer->head, meta, ret, done); 3020 SNAPSHOT_VAR_OR_LEAVE(xfer->tail, meta, ret, done); 3021 3022 done: 3023 return (ret); 3024 } 3025 3026 static int 3027 pci_xhci_snapshot(struct vm_snapshot_meta *meta) 3028 { 3029 int i, j; 3030 int ret; 3031 int restore_idx; 3032 struct pci_devinst *pi; 3033 struct pci_xhci_softc *sc; 3034 struct pci_xhci_portregs *port; 3035 struct pci_xhci_dev_emu *dev; 3036 char dname[SNAP_DEV_NAME_LEN]; 3037 int maps[XHCI_MAX_SLOTS + 1]; 3038 3039 pi = meta->dev_data; 3040 sc = 
pi->pi_arg; 3041 3042 SNAPSHOT_VAR_OR_LEAVE(sc->caplength, meta, ret, done); 3043 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams1, meta, ret, done); 3044 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams2, meta, ret, done); 3045 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams3, meta, ret, done); 3046 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams1, meta, ret, done); 3047 SNAPSHOT_VAR_OR_LEAVE(sc->dboff, meta, ret, done); 3048 SNAPSHOT_VAR_OR_LEAVE(sc->rtsoff, meta, ret, done); 3049 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams2, meta, ret, done); 3050 SNAPSHOT_VAR_OR_LEAVE(sc->regsend, meta, ret, done); 3051 3052 /* opregs */ 3053 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbcmd, meta, ret, done); 3054 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbsts, meta, ret, done); 3055 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.pgsz, meta, ret, done); 3056 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dnctrl, meta, ret, done); 3057 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.crcr, meta, ret, done); 3058 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dcbaap, meta, ret, done); 3059 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.config, meta, ret, done); 3060 3061 /* opregs.cr_p */ 3062 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.cr_p, 3063 XHCI_GADDR_SIZE(sc->opregs.cr_p), true, meta, ret, done); 3064 3065 /* opregs.dcbaa_p */ 3066 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.dcbaa_p, 3067 XHCI_GADDR_SIZE(sc->opregs.dcbaa_p), true, meta, ret, done); 3068 3069 /* rtsregs */ 3070 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.mfindex, meta, ret, done); 3071 3072 /* rtsregs.intrreg */ 3073 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.iman, meta, ret, done); 3074 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.imod, meta, ret, done); 3075 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstsz, meta, ret, done); 3076 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.rsvd, meta, ret, done); 3077 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstba, meta, ret, done); 3078 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erdp, meta, ret, done); 3079 3080 /* rtsregs.erstba_p */ 3081 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erstba_p, 3082 XHCI_GADDR_SIZE(sc->rtsregs.erstba_p), true, meta, ret, done); 3083 3084 /* rtsregs.erst_p */ 3085 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erst_p, 3086 XHCI_GADDR_SIZE(sc->rtsregs.erst_p), true, meta, ret, done); 3087 3088 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_deq_seg, meta, ret, done); 3089 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_idx, meta, ret, done); 3090 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_seg, meta, ret, done); 3091 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_events_cnt, meta, ret, done); 3092 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.event_pcs, meta, ret, done); 3093 3094 /* sanity checking */ 3095 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3096 dev = XHCI_DEVINST_PTR(sc, i); 3097 if (dev == NULL) 3098 continue; 3099 3100 if (meta->op == VM_SNAPSHOT_SAVE) 3101 restore_idx = i; 3102 SNAPSHOT_VAR_OR_LEAVE(restore_idx, meta, ret, done); 3103 3104 /* check if the restored device (when restoring) is sane */ 3105 if (restore_idx != i) { 3106 fprintf(stderr, "%s: idx not matching: actual: %d, " 3107 "expected: %d\r\n", __func__, restore_idx, i); 3108 ret = EINVAL; 3109 goto done; 3110 } 3111 3112 if (meta->op == VM_SNAPSHOT_SAVE) { 3113 memset(dname, 0, sizeof(dname)); 3114 strncpy(dname, dev->dev_ue->ue_emu, sizeof(dname) - 1); 3115 } 3116 3117 SNAPSHOT_BUF_OR_LEAVE(dname, sizeof(dname), meta, ret, done); 3118 3119 if (meta->op == VM_SNAPSHOT_RESTORE) { 3120 dname[sizeof(dname) - 1] = '\0'; 3121 if (strcmp(dev->dev_ue->ue_emu, dname)) { 3122 fprintf(stderr, "%s: device names mismatch: " 3123 "actual: %s, expected: %s\r\n", 3124 __func__, dname, 
dev->dev_ue->ue_emu); 3125 3126 ret = EINVAL; 3127 goto done; 3128 } 3129 } 3130 } 3131 3132 /* portregs */ 3133 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3134 port = XHCI_PORTREG_PTR(sc, i); 3135 dev = XHCI_DEVINST_PTR(sc, i); 3136 3137 if (dev == NULL) 3138 continue; 3139 3140 SNAPSHOT_VAR_OR_LEAVE(port->portsc, meta, ret, done); 3141 SNAPSHOT_VAR_OR_LEAVE(port->portpmsc, meta, ret, done); 3142 SNAPSHOT_VAR_OR_LEAVE(port->portli, meta, ret, done); 3143 SNAPSHOT_VAR_OR_LEAVE(port->porthlpmc, meta, ret, done); 3144 } 3145 3146 /* slots */ 3147 if (meta->op == VM_SNAPSHOT_SAVE) 3148 pci_xhci_map_devs_slots(sc, maps); 3149 3150 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 3151 SNAPSHOT_VAR_OR_LEAVE(maps[i], meta, ret, done); 3152 3153 if (meta->op == VM_SNAPSHOT_SAVE) { 3154 dev = XHCI_SLOTDEV_PTR(sc, i); 3155 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 3156 if (maps[i] != 0) 3157 dev = XHCI_DEVINST_PTR(sc, maps[i]); 3158 else 3159 dev = NULL; 3160 3161 XHCI_SLOTDEV_PTR(sc, i) = dev; 3162 } else { 3163 /* error */ 3164 ret = EINVAL; 3165 goto done; 3166 } 3167 3168 if (dev == NULL) 3169 continue; 3170 3171 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(dev->dev_ctx, 3172 XHCI_GADDR_SIZE(dev->dev_ctx), true, meta, ret, done); 3173 3174 if (dev->dev_ctx != NULL) { 3175 for (j = 1; j < XHCI_MAX_ENDPOINTS; j++) { 3176 ret = pci_xhci_snapshot_ep(sc, dev, j, meta); 3177 if (ret != 0) 3178 goto done; 3179 } 3180 } 3181 3182 SNAPSHOT_VAR_OR_LEAVE(dev->dev_slotstate, meta, ret, done); 3183 3184 /* devices[i]->dev_sc */ 3185 dev->dev_ue->ue_snapshot(dev->dev_sc, meta); 3186 3187 /* devices[i]->hci */ 3188 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_address, meta, ret, done); 3189 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_port, meta, ret, done); 3190 } 3191 3192 SNAPSHOT_VAR_OR_LEAVE(sc->usb2_port_start, meta, ret, done); 3193 SNAPSHOT_VAR_OR_LEAVE(sc->usb3_port_start, meta, ret, done); 3194 3195 done: 3196 return (ret); 3197 } 3198 #endif 3199 3200 static const struct pci_devemu pci_de_xhci = { 3201 .pe_emu = "xhci", 3202 .pe_init = pci_xhci_init, 3203 .pe_legacy_config = pci_xhci_legacy_config, 3204 .pe_barwrite = pci_xhci_write, 3205 .pe_barread = pci_xhci_read, 3206 #ifdef BHYVE_SNAPSHOT 3207 .pe_snapshot = pci_xhci_snapshot, 3208 #endif 3209 }; 3210 PCI_EMUL_SET(pci_de_xhci); 3211
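
/*
 * Port numbering recap: with XHCI_MAX_DEVS == 8, pci_xhci_parse_devices()
 * assigns emulated USB3 devices to root-hub ports 1..4 (usb3_port_start)
 * and USB2 devices to ports 5..8 (usb2_port_start), matching the two
 * protocol capability entries exposed by pci_xhci_xecp_read().
 */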