/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014 Leon Dang <ldang@nahannisys.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
   XHCI options:
      -s <n>,xhci,{devices}

   devices:
      tablet             USB tablet mouse
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/uio.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <unistd.h>

#include <machine/vmm_snapshot.h>

#include <dev/usb/usbdi.h>
#include <dev/usb/usb.h>
#include <dev/usb/usb_freebsd.h>
#include <xhcireg.h>

#include "bhyverun.h"
#include "debug.h"
#include "pci_emul.h"
#include "pci_xhci.h"
#include "usb_emul.h"


static int xhci_debug = 0;
#define	DPRINTF(params) if (xhci_debug) PRINTLN params
#define	WPRINTF(params) PRINTLN params


#define	XHCI_NAME		"xhci"
#define	XHCI_MAX_DEVS		8	/* 4 USB3 + 4 USB2 devs */

#define	XHCI_MAX_SLOTS		64	/* min allowed by Windows drivers */

/*
 * XHCI data structures can be up to 64k, but limit paddr_guest2host mapping
 * to 4k to avoid going over the guest physical memory barrier.
78 */ 79 #define XHCI_PADDR_SZ 4096 /* paddr_guest2host max size */ 80 81 #define XHCI_ERST_MAX 0 /* max 2^entries event ring seg tbl */ 82 83 #define XHCI_CAPLEN (4*8) /* offset of op register space */ 84 #define XHCI_HCCPRAMS2 0x1C /* offset of HCCPARAMS2 register */ 85 #define XHCI_PORTREGS_START 0x400 86 #define XHCI_DOORBELL_MAX 256 87 88 #define XHCI_STREAMS_MAX 1 /* 4-15 in XHCI spec */ 89 90 /* caplength and hci-version registers */ 91 #define XHCI_SET_CAPLEN(x) ((x) & 0xFF) 92 #define XHCI_SET_HCIVERSION(x) (((x) & 0xFFFF) << 16) 93 #define XHCI_GET_HCIVERSION(x) (((x) >> 16) & 0xFFFF) 94 95 /* hcsparams1 register */ 96 #define XHCI_SET_HCSP1_MAXSLOTS(x) ((x) & 0xFF) 97 #define XHCI_SET_HCSP1_MAXINTR(x) (((x) & 0x7FF) << 8) 98 #define XHCI_SET_HCSP1_MAXPORTS(x) (((x) & 0xFF) << 24) 99 100 /* hcsparams2 register */ 101 #define XHCI_SET_HCSP2_IST(x) ((x) & 0x0F) 102 #define XHCI_SET_HCSP2_ERSTMAX(x) (((x) & 0x0F) << 4) 103 #define XHCI_SET_HCSP2_MAXSCRATCH_HI(x) (((x) & 0x1F) << 21) 104 #define XHCI_SET_HCSP2_MAXSCRATCH_LO(x) (((x) & 0x1F) << 27) 105 106 /* hcsparams3 register */ 107 #define XHCI_SET_HCSP3_U1EXITLATENCY(x) ((x) & 0xFF) 108 #define XHCI_SET_HCSP3_U2EXITLATENCY(x) (((x) & 0xFFFF) << 16) 109 110 /* hccparams1 register */ 111 #define XHCI_SET_HCCP1_AC64(x) ((x) & 0x01) 112 #define XHCI_SET_HCCP1_BNC(x) (((x) & 0x01) << 1) 113 #define XHCI_SET_HCCP1_CSZ(x) (((x) & 0x01) << 2) 114 #define XHCI_SET_HCCP1_PPC(x) (((x) & 0x01) << 3) 115 #define XHCI_SET_HCCP1_PIND(x) (((x) & 0x01) << 4) 116 #define XHCI_SET_HCCP1_LHRC(x) (((x) & 0x01) << 5) 117 #define XHCI_SET_HCCP1_LTC(x) (((x) & 0x01) << 6) 118 #define XHCI_SET_HCCP1_NSS(x) (((x) & 0x01) << 7) 119 #define XHCI_SET_HCCP1_PAE(x) (((x) & 0x01) << 8) 120 #define XHCI_SET_HCCP1_SPC(x) (((x) & 0x01) << 9) 121 #define XHCI_SET_HCCP1_SEC(x) (((x) & 0x01) << 10) 122 #define XHCI_SET_HCCP1_CFC(x) (((x) & 0x01) << 11) 123 #define XHCI_SET_HCCP1_MAXPSA(x) (((x) & 0x0F) << 12) 124 #define XHCI_SET_HCCP1_XECP(x) (((x) & 0xFFFF) << 16) 125 126 /* hccparams2 register */ 127 #define XHCI_SET_HCCP2_U3C(x) ((x) & 0x01) 128 #define XHCI_SET_HCCP2_CMC(x) (((x) & 0x01) << 1) 129 #define XHCI_SET_HCCP2_FSC(x) (((x) & 0x01) << 2) 130 #define XHCI_SET_HCCP2_CTC(x) (((x) & 0x01) << 3) 131 #define XHCI_SET_HCCP2_LEC(x) (((x) & 0x01) << 4) 132 #define XHCI_SET_HCCP2_CIC(x) (((x) & 0x01) << 5) 133 134 /* other registers */ 135 #define XHCI_SET_DOORBELL(x) ((x) & ~0x03) 136 #define XHCI_SET_RTSOFFSET(x) ((x) & ~0x0F) 137 138 /* register masks */ 139 #define XHCI_PS_PLS_MASK (0xF << 5) /* port link state */ 140 #define XHCI_PS_SPEED_MASK (0xF << 10) /* port speed */ 141 #define XHCI_PS_PIC_MASK (0x3 << 14) /* port indicator */ 142 143 /* port register set */ 144 #define XHCI_PORTREGS_BASE 0x400 /* base offset */ 145 #define XHCI_PORTREGS_PORT0 0x3F0 146 #define XHCI_PORTREGS_SETSZ 0x10 /* size of a set */ 147 148 #define MASK_64_HI(x) ((x) & ~0xFFFFFFFFULL) 149 #define MASK_64_LO(x) ((x) & 0xFFFFFFFFULL) 150 151 #define FIELD_REPLACE(a,b,m,s) (((a) & ~((m) << (s))) | \ 152 (((b) & (m)) << (s))) 153 #define FIELD_COPY(a,b,m,s) (((a) & ~((m) << (s))) | \ 154 (((b) & ((m) << (s))))) 155 156 #define SNAP_DEV_NAME_LEN 128 157 158 struct pci_xhci_trb_ring { 159 uint64_t ringaddr; /* current dequeue guest address */ 160 uint32_t ccs; /* consumer cycle state */ 161 }; 162 163 /* device endpoint transfer/stream rings */ 164 struct pci_xhci_dev_ep { 165 union { 166 struct xhci_trb *_epu_tr; 167 struct xhci_stream_ctx *_epu_sctx; 168 } _ep_trbsctx; 169 #define 
ep_tr _ep_trbsctx._epu_tr 170 #define ep_sctx _ep_trbsctx._epu_sctx 171 172 union { 173 struct pci_xhci_trb_ring _epu_trb; 174 struct pci_xhci_trb_ring *_epu_sctx_trbs; 175 } _ep_trb_rings; 176 #define ep_ringaddr _ep_trb_rings._epu_trb.ringaddr 177 #define ep_ccs _ep_trb_rings._epu_trb.ccs 178 #define ep_sctx_trbs _ep_trb_rings._epu_sctx_trbs 179 180 struct usb_data_xfer *ep_xfer; /* transfer chain */ 181 }; 182 183 /* device context base address array: maps slot->device context */ 184 struct xhci_dcbaa { 185 uint64_t dcba[USB_MAX_DEVICES+1]; /* xhci_dev_ctx ptrs */ 186 }; 187 188 /* port status registers */ 189 struct pci_xhci_portregs { 190 uint32_t portsc; /* port status and control */ 191 uint32_t portpmsc; /* port pwr mgmt status & control */ 192 uint32_t portli; /* port link info */ 193 uint32_t porthlpmc; /* port hardware LPM control */ 194 } __packed; 195 #define XHCI_PS_SPEED_SET(x) (((x) & 0xF) << 10) 196 197 /* xHC operational registers */ 198 struct pci_xhci_opregs { 199 uint32_t usbcmd; /* usb command */ 200 uint32_t usbsts; /* usb status */ 201 uint32_t pgsz; /* page size */ 202 uint32_t dnctrl; /* device notification control */ 203 uint64_t crcr; /* command ring control */ 204 uint64_t dcbaap; /* device ctx base addr array ptr */ 205 uint32_t config; /* configure */ 206 207 /* guest mapped addresses: */ 208 struct xhci_trb *cr_p; /* crcr dequeue */ 209 struct xhci_dcbaa *dcbaa_p; /* dev ctx array ptr */ 210 }; 211 212 /* xHC runtime registers */ 213 struct pci_xhci_rtsregs { 214 uint32_t mfindex; /* microframe index */ 215 struct { /* interrupter register set */ 216 uint32_t iman; /* interrupter management */ 217 uint32_t imod; /* interrupter moderation */ 218 uint32_t erstsz; /* event ring segment table size */ 219 uint32_t rsvd; 220 uint64_t erstba; /* event ring seg-tbl base addr */ 221 uint64_t erdp; /* event ring dequeue ptr */ 222 } intrreg __packed; 223 224 /* guest mapped addresses */ 225 struct xhci_event_ring_seg *erstba_p; 226 struct xhci_trb *erst_p; /* event ring segment tbl */ 227 int er_deq_seg; /* event ring dequeue segment */ 228 int er_enq_idx; /* event ring enqueue index - xHCI */ 229 int er_enq_seg; /* event ring enqueue segment */ 230 uint32_t er_events_cnt; /* number of events in ER */ 231 uint32_t event_pcs; /* producer cycle state flag */ 232 }; 233 234 235 struct pci_xhci_softc; 236 237 238 /* 239 * USB device emulation container. 240 * This is referenced from usb_hci->hci_sc; 1 pci_xhci_dev_emu for each 241 * emulated device instance. 
242 */ 243 struct pci_xhci_dev_emu { 244 struct pci_xhci_softc *xsc; 245 246 /* XHCI contexts */ 247 struct xhci_dev_ctx *dev_ctx; 248 struct pci_xhci_dev_ep eps[XHCI_MAX_ENDPOINTS]; 249 int dev_slotstate; 250 251 struct usb_devemu *dev_ue; /* USB emulated dev */ 252 void *dev_sc; /* device's softc */ 253 254 struct usb_hci hci; 255 }; 256 257 struct pci_xhci_softc { 258 struct pci_devinst *xsc_pi; 259 260 pthread_mutex_t mtx; 261 262 uint32_t caplength; /* caplen & hciversion */ 263 uint32_t hcsparams1; /* structural parameters 1 */ 264 uint32_t hcsparams2; /* structural parameters 2 */ 265 uint32_t hcsparams3; /* structural parameters 3 */ 266 uint32_t hccparams1; /* capability parameters 1 */ 267 uint32_t dboff; /* doorbell offset */ 268 uint32_t rtsoff; /* runtime register space offset */ 269 uint32_t hccparams2; /* capability parameters 2 */ 270 271 uint32_t regsend; /* end of configuration registers */ 272 273 struct pci_xhci_opregs opregs; 274 struct pci_xhci_rtsregs rtsregs; 275 276 struct pci_xhci_portregs *portregs; 277 struct pci_xhci_dev_emu **devices; /* XHCI[port] = device */ 278 struct pci_xhci_dev_emu **slots; /* slots assigned from 1 */ 279 int ndevices; 280 281 int usb2_port_start; 282 int usb3_port_start; 283 }; 284 285 286 /* portregs and devices arrays are set up to start from idx=1 */ 287 #define XHCI_PORTREG_PTR(x,n) &(x)->portregs[(n)] 288 #define XHCI_DEVINST_PTR(x,n) (x)->devices[(n)] 289 #define XHCI_SLOTDEV_PTR(x,n) (x)->slots[(n)] 290 291 #define XHCI_HALTED(sc) ((sc)->opregs.usbsts & XHCI_STS_HCH) 292 293 #define XHCI_GADDR_SIZE(a) (XHCI_PADDR_SZ - \ 294 (((uint64_t) (a)) & (XHCI_PADDR_SZ - 1))) 295 #define XHCI_GADDR(sc,a) paddr_guest2host((sc)->xsc_pi->pi_vmctx, \ 296 (a), XHCI_GADDR_SIZE(a)) 297 298 static int xhci_in_use; 299 300 /* map USB errors to XHCI */ 301 static const int xhci_usb_errors[USB_ERR_MAX] = { 302 [USB_ERR_NORMAL_COMPLETION] = XHCI_TRB_ERROR_SUCCESS, 303 [USB_ERR_PENDING_REQUESTS] = XHCI_TRB_ERROR_RESOURCE, 304 [USB_ERR_NOT_STARTED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 305 [USB_ERR_INVAL] = XHCI_TRB_ERROR_INVALID, 306 [USB_ERR_NOMEM] = XHCI_TRB_ERROR_RESOURCE, 307 [USB_ERR_CANCELLED] = XHCI_TRB_ERROR_STOPPED, 308 [USB_ERR_BAD_ADDRESS] = XHCI_TRB_ERROR_PARAMETER, 309 [USB_ERR_BAD_BUFSIZE] = XHCI_TRB_ERROR_PARAMETER, 310 [USB_ERR_BAD_FLAG] = XHCI_TRB_ERROR_PARAMETER, 311 [USB_ERR_NO_CALLBACK] = XHCI_TRB_ERROR_STALL, 312 [USB_ERR_IN_USE] = XHCI_TRB_ERROR_RESOURCE, 313 [USB_ERR_NO_ADDR] = XHCI_TRB_ERROR_RESOURCE, 314 [USB_ERR_NO_PIPE] = XHCI_TRB_ERROR_RESOURCE, 315 [USB_ERR_ZERO_NFRAMES] = XHCI_TRB_ERROR_UNDEFINED, 316 [USB_ERR_ZERO_MAXP] = XHCI_TRB_ERROR_UNDEFINED, 317 [USB_ERR_SET_ADDR_FAILED] = XHCI_TRB_ERROR_RESOURCE, 318 [USB_ERR_NO_POWER] = XHCI_TRB_ERROR_ENDP_NOT_ON, 319 [USB_ERR_TOO_DEEP] = XHCI_TRB_ERROR_RESOURCE, 320 [USB_ERR_IOERROR] = XHCI_TRB_ERROR_TRB, 321 [USB_ERR_NOT_CONFIGURED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 322 [USB_ERR_TIMEOUT] = XHCI_TRB_ERROR_CMD_ABORTED, 323 [USB_ERR_SHORT_XFER] = XHCI_TRB_ERROR_SHORT_PKT, 324 [USB_ERR_STALLED] = XHCI_TRB_ERROR_STALL, 325 [USB_ERR_INTERRUPTED] = XHCI_TRB_ERROR_CMD_ABORTED, 326 [USB_ERR_DMA_LOAD_FAILED] = XHCI_TRB_ERROR_DATA_BUF, 327 [USB_ERR_BAD_CONTEXT] = XHCI_TRB_ERROR_TRB, 328 [USB_ERR_NO_ROOT_HUB] = XHCI_TRB_ERROR_UNDEFINED, 329 [USB_ERR_NO_INTR_THREAD] = XHCI_TRB_ERROR_UNDEFINED, 330 [USB_ERR_NOT_LOCKED] = XHCI_TRB_ERROR_UNDEFINED, 331 }; 332 #define USB_TO_XHCI_ERR(e) ((e) < USB_ERR_MAX ? 
xhci_usb_errors[(e)] : \ 333 XHCI_TRB_ERROR_INVALID) 334 335 static int pci_xhci_insert_event(struct pci_xhci_softc *sc, 336 struct xhci_trb *evtrb, int do_intr); 337 static void pci_xhci_dump_trb(struct xhci_trb *trb); 338 static void pci_xhci_assert_interrupt(struct pci_xhci_softc *sc); 339 static void pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot); 340 static void pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm); 341 static void pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, 342 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 343 struct xhci_endp_ctx *ep_ctx, uint32_t streamid, 344 uint64_t ringaddr, int ccs); 345 346 static void 347 pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode, 348 uint32_t evtype) 349 { 350 evtrb->qwTrb0 = port << 24; 351 evtrb->dwTrb2 = XHCI_TRB_2_ERROR_SET(errcode); 352 evtrb->dwTrb3 = XHCI_TRB_3_TYPE_SET(evtype); 353 } 354 355 356 /* controller reset */ 357 static void 358 pci_xhci_reset(struct pci_xhci_softc *sc) 359 { 360 int i; 361 362 sc->rtsregs.er_enq_idx = 0; 363 sc->rtsregs.er_events_cnt = 0; 364 sc->rtsregs.event_pcs = 1; 365 366 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 367 pci_xhci_reset_slot(sc, i); 368 } 369 } 370 371 static uint32_t 372 pci_xhci_usbcmd_write(struct pci_xhci_softc *sc, uint32_t cmd) 373 { 374 int do_intr = 0; 375 int i; 376 377 if (cmd & XHCI_CMD_RS) { 378 do_intr = (sc->opregs.usbcmd & XHCI_CMD_RS) == 0; 379 380 sc->opregs.usbcmd |= XHCI_CMD_RS; 381 sc->opregs.usbsts &= ~XHCI_STS_HCH; 382 sc->opregs.usbsts |= XHCI_STS_PCD; 383 384 /* Queue port change event on controller run from stop */ 385 if (do_intr) 386 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 387 struct pci_xhci_dev_emu *dev; 388 struct pci_xhci_portregs *port; 389 struct xhci_trb evtrb; 390 391 if ((dev = XHCI_DEVINST_PTR(sc, i)) == NULL) 392 continue; 393 394 port = XHCI_PORTREG_PTR(sc, i); 395 port->portsc |= XHCI_PS_CSC | XHCI_PS_CCS; 396 port->portsc &= ~XHCI_PS_PLS_MASK; 397 398 /* 399 * XHCI 4.19.3 USB2 RxDetect->Polling, 400 * USB3 Polling->U0 401 */ 402 if (dev->dev_ue->ue_usbver == 2) 403 port->portsc |= 404 XHCI_PS_PLS_SET(UPS_PORT_LS_POLL); 405 else 406 port->portsc |= 407 XHCI_PS_PLS_SET(UPS_PORT_LS_U0); 408 409 pci_xhci_set_evtrb(&evtrb, i, 410 XHCI_TRB_ERROR_SUCCESS, 411 XHCI_TRB_EVENT_PORT_STS_CHANGE); 412 413 if (pci_xhci_insert_event(sc, &evtrb, 0) != 414 XHCI_TRB_ERROR_SUCCESS) 415 break; 416 } 417 } else { 418 sc->opregs.usbcmd &= ~XHCI_CMD_RS; 419 sc->opregs.usbsts |= XHCI_STS_HCH; 420 sc->opregs.usbsts &= ~XHCI_STS_PCD; 421 } 422 423 /* start execution of schedule; stop when set to 0 */ 424 cmd |= sc->opregs.usbcmd & XHCI_CMD_RS; 425 426 if (cmd & XHCI_CMD_HCRST) { 427 /* reset controller */ 428 pci_xhci_reset(sc); 429 cmd &= ~XHCI_CMD_HCRST; 430 } 431 432 cmd &= ~(XHCI_CMD_CSS | XHCI_CMD_CRS); 433 434 if (do_intr) 435 pci_xhci_assert_interrupt(sc); 436 437 return (cmd); 438 } 439 440 static void 441 pci_xhci_portregs_write(struct pci_xhci_softc *sc, uint64_t offset, 442 uint64_t value) 443 { 444 struct xhci_trb evtrb; 445 struct pci_xhci_portregs *p; 446 int port; 447 uint32_t oldpls, newpls; 448 449 if (sc->portregs == NULL) 450 return; 451 452 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ; 453 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ; 454 455 DPRINTF(("pci_xhci: portregs wr offset 0x%lx, port %u: 0x%lx", 456 offset, port, value)); 457 458 assert(port >= 0); 459 460 if (port > XHCI_MAX_DEVS) { 461 DPRINTF(("pci_xhci: portregs_write port %d > 
ndevices", 462 port)); 463 return; 464 } 465 466 if (XHCI_DEVINST_PTR(sc, port) == NULL) { 467 DPRINTF(("pci_xhci: portregs_write to unattached port %d", 468 port)); 469 } 470 471 p = XHCI_PORTREG_PTR(sc, port); 472 switch (offset) { 473 case 0: 474 /* port reset or warm reset */ 475 if (value & (XHCI_PS_PR | XHCI_PS_WPR)) { 476 pci_xhci_reset_port(sc, port, value & XHCI_PS_WPR); 477 break; 478 } 479 480 if ((p->portsc & XHCI_PS_PP) == 0) { 481 WPRINTF(("pci_xhci: portregs_write to unpowered " 482 "port %d", port)); 483 break; 484 } 485 486 /* Port status and control register */ 487 oldpls = XHCI_PS_PLS_GET(p->portsc); 488 newpls = XHCI_PS_PLS_GET(value); 489 490 p->portsc &= XHCI_PS_PED | XHCI_PS_PLS_MASK | 491 XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK; 492 493 if (XHCI_DEVINST_PTR(sc, port)) 494 p->portsc |= XHCI_PS_CCS; 495 496 p->portsc |= (value & 497 ~(XHCI_PS_OCA | 498 XHCI_PS_PR | 499 XHCI_PS_PED | 500 XHCI_PS_PLS_MASK | /* link state */ 501 XHCI_PS_SPEED_MASK | 502 XHCI_PS_PIC_MASK | /* port indicator */ 503 XHCI_PS_LWS | XHCI_PS_DR | XHCI_PS_WPR)); 504 505 /* clear control bits */ 506 p->portsc &= ~(value & 507 (XHCI_PS_CSC | 508 XHCI_PS_PEC | 509 XHCI_PS_WRC | 510 XHCI_PS_OCC | 511 XHCI_PS_PRC | 512 XHCI_PS_PLC | 513 XHCI_PS_CEC | 514 XHCI_PS_CAS)); 515 516 /* port disable request; for USB3, don't care */ 517 if (value & XHCI_PS_PED) 518 DPRINTF(("Disable port %d request", port)); 519 520 if (!(value & XHCI_PS_LWS)) 521 break; 522 523 DPRINTF(("Port new PLS: %d", newpls)); 524 switch (newpls) { 525 case 0: /* U0 */ 526 case 3: /* U3 */ 527 if (oldpls != newpls) { 528 p->portsc &= ~XHCI_PS_PLS_MASK; 529 p->portsc |= XHCI_PS_PLS_SET(newpls) | 530 XHCI_PS_PLC; 531 532 if (oldpls != 0 && newpls == 0) { 533 pci_xhci_set_evtrb(&evtrb, port, 534 XHCI_TRB_ERROR_SUCCESS, 535 XHCI_TRB_EVENT_PORT_STS_CHANGE); 536 537 pci_xhci_insert_event(sc, &evtrb, 1); 538 } 539 } 540 break; 541 542 default: 543 DPRINTF(("Unhandled change port %d PLS %u", 544 port, newpls)); 545 break; 546 } 547 break; 548 case 4: 549 /* Port power management status and control register */ 550 p->portpmsc = value; 551 break; 552 case 8: 553 /* Port link information register */ 554 DPRINTF(("pci_xhci attempted write to PORTLI, port %d", 555 port)); 556 break; 557 case 12: 558 /* 559 * Port hardware LPM control register. 560 * For USB3, this register is reserved. 
		 */
		p->porthlpmc = value;
		break;
	}
}

struct xhci_dev_ctx *
pci_xhci_get_dev_ctx(struct pci_xhci_softc *sc, uint32_t slot)
{
	uint64_t devctx_addr;
	struct xhci_dev_ctx *devctx;

	assert(slot > 0 && slot <= sc->ndevices);
	assert(sc->opregs.dcbaa_p != NULL);

	devctx_addr = sc->opregs.dcbaa_p->dcba[slot];

	if (devctx_addr == 0) {
		DPRINTF(("get_dev_ctx devctx_addr == 0"));
		return (NULL);
	}

	DPRINTF(("pci_xhci: get dev ctx, slot %u devctx addr %016lx",
	    slot, devctx_addr));
	devctx = XHCI_GADDR(sc, devctx_addr & ~0x3FUL);

	return (devctx);
}

struct xhci_trb *
pci_xhci_trb_next(struct pci_xhci_softc *sc, struct xhci_trb *curtrb,
    uint64_t *guestaddr)
{
	struct xhci_trb *next;

	assert(curtrb != NULL);

	if (XHCI_TRB_3_TYPE_GET(curtrb->dwTrb3) == XHCI_TRB_TYPE_LINK) {
		if (guestaddr)
			*guestaddr = curtrb->qwTrb0 & ~0xFUL;

		next = XHCI_GADDR(sc, curtrb->qwTrb0 & ~0xFUL);
	} else {
		if (guestaddr)
			*guestaddr += sizeof(struct xhci_trb) & ~0xFUL;

		next = curtrb + 1;
	}

	return (next);
}

static void
pci_xhci_assert_interrupt(struct pci_xhci_softc *sc)
{

	sc->rtsregs.intrreg.erdp |= XHCI_ERDP_LO_BUSY;
	sc->rtsregs.intrreg.iman |= XHCI_IMAN_INTR_PEND;
	sc->opregs.usbsts |= XHCI_STS_EINT;

	/* only trigger interrupt if permitted */
	if ((sc->opregs.usbcmd & XHCI_CMD_INTE) &&
	    (sc->rtsregs.intrreg.iman & XHCI_IMAN_INTR_ENA)) {
		if (pci_msi_enabled(sc->xsc_pi))
			pci_generate_msi(sc->xsc_pi, 0);
		else
			pci_lintr_assert(sc->xsc_pi);
	}
}

static void
pci_xhci_deassert_interrupt(struct pci_xhci_softc *sc)
{

	if (!pci_msi_enabled(sc->xsc_pi))
		pci_lintr_assert(sc->xsc_pi);
}

static void
pci_xhci_init_ep(struct pci_xhci_dev_emu *dev, int epid)
{
	struct xhci_dev_ctx *dev_ctx;
	struct pci_xhci_dev_ep *devep;
	struct xhci_endp_ctx *ep_ctx;
	uint32_t pstreams;
	int i;

	dev_ctx = dev->dev_ctx;
	ep_ctx = &dev_ctx->ctx_ep[epid];
	devep = &dev->eps[epid];
	pstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0);
	if (pstreams > 0) {
		DPRINTF(("init_ep %d with pstreams %d", epid, pstreams));
		assert(devep->ep_sctx_trbs == NULL);

		devep->ep_sctx = XHCI_GADDR(dev->xsc, ep_ctx->qwEpCtx2 &
		    XHCI_EPCTX_2_TR_DQ_PTR_MASK);
		devep->ep_sctx_trbs = calloc(pstreams,
		    sizeof(struct pci_xhci_trb_ring));
		for (i = 0; i < pstreams; i++) {
			devep->ep_sctx_trbs[i].ringaddr =
			    devep->ep_sctx[i].qwSctx0 &
			    XHCI_SCTX_0_TR_DQ_PTR_MASK;
			devep->ep_sctx_trbs[i].ccs =
			    XHCI_SCTX_0_DCS_GET(devep->ep_sctx[i].qwSctx0);
		}
	} else {
		DPRINTF(("init_ep %d with no pstreams", epid));
		devep->ep_ringaddr = ep_ctx->qwEpCtx2 &
		    XHCI_EPCTX_2_TR_DQ_PTR_MASK;
		devep->ep_ccs = XHCI_EPCTX_2_DCS_GET(ep_ctx->qwEpCtx2);
		devep->ep_tr = XHCI_GADDR(dev->xsc, devep->ep_ringaddr);
		DPRINTF(("init_ep tr DCS %x", devep->ep_ccs));
	}

	if (devep->ep_xfer == NULL) {
		devep->ep_xfer = malloc(sizeof(struct usb_data_xfer));
		USB_DATA_XFER_INIT(devep->ep_xfer);
	}
}

static void
pci_xhci_disable_ep(struct pci_xhci_dev_emu *dev, int epid)
{
	struct xhci_dev_ctx *dev_ctx;
	struct pci_xhci_dev_ep *devep;
	struct xhci_endp_ctx *ep_ctx;

	DPRINTF(("pci_xhci disable_ep %d", epid));

	dev_ctx = dev->dev_ctx;
	ep_ctx = &dev_ctx->ctx_ep[epid];
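	/*
	 * Move the endpoint to the Disabled state before releasing the
	 * stream-ring bookkeeping and the transfer chain that were set up
	 * by pci_xhci_init_ep().
	 */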
	ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_DISABLED;

	devep = &dev->eps[epid];
	if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0 &&
	    devep->ep_sctx_trbs != NULL)
		free(devep->ep_sctx_trbs);

	if (devep->ep_xfer != NULL) {
		free(devep->ep_xfer);
		devep->ep_xfer = NULL;
	}

	memset(devep, 0, sizeof(struct pci_xhci_dev_ep));
}


/* reset device at slot and data structures related to it */
static void
pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot)
{
	struct pci_xhci_dev_emu *dev;

	dev = XHCI_SLOTDEV_PTR(sc, slot);

	if (!dev) {
		DPRINTF(("xhci reset unassigned slot (%d)?", slot));
	} else {
		dev->dev_slotstate = XHCI_ST_DISABLED;
	}

	/* TODO: reset ring buffer pointers */
}

static int
pci_xhci_insert_event(struct pci_xhci_softc *sc, struct xhci_trb *evtrb,
    int do_intr)
{
	struct pci_xhci_rtsregs *rts;
	uint64_t erdp;
	int erdp_idx;
	int err;
	struct xhci_trb *evtrbptr;

	err = XHCI_TRB_ERROR_SUCCESS;

	rts = &sc->rtsregs;

	erdp = rts->intrreg.erdp & ~0xF;
	erdp_idx = (erdp - rts->erstba_p[rts->er_deq_seg].qwEvrsTablePtr) /
	    sizeof(struct xhci_trb);

	DPRINTF(("pci_xhci: insert event 0[%lx] 2[%x] 3[%x]",
	    evtrb->qwTrb0, evtrb->dwTrb2, evtrb->dwTrb3));
	DPRINTF(("\terdp idx %d/seg %d, enq idx %d/seg %d, pcs %u",
	    erdp_idx, rts->er_deq_seg, rts->er_enq_idx,
	    rts->er_enq_seg, rts->event_pcs));
	DPRINTF(("\t(erdp=0x%lx, erst=0x%lx, tblsz=%u, do_intr %d)",
	    erdp, rts->erstba_p->qwEvrsTablePtr,
	    rts->erstba_p->dwEvrsTableSize, do_intr));

	evtrbptr = &rts->erst_p[rts->er_enq_idx];

	/* TODO: multi-segment table */
	if (rts->er_events_cnt >= rts->erstba_p->dwEvrsTableSize) {
		DPRINTF(("pci_xhci[%d] cannot insert event; ring full",
		    __LINE__));
		err = XHCI_TRB_ERROR_EV_RING_FULL;
		goto done;
	}

	if (rts->er_events_cnt == rts->erstba_p->dwEvrsTableSize - 1) {
		struct xhci_trb errev;

		if ((evtrbptr->dwTrb3 & 0x1) == (rts->event_pcs & 0x1)) {

			DPRINTF(("pci_xhci[%d] insert evt err: ring full",
			    __LINE__));

			errev.qwTrb0 = 0;
			errev.dwTrb2 = XHCI_TRB_2_ERROR_SET(
			    XHCI_TRB_ERROR_EV_RING_FULL);
			errev.dwTrb3 = XHCI_TRB_3_TYPE_SET(
			    XHCI_TRB_EVENT_HOST_CTRL) |
			    rts->event_pcs;
			rts->er_events_cnt++;
			memcpy(&rts->erst_p[rts->er_enq_idx], &errev,
			    sizeof(struct xhci_trb));
			rts->er_enq_idx = (rts->er_enq_idx + 1) %
			    rts->erstba_p->dwEvrsTableSize;
			err = XHCI_TRB_ERROR_EV_RING_FULL;
			do_intr = 1;

			goto done;
		}
	} else {
		rts->er_events_cnt++;
	}

	evtrb->dwTrb3 &= ~XHCI_TRB_3_CYCLE_BIT;
	evtrb->dwTrb3 |= rts->event_pcs;

	memcpy(&rts->erst_p[rts->er_enq_idx], evtrb, sizeof(struct xhci_trb));
	rts->er_enq_idx = (rts->er_enq_idx + 1) %
	    rts->erstba_p->dwEvrsTableSize;

	if (rts->er_enq_idx == 0)
		rts->event_pcs ^= 1;

done:
	if (do_intr)
		pci_xhci_assert_interrupt(sc);

	return (err);
}

static uint32_t
pci_xhci_cmd_enable_slot(struct pci_xhci_softc *sc, uint32_t *slot)
{
	struct pci_xhci_dev_emu *dev;
	uint32_t cmderr;
	int i;

	cmderr = XHCI_TRB_ERROR_NO_SLOTS;
	if (sc->portregs != NULL)
		for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
			dev = XHCI_SLOTDEV_PTR(sc, i);
			if (dev && dev->dev_slotstate == XHCI_ST_DISABLED) {
				*slot = i;
				dev->dev_slotstate = XHCI_ST_ENABLED;
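				/*
				 * Found a free slot; in this emulation the
				 * slot number doubles as the device address
				 * reported back through hci_address below.
				 */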
				cmderr = XHCI_TRB_ERROR_SUCCESS;
				dev->hci.hci_address = i;
				break;
			}
		}

	DPRINTF(("pci_xhci enable slot (error=%d) slot %u",
	    cmderr != XHCI_TRB_ERROR_SUCCESS, *slot));

	return (cmderr);
}

static uint32_t
pci_xhci_cmd_disable_slot(struct pci_xhci_softc *sc, uint32_t slot)
{
	struct pci_xhci_dev_emu *dev;
	uint32_t cmderr;

	DPRINTF(("pci_xhci disable slot %u", slot));

	cmderr = XHCI_TRB_ERROR_NO_SLOTS;
	if (sc->portregs == NULL)
		goto done;

	if (slot > sc->ndevices) {
		cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
		goto done;
	}

	dev = XHCI_SLOTDEV_PTR(sc, slot);
	if (dev) {
		if (dev->dev_slotstate == XHCI_ST_DISABLED) {
			cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
		} else {
			dev->dev_slotstate = XHCI_ST_DISABLED;
			cmderr = XHCI_TRB_ERROR_SUCCESS;
			/* TODO: reset events and endpoints */
		}
	}

done:
	return (cmderr);
}

static uint32_t
pci_xhci_cmd_reset_device(struct pci_xhci_softc *sc, uint32_t slot)
{
	struct pci_xhci_dev_emu *dev;
	struct xhci_dev_ctx *dev_ctx;
	struct xhci_endp_ctx *ep_ctx;
	uint32_t cmderr;
	int i;

	cmderr = XHCI_TRB_ERROR_NO_SLOTS;
	if (sc->portregs == NULL)
		goto done;

	DPRINTF(("pci_xhci reset device slot %u", slot));

	dev = XHCI_SLOTDEV_PTR(sc, slot);
	if (!dev || dev->dev_slotstate == XHCI_ST_DISABLED)
		cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
	else {
		dev->dev_slotstate = XHCI_ST_DEFAULT;

		dev->hci.hci_address = 0;
		dev_ctx = pci_xhci_get_dev_ctx(sc, slot);

		/* slot state */
		dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
		    dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_DEFAULT,
		    0x1F, 27);

		/* number of contexts */
		dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE(
		    dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27);

		/* reset all eps other than ep-0 */
		for (i = 2; i <= 31; i++) {
			ep_ctx = &dev_ctx->ctx_ep[i];
			ep_ctx->dwEpCtx0 = FIELD_REPLACE( ep_ctx->dwEpCtx0,
			    XHCI_ST_EPCTX_DISABLED, 0x7, 0);
		}

		cmderr = XHCI_TRB_ERROR_SUCCESS;
	}

	pci_xhci_reset_slot(sc, slot);

done:
	return (cmderr);
}

static uint32_t
pci_xhci_cmd_address_device(struct pci_xhci_softc *sc, uint32_t slot,
    struct xhci_trb *trb)
{
	struct pci_xhci_dev_emu *dev;
	struct xhci_input_dev_ctx *input_ctx;
	struct xhci_slot_ctx *islot_ctx;
	struct xhci_dev_ctx *dev_ctx;
	struct xhci_endp_ctx *ep0_ctx;
	uint32_t cmderr;

	input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
	islot_ctx = &input_ctx->ctx_slot;
	ep0_ctx = &input_ctx->ctx_ep[1];

	cmderr = XHCI_TRB_ERROR_SUCCESS;

	DPRINTF(("pci_xhci: address device, input ctl: D 0x%08x A 0x%08x,",
	    input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1));
	DPRINTF((" slot %08x %08x %08x %08x",
	    islot_ctx->dwSctx0, islot_ctx->dwSctx1,
	    islot_ctx->dwSctx2, islot_ctx->dwSctx3));
	DPRINTF((" ep0 %08x %08x %016lx %08x",
	    ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
	    ep0_ctx->dwEpCtx4));

	/* when setting address: drop-ctx=0, add-ctx=slot+ep0 */
	if ((input_ctx->ctx_input.dwInCtx0 != 0) ||
	    (input_ctx->ctx_input.dwInCtx1 & 0x03) != 0x03) {
		DPRINTF(("pci_xhci: address device, input ctl invalid"));
		cmderr = XHCI_TRB_ERROR_TRB;
		goto done;
	}

	/* assign address to slot */
	dev_ctx = pci_xhci_get_dev_ctx(sc, slot);

	DPRINTF(("pci_xhci: address device, dev ctx"));
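	/*
	 * The current output device context is dumped here and then
	 * overwritten below with the slot and ep0 contexts supplied by the
	 * guest's input context.
	 */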
DPRINTF((" slot %08x %08x %08x %08x", 954 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 955 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 956 957 dev = XHCI_SLOTDEV_PTR(sc, slot); 958 assert(dev != NULL); 959 960 dev->hci.hci_address = slot; 961 dev->dev_ctx = dev_ctx; 962 963 if (dev->dev_ue->ue_reset == NULL || 964 dev->dev_ue->ue_reset(dev->dev_sc) < 0) { 965 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 966 goto done; 967 } 968 969 memcpy(&dev_ctx->ctx_slot, islot_ctx, sizeof(struct xhci_slot_ctx)); 970 971 dev_ctx->ctx_slot.dwSctx3 = 972 XHCI_SCTX_3_SLOT_STATE_SET(XHCI_ST_SLCTX_ADDRESSED) | 973 XHCI_SCTX_3_DEV_ADDR_SET(slot); 974 975 memcpy(&dev_ctx->ctx_ep[1], ep0_ctx, sizeof(struct xhci_endp_ctx)); 976 ep0_ctx = &dev_ctx->ctx_ep[1]; 977 ep0_ctx->dwEpCtx0 = (ep0_ctx->dwEpCtx0 & ~0x7) | 978 XHCI_EPCTX_0_EPSTATE_SET(XHCI_ST_EPCTX_RUNNING); 979 980 pci_xhci_init_ep(dev, 1); 981 982 dev->dev_slotstate = XHCI_ST_ADDRESSED; 983 984 DPRINTF(("pci_xhci: address device, output ctx")); 985 DPRINTF((" slot %08x %08x %08x %08x", 986 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 987 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 988 DPRINTF((" ep0 %08x %08x %016lx %08x", 989 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 990 ep0_ctx->dwEpCtx4)); 991 992 done: 993 return (cmderr); 994 } 995 996 static uint32_t 997 pci_xhci_cmd_config_ep(struct pci_xhci_softc *sc, uint32_t slot, 998 struct xhci_trb *trb) 999 { 1000 struct xhci_input_dev_ctx *input_ctx; 1001 struct pci_xhci_dev_emu *dev; 1002 struct xhci_dev_ctx *dev_ctx; 1003 struct xhci_endp_ctx *ep_ctx, *iep_ctx; 1004 uint32_t cmderr; 1005 int i; 1006 1007 cmderr = XHCI_TRB_ERROR_SUCCESS; 1008 1009 DPRINTF(("pci_xhci config_ep slot %u", slot)); 1010 1011 dev = XHCI_SLOTDEV_PTR(sc, slot); 1012 assert(dev != NULL); 1013 1014 if ((trb->dwTrb3 & XHCI_TRB_3_DCEP_BIT) != 0) { 1015 DPRINTF(("pci_xhci config_ep - deconfigure ep slot %u", 1016 slot)); 1017 if (dev->dev_ue->ue_stop != NULL) 1018 dev->dev_ue->ue_stop(dev->dev_sc); 1019 1020 dev->dev_slotstate = XHCI_ST_ADDRESSED; 1021 1022 dev->hci.hci_address = 0; 1023 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1024 1025 /* number of contexts */ 1026 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE( 1027 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); 1028 1029 /* slot state */ 1030 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1031 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_ADDRESSED, 1032 0x1F, 27); 1033 1034 /* disable endpoints */ 1035 for (i = 2; i < 32; i++) 1036 pci_xhci_disable_ep(dev, i); 1037 1038 cmderr = XHCI_TRB_ERROR_SUCCESS; 1039 1040 goto done; 1041 } 1042 1043 if (dev->dev_slotstate < XHCI_ST_ADDRESSED) { 1044 DPRINTF(("pci_xhci: config_ep slotstate x%x != addressed", 1045 dev->dev_slotstate)); 1046 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 1047 goto done; 1048 } 1049 1050 /* In addressed/configured state; 1051 * for each drop endpoint ctx flag: 1052 * ep->state = DISABLED 1053 * for each add endpoint ctx flag: 1054 * cp(ep-in, ep-out) 1055 * ep->state = RUNNING 1056 * for each drop+add endpoint flag: 1057 * reset ep resources 1058 * cp(ep-in, ep-out) 1059 * ep->state = RUNNING 1060 * if input->DisabledCtx[2-31] < 30: (at least 1 ep not disabled) 1061 * slot->state = configured 1062 */ 1063 1064 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1065 dev_ctx = dev->dev_ctx; 1066 DPRINTF(("pci_xhci: config_ep inputctx: D:x%08x A:x%08x 7:x%08x", 1067 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1, 1068 input_ctx->ctx_input.dwInCtx7)); 1069 1070 for (i = 2; i <= 31; 
i++) { 1071 ep_ctx = &dev_ctx->ctx_ep[i]; 1072 1073 if (input_ctx->ctx_input.dwInCtx0 & 1074 XHCI_INCTX_0_DROP_MASK(i)) { 1075 DPRINTF((" config ep - dropping ep %d", i)); 1076 pci_xhci_disable_ep(dev, i); 1077 } 1078 1079 if (input_ctx->ctx_input.dwInCtx1 & 1080 XHCI_INCTX_1_ADD_MASK(i)) { 1081 iep_ctx = &input_ctx->ctx_ep[i]; 1082 1083 DPRINTF((" enable ep[%d] %08x %08x %016lx %08x", 1084 i, iep_ctx->dwEpCtx0, iep_ctx->dwEpCtx1, 1085 iep_ctx->qwEpCtx2, iep_ctx->dwEpCtx4)); 1086 1087 memcpy(ep_ctx, iep_ctx, sizeof(struct xhci_endp_ctx)); 1088 1089 pci_xhci_init_ep(dev, i); 1090 1091 /* ep state */ 1092 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1093 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1094 } 1095 } 1096 1097 /* slot state to configured */ 1098 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1099 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_CONFIGURED, 0x1F, 27); 1100 dev_ctx->ctx_slot.dwSctx0 = FIELD_COPY( 1101 dev_ctx->ctx_slot.dwSctx0, input_ctx->ctx_slot.dwSctx0, 0x1F, 27); 1102 dev->dev_slotstate = XHCI_ST_CONFIGURED; 1103 1104 DPRINTF(("EP configured; slot %u [0]=0x%08x [1]=0x%08x [2]=0x%08x " 1105 "[3]=0x%08x", 1106 slot, dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1107 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1108 1109 done: 1110 return (cmderr); 1111 } 1112 1113 static uint32_t 1114 pci_xhci_cmd_reset_ep(struct pci_xhci_softc *sc, uint32_t slot, 1115 struct xhci_trb *trb) 1116 { 1117 struct pci_xhci_dev_emu *dev; 1118 struct pci_xhci_dev_ep *devep; 1119 struct xhci_dev_ctx *dev_ctx; 1120 struct xhci_endp_ctx *ep_ctx; 1121 uint32_t cmderr, epid; 1122 uint32_t type; 1123 1124 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1125 1126 DPRINTF(("pci_xhci: reset ep %u: slot %u", epid, slot)); 1127 1128 cmderr = XHCI_TRB_ERROR_SUCCESS; 1129 1130 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1131 1132 dev = XHCI_SLOTDEV_PTR(sc, slot); 1133 assert(dev != NULL); 1134 1135 if (type == XHCI_TRB_TYPE_STOP_EP && 1136 (trb->dwTrb3 & XHCI_TRB_3_SUSP_EP_BIT) != 0) { 1137 /* XXX suspend endpoint for 10ms */ 1138 } 1139 1140 if (epid < 1 || epid > 31) { 1141 DPRINTF(("pci_xhci: reset ep: invalid epid %u", epid)); 1142 cmderr = XHCI_TRB_ERROR_TRB; 1143 goto done; 1144 } 1145 1146 devep = &dev->eps[epid]; 1147 if (devep->ep_xfer != NULL) 1148 USB_DATA_XFER_RESET(devep->ep_xfer); 1149 1150 dev_ctx = dev->dev_ctx; 1151 assert(dev_ctx != NULL); 1152 1153 ep_ctx = &dev_ctx->ctx_ep[epid]; 1154 1155 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1156 1157 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) == 0) 1158 ep_ctx->qwEpCtx2 = devep->ep_ringaddr | devep->ep_ccs; 1159 1160 DPRINTF(("pci_xhci: reset ep[%u] %08x %08x %016lx %08x", 1161 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1162 ep_ctx->dwEpCtx4)); 1163 1164 if (type == XHCI_TRB_TYPE_RESET_EP && 1165 (dev->dev_ue->ue_reset == NULL || 1166 dev->dev_ue->ue_reset(dev->dev_sc) < 0)) { 1167 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 1168 goto done; 1169 } 1170 1171 done: 1172 return (cmderr); 1173 } 1174 1175 1176 static uint32_t 1177 pci_xhci_find_stream(struct pci_xhci_softc *sc, struct xhci_endp_ctx *ep, 1178 uint32_t streamid, struct xhci_stream_ctx **osctx) 1179 { 1180 struct xhci_stream_ctx *sctx; 1181 uint32_t maxpstreams; 1182 1183 maxpstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep->dwEpCtx0); 1184 if (maxpstreams == 0) 1185 return (XHCI_TRB_ERROR_TRB); 1186 1187 if (maxpstreams > XHCI_STREAMS_MAX) 1188 return (XHCI_TRB_ERROR_INVALID_SID); 1189 1190 if (XHCI_EPCTX_0_LSA_GET(ep->dwEpCtx0) == 0) { 1191 
DPRINTF(("pci_xhci: find_stream; LSA bit not set")); 1192 return (XHCI_TRB_ERROR_INVALID_SID); 1193 } 1194 1195 /* only support primary stream */ 1196 if (streamid > maxpstreams) 1197 return (XHCI_TRB_ERROR_STREAM_TYPE); 1198 1199 sctx = XHCI_GADDR(sc, ep->qwEpCtx2 & ~0xFUL) + streamid; 1200 if (!XHCI_SCTX_0_SCT_GET(sctx->qwSctx0)) 1201 return (XHCI_TRB_ERROR_STREAM_TYPE); 1202 1203 *osctx = sctx; 1204 1205 return (XHCI_TRB_ERROR_SUCCESS); 1206 } 1207 1208 1209 static uint32_t 1210 pci_xhci_cmd_set_tr(struct pci_xhci_softc *sc, uint32_t slot, 1211 struct xhci_trb *trb) 1212 { 1213 struct pci_xhci_dev_emu *dev; 1214 struct pci_xhci_dev_ep *devep; 1215 struct xhci_dev_ctx *dev_ctx; 1216 struct xhci_endp_ctx *ep_ctx; 1217 uint32_t cmderr, epid; 1218 uint32_t streamid; 1219 1220 cmderr = XHCI_TRB_ERROR_SUCCESS; 1221 1222 dev = XHCI_SLOTDEV_PTR(sc, slot); 1223 assert(dev != NULL); 1224 1225 DPRINTF(("pci_xhci set_tr: new-tr x%016lx, SCT %u DCS %u", 1226 (trb->qwTrb0 & ~0xF), (uint32_t)((trb->qwTrb0 >> 1) & 0x7), 1227 (uint32_t)(trb->qwTrb0 & 0x1))); 1228 DPRINTF((" stream-id %u, slot %u, epid %u, C %u", 1229 (trb->dwTrb2 >> 16) & 0xFFFF, 1230 XHCI_TRB_3_SLOT_GET(trb->dwTrb3), 1231 XHCI_TRB_3_EP_GET(trb->dwTrb3), trb->dwTrb3 & 0x1)); 1232 1233 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1234 if (epid < 1 || epid > 31) { 1235 DPRINTF(("pci_xhci: set_tr_deq: invalid epid %u", epid)); 1236 cmderr = XHCI_TRB_ERROR_TRB; 1237 goto done; 1238 } 1239 1240 dev_ctx = dev->dev_ctx; 1241 assert(dev_ctx != NULL); 1242 1243 ep_ctx = &dev_ctx->ctx_ep[epid]; 1244 devep = &dev->eps[epid]; 1245 1246 switch (XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)) { 1247 case XHCI_ST_EPCTX_STOPPED: 1248 case XHCI_ST_EPCTX_ERROR: 1249 break; 1250 default: 1251 DPRINTF(("pci_xhci cmd set_tr invalid state %x", 1252 XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0))); 1253 cmderr = XHCI_TRB_ERROR_CONTEXT_STATE; 1254 goto done; 1255 } 1256 1257 streamid = XHCI_TRB_2_STREAM_GET(trb->dwTrb2); 1258 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0) { 1259 struct xhci_stream_ctx *sctx; 1260 1261 sctx = NULL; 1262 cmderr = pci_xhci_find_stream(sc, ep_ctx, streamid, &sctx); 1263 if (sctx != NULL) { 1264 assert(devep->ep_sctx != NULL); 1265 1266 devep->ep_sctx[streamid].qwSctx0 = trb->qwTrb0; 1267 devep->ep_sctx_trbs[streamid].ringaddr = 1268 trb->qwTrb0 & ~0xF; 1269 devep->ep_sctx_trbs[streamid].ccs = 1270 XHCI_EPCTX_2_DCS_GET(trb->qwTrb0); 1271 } 1272 } else { 1273 if (streamid != 0) { 1274 DPRINTF(("pci_xhci cmd set_tr streamid %x != 0", 1275 streamid)); 1276 } 1277 ep_ctx->qwEpCtx2 = trb->qwTrb0 & ~0xFUL; 1278 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & ~0xFUL; 1279 devep->ep_ccs = trb->qwTrb0 & 0x1; 1280 devep->ep_tr = XHCI_GADDR(sc, devep->ep_ringaddr); 1281 1282 DPRINTF(("pci_xhci set_tr first TRB:")); 1283 pci_xhci_dump_trb(devep->ep_tr); 1284 } 1285 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1286 1287 done: 1288 return (cmderr); 1289 } 1290 1291 static uint32_t 1292 pci_xhci_cmd_eval_ctx(struct pci_xhci_softc *sc, uint32_t slot, 1293 struct xhci_trb *trb) 1294 { 1295 struct xhci_input_dev_ctx *input_ctx; 1296 struct xhci_slot_ctx *islot_ctx; 1297 struct xhci_dev_ctx *dev_ctx; 1298 struct xhci_endp_ctx *ep0_ctx; 1299 uint32_t cmderr; 1300 1301 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1302 islot_ctx = &input_ctx->ctx_slot; 1303 ep0_ctx = &input_ctx->ctx_ep[1]; 1304 1305 cmderr = XHCI_TRB_ERROR_SUCCESS; 1306 DPRINTF(("pci_xhci: eval ctx, input ctl: D 0x%08x A 0x%08x,", 1307 input_ctx->ctx_input.dwInCtx0, 
input_ctx->ctx_input.dwInCtx1)); 1308 DPRINTF((" slot %08x %08x %08x %08x", 1309 islot_ctx->dwSctx0, islot_ctx->dwSctx1, 1310 islot_ctx->dwSctx2, islot_ctx->dwSctx3)); 1311 DPRINTF((" ep0 %08x %08x %016lx %08x", 1312 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1313 ep0_ctx->dwEpCtx4)); 1314 1315 /* this command expects drop-ctx=0 & add-ctx=slot+ep0 */ 1316 if ((input_ctx->ctx_input.dwInCtx0 != 0) || 1317 (input_ctx->ctx_input.dwInCtx1 & 0x03) == 0) { 1318 DPRINTF(("pci_xhci: eval ctx, input ctl invalid")); 1319 cmderr = XHCI_TRB_ERROR_TRB; 1320 goto done; 1321 } 1322 1323 /* assign address to slot; in this emulation, slot_id = address */ 1324 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1325 1326 DPRINTF(("pci_xhci: eval ctx, dev ctx")); 1327 DPRINTF((" slot %08x %08x %08x %08x", 1328 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1329 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1330 1331 if (input_ctx->ctx_input.dwInCtx1 & 0x01) { /* slot ctx */ 1332 /* set max exit latency */ 1333 dev_ctx->ctx_slot.dwSctx1 = FIELD_COPY( 1334 dev_ctx->ctx_slot.dwSctx1, input_ctx->ctx_slot.dwSctx1, 1335 0xFFFF, 0); 1336 1337 /* set interrupter target */ 1338 dev_ctx->ctx_slot.dwSctx2 = FIELD_COPY( 1339 dev_ctx->ctx_slot.dwSctx2, input_ctx->ctx_slot.dwSctx2, 1340 0x3FF, 22); 1341 } 1342 if (input_ctx->ctx_input.dwInCtx1 & 0x02) { /* control ctx */ 1343 /* set max packet size */ 1344 dev_ctx->ctx_ep[1].dwEpCtx1 = FIELD_COPY( 1345 dev_ctx->ctx_ep[1].dwEpCtx1, ep0_ctx->dwEpCtx1, 1346 0xFFFF, 16); 1347 1348 ep0_ctx = &dev_ctx->ctx_ep[1]; 1349 } 1350 1351 DPRINTF(("pci_xhci: eval ctx, output ctx")); 1352 DPRINTF((" slot %08x %08x %08x %08x", 1353 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1354 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1355 DPRINTF((" ep0 %08x %08x %016lx %08x", 1356 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1357 ep0_ctx->dwEpCtx4)); 1358 1359 done: 1360 return (cmderr); 1361 } 1362 1363 static int 1364 pci_xhci_complete_commands(struct pci_xhci_softc *sc) 1365 { 1366 struct xhci_trb evtrb; 1367 struct xhci_trb *trb; 1368 uint64_t crcr; 1369 uint32_t ccs; /* cycle state (XHCI 4.9.2) */ 1370 uint32_t type; 1371 uint32_t slot; 1372 uint32_t cmderr; 1373 int error; 1374 1375 error = 0; 1376 sc->opregs.crcr |= XHCI_CRCR_LO_CRR; 1377 1378 trb = sc->opregs.cr_p; 1379 ccs = sc->opregs.crcr & XHCI_CRCR_LO_RCS; 1380 crcr = sc->opregs.crcr & ~0xF; 1381 1382 while (1) { 1383 sc->opregs.cr_p = trb; 1384 1385 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1386 1387 if ((trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT) != 1388 (ccs & XHCI_TRB_3_CYCLE_BIT)) 1389 break; 1390 1391 DPRINTF(("pci_xhci: cmd type 0x%x, Trb0 x%016lx dwTrb2 x%08x" 1392 " dwTrb3 x%08x, TRB_CYCLE %u/ccs %u", 1393 type, trb->qwTrb0, trb->dwTrb2, trb->dwTrb3, 1394 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT, ccs)); 1395 1396 cmderr = XHCI_TRB_ERROR_SUCCESS; 1397 evtrb.dwTrb2 = 0; 1398 evtrb.dwTrb3 = (ccs & XHCI_TRB_3_CYCLE_BIT) | 1399 XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_CMD_COMPLETE); 1400 slot = 0; 1401 1402 switch (type) { 1403 case XHCI_TRB_TYPE_LINK: /* 0x06 */ 1404 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1405 ccs ^= XHCI_CRCR_LO_RCS; 1406 break; 1407 1408 case XHCI_TRB_TYPE_ENABLE_SLOT: /* 0x09 */ 1409 cmderr = pci_xhci_cmd_enable_slot(sc, &slot); 1410 break; 1411 1412 case XHCI_TRB_TYPE_DISABLE_SLOT: /* 0x0A */ 1413 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1414 cmderr = pci_xhci_cmd_disable_slot(sc, slot); 1415 break; 1416 1417 case XHCI_TRB_TYPE_ADDRESS_DEVICE: /* 0x0B */ 1418 slot = 
XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1419 cmderr = pci_xhci_cmd_address_device(sc, slot, trb); 1420 break; 1421 1422 case XHCI_TRB_TYPE_CONFIGURE_EP: /* 0x0C */ 1423 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1424 cmderr = pci_xhci_cmd_config_ep(sc, slot, trb); 1425 break; 1426 1427 case XHCI_TRB_TYPE_EVALUATE_CTX: /* 0x0D */ 1428 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1429 cmderr = pci_xhci_cmd_eval_ctx(sc, slot, trb); 1430 break; 1431 1432 case XHCI_TRB_TYPE_RESET_EP: /* 0x0E */ 1433 DPRINTF(("Reset Endpoint on slot %d", slot)); 1434 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1435 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1436 break; 1437 1438 case XHCI_TRB_TYPE_STOP_EP: /* 0x0F */ 1439 DPRINTF(("Stop Endpoint on slot %d", slot)); 1440 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1441 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1442 break; 1443 1444 case XHCI_TRB_TYPE_SET_TR_DEQUEUE: /* 0x10 */ 1445 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1446 cmderr = pci_xhci_cmd_set_tr(sc, slot, trb); 1447 break; 1448 1449 case XHCI_TRB_TYPE_RESET_DEVICE: /* 0x11 */ 1450 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1451 cmderr = pci_xhci_cmd_reset_device(sc, slot); 1452 break; 1453 1454 case XHCI_TRB_TYPE_FORCE_EVENT: /* 0x12 */ 1455 /* TODO: */ 1456 break; 1457 1458 case XHCI_TRB_TYPE_NEGOTIATE_BW: /* 0x13 */ 1459 break; 1460 1461 case XHCI_TRB_TYPE_SET_LATENCY_TOL: /* 0x14 */ 1462 break; 1463 1464 case XHCI_TRB_TYPE_GET_PORT_BW: /* 0x15 */ 1465 break; 1466 1467 case XHCI_TRB_TYPE_FORCE_HEADER: /* 0x16 */ 1468 break; 1469 1470 case XHCI_TRB_TYPE_NOOP_CMD: /* 0x17 */ 1471 break; 1472 1473 default: 1474 DPRINTF(("pci_xhci: unsupported cmd %x", type)); 1475 break; 1476 } 1477 1478 if (type != XHCI_TRB_TYPE_LINK) { 1479 /* 1480 * insert command completion event and assert intr 1481 */ 1482 evtrb.qwTrb0 = crcr; 1483 evtrb.dwTrb2 |= XHCI_TRB_2_ERROR_SET(cmderr); 1484 evtrb.dwTrb3 |= XHCI_TRB_3_SLOT_SET(slot); 1485 DPRINTF(("pci_xhci: command 0x%x result: 0x%x", 1486 type, cmderr)); 1487 pci_xhci_insert_event(sc, &evtrb, 1); 1488 } 1489 1490 trb = pci_xhci_trb_next(sc, trb, &crcr); 1491 } 1492 1493 sc->opregs.crcr = crcr | (sc->opregs.crcr & XHCI_CRCR_LO_CA) | ccs; 1494 sc->opregs.crcr &= ~XHCI_CRCR_LO_CRR; 1495 return (error); 1496 } 1497 1498 static void 1499 pci_xhci_dump_trb(struct xhci_trb *trb) 1500 { 1501 static const char *trbtypes[] = { 1502 "RESERVED", 1503 "NORMAL", 1504 "SETUP_STAGE", 1505 "DATA_STAGE", 1506 "STATUS_STAGE", 1507 "ISOCH", 1508 "LINK", 1509 "EVENT_DATA", 1510 "NOOP", 1511 "ENABLE_SLOT", 1512 "DISABLE_SLOT", 1513 "ADDRESS_DEVICE", 1514 "CONFIGURE_EP", 1515 "EVALUATE_CTX", 1516 "RESET_EP", 1517 "STOP_EP", 1518 "SET_TR_DEQUEUE", 1519 "RESET_DEVICE", 1520 "FORCE_EVENT", 1521 "NEGOTIATE_BW", 1522 "SET_LATENCY_TOL", 1523 "GET_PORT_BW", 1524 "FORCE_HEADER", 1525 "NOOP_CMD" 1526 }; 1527 uint32_t type; 1528 1529 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1530 DPRINTF(("pci_xhci: trb[@%p] type x%02x %s 0:x%016lx 2:x%08x 3:x%08x", 1531 trb, type, 1532 type <= XHCI_TRB_TYPE_NOOP_CMD ? 
trbtypes[type] : "INVALID", 1533 trb->qwTrb0, trb->dwTrb2, trb->dwTrb3)); 1534 } 1535 1536 static int 1537 pci_xhci_xfer_complete(struct pci_xhci_softc *sc, struct usb_data_xfer *xfer, 1538 uint32_t slot, uint32_t epid, int *do_intr) 1539 { 1540 struct pci_xhci_dev_emu *dev; 1541 struct pci_xhci_dev_ep *devep; 1542 struct xhci_dev_ctx *dev_ctx; 1543 struct xhci_endp_ctx *ep_ctx; 1544 struct xhci_trb *trb; 1545 struct xhci_trb evtrb; 1546 uint32_t trbflags; 1547 uint32_t edtla; 1548 int i, err; 1549 1550 dev = XHCI_SLOTDEV_PTR(sc, slot); 1551 devep = &dev->eps[epid]; 1552 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1553 1554 assert(dev_ctx != NULL); 1555 1556 ep_ctx = &dev_ctx->ctx_ep[epid]; 1557 1558 err = XHCI_TRB_ERROR_SUCCESS; 1559 *do_intr = 0; 1560 edtla = 0; 1561 1562 /* go through list of TRBs and insert event(s) */ 1563 for (i = xfer->head; xfer->ndata > 0; ) { 1564 evtrb.qwTrb0 = (uint64_t)xfer->data[i].hci_data; 1565 trb = XHCI_GADDR(sc, evtrb.qwTrb0); 1566 trbflags = trb->dwTrb3; 1567 1568 DPRINTF(("pci_xhci: xfer[%d] done?%u:%d trb %x %016lx %x " 1569 "(err %d) IOC?%d", 1570 i, xfer->data[i].processed, xfer->data[i].blen, 1571 XHCI_TRB_3_TYPE_GET(trbflags), evtrb.qwTrb0, 1572 trbflags, err, 1573 trb->dwTrb3 & XHCI_TRB_3_IOC_BIT ? 1 : 0)); 1574 1575 if (!xfer->data[i].processed) { 1576 xfer->head = i; 1577 break; 1578 } 1579 1580 xfer->ndata--; 1581 edtla += xfer->data[i].bdone; 1582 1583 trb->dwTrb3 = (trb->dwTrb3 & ~0x1) | (xfer->data[i].ccs); 1584 1585 pci_xhci_update_ep_ring(sc, dev, devep, ep_ctx, 1586 xfer->data[i].streamid, xfer->data[i].trbnext, 1587 xfer->data[i].ccs); 1588 1589 /* Only interrupt if IOC or short packet */ 1590 if (!(trb->dwTrb3 & XHCI_TRB_3_IOC_BIT) && 1591 !((err == XHCI_TRB_ERROR_SHORT_PKT) && 1592 (trb->dwTrb3 & XHCI_TRB_3_ISP_BIT))) { 1593 1594 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1595 continue; 1596 } 1597 1598 evtrb.dwTrb2 = XHCI_TRB_2_ERROR_SET(err) | 1599 XHCI_TRB_2_REM_SET(xfer->data[i].blen); 1600 1601 evtrb.dwTrb3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_TRANSFER) | 1602 XHCI_TRB_3_SLOT_SET(slot) | XHCI_TRB_3_EP_SET(epid); 1603 1604 if (XHCI_TRB_3_TYPE_GET(trbflags) == XHCI_TRB_TYPE_EVENT_DATA) { 1605 DPRINTF(("pci_xhci EVENT_DATA edtla %u", edtla)); 1606 evtrb.qwTrb0 = trb->qwTrb0; 1607 evtrb.dwTrb2 = (edtla & 0xFFFFF) | 1608 XHCI_TRB_2_ERROR_SET(err); 1609 evtrb.dwTrb3 |= XHCI_TRB_3_ED_BIT; 1610 edtla = 0; 1611 } 1612 1613 *do_intr = 1; 1614 1615 err = pci_xhci_insert_event(sc, &evtrb, 0); 1616 if (err != XHCI_TRB_ERROR_SUCCESS) { 1617 break; 1618 } 1619 1620 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1621 } 1622 1623 return (err); 1624 } 1625 1626 static void 1627 pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, struct pci_xhci_dev_emu *dev, 1628 struct pci_xhci_dev_ep *devep, struct xhci_endp_ctx *ep_ctx, 1629 uint32_t streamid, uint64_t ringaddr, int ccs) 1630 { 1631 1632 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) { 1633 devep->ep_sctx[streamid].qwSctx0 = (ringaddr & ~0xFUL) | 1634 (ccs & 0x1); 1635 1636 devep->ep_sctx_trbs[streamid].ringaddr = ringaddr & ~0xFUL; 1637 devep->ep_sctx_trbs[streamid].ccs = ccs & 0x1; 1638 ep_ctx->qwEpCtx2 = (ep_ctx->qwEpCtx2 & ~0x1) | (ccs & 0x1); 1639 1640 DPRINTF(("xhci update ep-ring stream %d, addr %lx", 1641 streamid, devep->ep_sctx[streamid].qwSctx0)); 1642 } else { 1643 devep->ep_ringaddr = ringaddr & ~0xFUL; 1644 devep->ep_ccs = ccs & 0x1; 1645 devep->ep_tr = XHCI_GADDR(sc, ringaddr & ~0xFUL); 1646 ep_ctx->qwEpCtx2 = (ringaddr & ~0xFUL) | (ccs & 0x1); 1647 1648 DPRINTF(("xhci update ep-ring, addr 
%lx", 1649 (devep->ep_ringaddr | devep->ep_ccs))); 1650 } 1651 } 1652 1653 /* 1654 * Outstanding transfer still in progress (device NAK'd earlier) so retry 1655 * the transfer again to see if it succeeds. 1656 */ 1657 static int 1658 pci_xhci_try_usb_xfer(struct pci_xhci_softc *sc, 1659 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1660 struct xhci_endp_ctx *ep_ctx, uint32_t slot, uint32_t epid) 1661 { 1662 struct usb_data_xfer *xfer; 1663 int err; 1664 int do_intr; 1665 1666 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1667 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1668 1669 err = 0; 1670 do_intr = 0; 1671 1672 xfer = devep->ep_xfer; 1673 USB_DATA_XFER_LOCK(xfer); 1674 1675 /* outstanding requests queued up */ 1676 if (dev->dev_ue->ue_data != NULL) { 1677 err = dev->dev_ue->ue_data(dev->dev_sc, xfer, 1678 epid & 0x1 ? USB_XFER_IN : USB_XFER_OUT, epid/2); 1679 if (err == USB_ERR_CANCELLED) { 1680 if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) == 1681 USB_NAK) 1682 err = XHCI_TRB_ERROR_SUCCESS; 1683 } else { 1684 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, 1685 &do_intr); 1686 if (err == XHCI_TRB_ERROR_SUCCESS && do_intr) { 1687 pci_xhci_assert_interrupt(sc); 1688 } 1689 1690 1691 /* XXX should not do it if error? */ 1692 USB_DATA_XFER_RESET(xfer); 1693 } 1694 } 1695 1696 USB_DATA_XFER_UNLOCK(xfer); 1697 1698 1699 return (err); 1700 } 1701 1702 1703 static int 1704 pci_xhci_handle_transfer(struct pci_xhci_softc *sc, 1705 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1706 struct xhci_endp_ctx *ep_ctx, struct xhci_trb *trb, uint32_t slot, 1707 uint32_t epid, uint64_t addr, uint32_t ccs, uint32_t streamid) 1708 { 1709 struct xhci_trb *setup_trb; 1710 struct usb_data_xfer *xfer; 1711 struct usb_data_xfer_block *xfer_block; 1712 uint64_t val; 1713 uint32_t trbflags; 1714 int do_intr, err; 1715 int do_retry; 1716 1717 ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0, 1718 XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1719 1720 xfer = devep->ep_xfer; 1721 USB_DATA_XFER_LOCK(xfer); 1722 1723 DPRINTF(("pci_xhci handle_transfer slot %u", slot)); 1724 1725 retry: 1726 err = 0; 1727 do_retry = 0; 1728 do_intr = 0; 1729 setup_trb = NULL; 1730 1731 while (1) { 1732 pci_xhci_dump_trb(trb); 1733 1734 trbflags = trb->dwTrb3; 1735 1736 if (XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK && 1737 (trbflags & XHCI_TRB_3_CYCLE_BIT) != 1738 (ccs & XHCI_TRB_3_CYCLE_BIT)) { 1739 DPRINTF(("Cycle-bit changed trbflags %x, ccs %x", 1740 trbflags & XHCI_TRB_3_CYCLE_BIT, ccs)); 1741 break; 1742 } 1743 1744 xfer_block = NULL; 1745 1746 switch (XHCI_TRB_3_TYPE_GET(trbflags)) { 1747 case XHCI_TRB_TYPE_LINK: 1748 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1749 ccs ^= 0x1; 1750 1751 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1752 (void *)addr, ccs); 1753 xfer_block->processed = 1; 1754 break; 1755 1756 case XHCI_TRB_TYPE_SETUP_STAGE: 1757 if ((trbflags & XHCI_TRB_3_IDT_BIT) == 0 || 1758 XHCI_TRB_2_BYTES_GET(trb->dwTrb2) != 8) { 1759 DPRINTF(("pci_xhci: invalid setup trb")); 1760 err = XHCI_TRB_ERROR_TRB; 1761 goto errout; 1762 } 1763 setup_trb = trb; 1764 1765 val = trb->qwTrb0; 1766 if (!xfer->ureq) 1767 xfer->ureq = malloc( 1768 sizeof(struct usb_device_request)); 1769 memcpy(xfer->ureq, &val, 1770 sizeof(struct usb_device_request)); 1771 1772 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1773 (void *)addr, ccs); 1774 xfer_block->processed = 1; 1775 break; 1776 1777 case XHCI_TRB_TYPE_NORMAL: 1778 case XHCI_TRB_TYPE_ISOCH: 1779 if (setup_trb != NULL) { 1780 DPRINTF(("pci_xhci: trb not supposed 
to be in " 1781 "ctl scope")); 1782 err = XHCI_TRB_ERROR_TRB; 1783 goto errout; 1784 } 1785 /* fall through */ 1786 1787 case XHCI_TRB_TYPE_DATA_STAGE: 1788 xfer_block = usb_data_xfer_append(xfer, 1789 (void *)(trbflags & XHCI_TRB_3_IDT_BIT ? 1790 &trb->qwTrb0 : XHCI_GADDR(sc, trb->qwTrb0)), 1791 trb->dwTrb2 & 0x1FFFF, (void *)addr, ccs); 1792 break; 1793 1794 case XHCI_TRB_TYPE_STATUS_STAGE: 1795 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1796 (void *)addr, ccs); 1797 break; 1798 1799 case XHCI_TRB_TYPE_NOOP: 1800 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1801 (void *)addr, ccs); 1802 xfer_block->processed = 1; 1803 break; 1804 1805 case XHCI_TRB_TYPE_EVENT_DATA: 1806 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1807 (void *)addr, ccs); 1808 if ((epid > 1) && (trbflags & XHCI_TRB_3_IOC_BIT)) { 1809 xfer_block->processed = 1; 1810 } 1811 break; 1812 1813 default: 1814 DPRINTF(("pci_xhci: handle xfer unexpected trb type " 1815 "0x%x", 1816 XHCI_TRB_3_TYPE_GET(trbflags))); 1817 err = XHCI_TRB_ERROR_TRB; 1818 goto errout; 1819 } 1820 1821 trb = pci_xhci_trb_next(sc, trb, &addr); 1822 1823 DPRINTF(("pci_xhci: next trb: 0x%lx", (uint64_t)trb)); 1824 1825 if (xfer_block) { 1826 xfer_block->trbnext = addr; 1827 xfer_block->streamid = streamid; 1828 } 1829 1830 if (!setup_trb && !(trbflags & XHCI_TRB_3_CHAIN_BIT) && 1831 XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK) { 1832 break; 1833 } 1834 1835 /* handle current batch that requires interrupt on complete */ 1836 if (trbflags & XHCI_TRB_3_IOC_BIT) { 1837 DPRINTF(("pci_xhci: trb IOC bit set")); 1838 if (epid == 1) 1839 do_retry = 1; 1840 break; 1841 } 1842 } 1843 1844 DPRINTF(("pci_xhci[%d]: xfer->ndata %u", __LINE__, xfer->ndata)); 1845 1846 if (xfer->ndata <= 0) 1847 goto errout; 1848 1849 if (epid == 1) { 1850 err = USB_ERR_NOT_STARTED; 1851 if (dev->dev_ue->ue_request != NULL) 1852 err = dev->dev_ue->ue_request(dev->dev_sc, xfer); 1853 setup_trb = NULL; 1854 } else { 1855 /* handle data transfer */ 1856 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1857 err = XHCI_TRB_ERROR_SUCCESS; 1858 goto errout; 1859 } 1860 1861 err = USB_TO_XHCI_ERR(err); 1862 if ((err == XHCI_TRB_ERROR_SUCCESS) || 1863 (err == XHCI_TRB_ERROR_STALL) || 1864 (err == XHCI_TRB_ERROR_SHORT_PKT)) { 1865 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, &do_intr); 1866 if (err != XHCI_TRB_ERROR_SUCCESS) 1867 do_retry = 0; 1868 } 1869 1870 errout: 1871 if (err == XHCI_TRB_ERROR_EV_RING_FULL) 1872 DPRINTF(("pci_xhci[%d]: event ring full", __LINE__)); 1873 1874 if (!do_retry) 1875 USB_DATA_XFER_UNLOCK(xfer); 1876 1877 if (do_intr) 1878 pci_xhci_assert_interrupt(sc); 1879 1880 if (do_retry) { 1881 USB_DATA_XFER_RESET(xfer); 1882 DPRINTF(("pci_xhci[%d]: retry:continuing with next TRBs", 1883 __LINE__)); 1884 goto retry; 1885 } 1886 1887 if (epid == 1) 1888 USB_DATA_XFER_RESET(xfer); 1889 1890 return (err); 1891 } 1892 1893 static void 1894 pci_xhci_device_doorbell(struct pci_xhci_softc *sc, uint32_t slot, 1895 uint32_t epid, uint32_t streamid) 1896 { 1897 struct pci_xhci_dev_emu *dev; 1898 struct pci_xhci_dev_ep *devep; 1899 struct xhci_dev_ctx *dev_ctx; 1900 struct xhci_endp_ctx *ep_ctx; 1901 struct pci_xhci_trb_ring *sctx_tr; 1902 struct xhci_trb *trb; 1903 uint64_t ringaddr; 1904 uint32_t ccs; 1905 1906 DPRINTF(("pci_xhci doorbell slot %u epid %u stream %u", 1907 slot, epid, streamid)); 1908 1909 if (slot == 0 || slot > sc->ndevices) { 1910 DPRINTF(("pci_xhci: invalid doorbell slot %u", slot)); 1911 return; 1912 } 1913 1914 if (epid == 0 || 
epid >= XHCI_MAX_ENDPOINTS) { 1915 DPRINTF(("pci_xhci: invalid endpoint %u", epid)); 1916 return; 1917 } 1918 1919 dev = XHCI_SLOTDEV_PTR(sc, slot); 1920 devep = &dev->eps[epid]; 1921 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1922 if (!dev_ctx) { 1923 return; 1924 } 1925 ep_ctx = &dev_ctx->ctx_ep[epid]; 1926 1927 sctx_tr = NULL; 1928 1929 DPRINTF(("pci_xhci: device doorbell ep[%u] %08x %08x %016lx %08x", 1930 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1931 ep_ctx->dwEpCtx4)); 1932 1933 if (ep_ctx->qwEpCtx2 == 0) 1934 return; 1935 1936 /* handle pending transfers */ 1937 if (devep->ep_xfer->ndata > 0) { 1938 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1939 return; 1940 } 1941 1942 /* get next trb work item */ 1943 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) { 1944 struct xhci_stream_ctx *sctx; 1945 1946 /* 1947 * Stream IDs of 0, 65535 (any stream), and 65534 1948 * (prime) are invalid. 1949 */ 1950 if (streamid == 0 || streamid == 65534 || streamid == 65535) { 1951 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1952 return; 1953 } 1954 1955 sctx = NULL; 1956 pci_xhci_find_stream(sc, ep_ctx, streamid, &sctx); 1957 if (sctx == NULL) { 1958 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1959 return; 1960 } 1961 sctx_tr = &devep->ep_sctx_trbs[streamid]; 1962 ringaddr = sctx_tr->ringaddr; 1963 ccs = sctx_tr->ccs; 1964 trb = XHCI_GADDR(sc, sctx_tr->ringaddr & ~0xFUL); 1965 DPRINTF(("doorbell, stream %u, ccs %lx, trb ccs %x", 1966 streamid, ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1967 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1968 } else { 1969 if (streamid != 0) { 1970 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1971 return; 1972 } 1973 ringaddr = devep->ep_ringaddr; 1974 ccs = devep->ep_ccs; 1975 trb = devep->ep_tr; 1976 DPRINTF(("doorbell, ccs %lx, trb ccs %x", 1977 ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1978 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1979 } 1980 1981 if (XHCI_TRB_3_TYPE_GET(trb->dwTrb3) == 0) { 1982 DPRINTF(("pci_xhci: ring %lx trb[%lx] EP %u is RESERVED?", 1983 ep_ctx->qwEpCtx2, devep->ep_ringaddr, epid)); 1984 return; 1985 } 1986 1987 pci_xhci_handle_transfer(sc, dev, devep, ep_ctx, trb, slot, epid, 1988 ringaddr, ccs, streamid); 1989 } 1990 1991 static void 1992 pci_xhci_dbregs_write(struct pci_xhci_softc *sc, uint64_t offset, 1993 uint64_t value) 1994 { 1995 1996 offset = (offset - sc->dboff) / sizeof(uint32_t); 1997 1998 DPRINTF(("pci_xhci: doorbell write offset 0x%lx: 0x%lx", 1999 offset, value)); 2000 2001 if (XHCI_HALTED(sc)) { 2002 DPRINTF(("pci_xhci: controller halted")); 2003 return; 2004 } 2005 2006 if (offset == 0) 2007 pci_xhci_complete_commands(sc); 2008 else if (sc->portregs != NULL) 2009 pci_xhci_device_doorbell(sc, offset, 2010 XHCI_DB_TARGET_GET(value), XHCI_DB_SID_GET(value)); 2011 } 2012 2013 static void 2014 pci_xhci_rtsregs_write(struct pci_xhci_softc *sc, uint64_t offset, 2015 uint64_t value) 2016 { 2017 struct pci_xhci_rtsregs *rts; 2018 2019 offset -= sc->rtsoff; 2020 2021 if (offset == 0) { 2022 DPRINTF(("pci_xhci attempted write to MFINDEX")); 2023 return; 2024 } 2025 2026 DPRINTF(("pci_xhci: runtime regs write offset 0x%lx: 0x%lx", 2027 offset, value)); 2028 2029 offset -= 0x20; /* start of intrreg */ 2030 2031 rts = &sc->rtsregs; 2032 2033 switch (offset) { 2034 case 0x00: 2035 if (value & XHCI_IMAN_INTR_PEND) 2036 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2037 rts->intrreg.iman = (value & XHCI_IMAN_INTR_ENA) | 2038 (rts->intrreg.iman & XHCI_IMAN_INTR_PEND); 2039 2040 if (!(value & 
XHCI_IMAN_INTR_ENA)) 2041 pci_xhci_deassert_interrupt(sc); 2042 2043 break; 2044 2045 case 0x04: 2046 rts->intrreg.imod = value; 2047 break; 2048 2049 case 0x08: 2050 rts->intrreg.erstsz = value & 0xFFFF; 2051 break; 2052 2053 case 0x10: 2054 /* ERSTBA low bits */ 2055 rts->intrreg.erstba = MASK_64_HI(sc->rtsregs.intrreg.erstba) | 2056 (value & ~0x3F); 2057 break; 2058 2059 case 0x14: 2060 /* ERSTBA high bits */ 2061 rts->intrreg.erstba = (value << 32) | 2062 MASK_64_LO(sc->rtsregs.intrreg.erstba); 2063 2064 rts->erstba_p = XHCI_GADDR(sc, 2065 sc->rtsregs.intrreg.erstba & ~0x3FUL); 2066 2067 rts->erst_p = XHCI_GADDR(sc, 2068 sc->rtsregs.erstba_p->qwEvrsTablePtr & ~0x3FUL); 2069 2070 rts->er_enq_idx = 0; 2071 rts->er_events_cnt = 0; 2072 2073 DPRINTF(("pci_xhci: wr erstba erst (%p) ptr 0x%lx, sz %u", 2074 rts->erstba_p, 2075 rts->erstba_p->qwEvrsTablePtr, 2076 rts->erstba_p->dwEvrsTableSize)); 2077 break; 2078 2079 case 0x18: 2080 /* ERDP low bits */ 2081 rts->intrreg.erdp = 2082 MASK_64_HI(sc->rtsregs.intrreg.erdp) | 2083 (rts->intrreg.erdp & XHCI_ERDP_LO_BUSY) | 2084 (value & ~0xF); 2085 if (value & XHCI_ERDP_LO_BUSY) { 2086 rts->intrreg.erdp &= ~XHCI_ERDP_LO_BUSY; 2087 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2088 } 2089 2090 rts->er_deq_seg = XHCI_ERDP_LO_SINDEX(value); 2091 2092 break; 2093 2094 case 0x1C: 2095 /* ERDP high bits */ 2096 rts->intrreg.erdp = (value << 32) | 2097 MASK_64_LO(sc->rtsregs.intrreg.erdp); 2098 2099 if (rts->er_events_cnt > 0) { 2100 uint64_t erdp; 2101 uint32_t erdp_i; 2102 2103 erdp = rts->intrreg.erdp & ~0xF; 2104 erdp_i = (erdp - rts->erstba_p->qwEvrsTablePtr) / 2105 sizeof(struct xhci_trb); 2106 2107 if (erdp_i <= rts->er_enq_idx) 2108 rts->er_events_cnt = rts->er_enq_idx - erdp_i; 2109 else 2110 rts->er_events_cnt = 2111 rts->erstba_p->dwEvrsTableSize - 2112 (erdp_i - rts->er_enq_idx); 2113 2114 DPRINTF(("pci_xhci: erdp 0x%lx, events cnt %u", 2115 erdp, rts->er_events_cnt)); 2116 } 2117 2118 break; 2119 2120 default: 2121 DPRINTF(("pci_xhci attempted write to RTS offset 0x%lx", 2122 offset)); 2123 break; 2124 } 2125 } 2126 2127 static uint64_t 2128 pci_xhci_portregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2129 { 2130 int port; 2131 uint32_t *p; 2132 2133 if (sc->portregs == NULL) 2134 return (0); 2135 2136 port = (offset - 0x3F0) / 0x10; 2137 2138 if (port > XHCI_MAX_DEVS) { 2139 DPRINTF(("pci_xhci: portregs_read port %d >= XHCI_MAX_DEVS", 2140 port)); 2141 2142 /* return default value for unused port */ 2143 return (XHCI_PS_SPEED_SET(3)); 2144 } 2145 2146 offset = (offset - 0x3F0) % 0x10; 2147 2148 p = &sc->portregs[port].portsc; 2149 p += offset / sizeof(uint32_t); 2150 2151 DPRINTF(("pci_xhci: portregs read offset 0x%lx port %u -> 0x%x", 2152 offset, port, *p)); 2153 2154 return (*p); 2155 } 2156 2157 static void 2158 pci_xhci_hostop_write(struct pci_xhci_softc *sc, uint64_t offset, 2159 uint64_t value) 2160 { 2161 offset -= XHCI_CAPLEN; 2162 2163 if (offset < 0x400) 2164 DPRINTF(("pci_xhci: hostop write offset 0x%lx: 0x%lx", 2165 offset, value)); 2166 2167 switch (offset) { 2168 case XHCI_USBCMD: 2169 sc->opregs.usbcmd = pci_xhci_usbcmd_write(sc, value & 0x3F0F); 2170 break; 2171 2172 case XHCI_USBSTS: 2173 /* clear bits on write */ 2174 sc->opregs.usbsts &= ~(value & 2175 (XHCI_STS_HSE|XHCI_STS_EINT|XHCI_STS_PCD|XHCI_STS_SSS| 2176 XHCI_STS_RSS|XHCI_STS_SRE|XHCI_STS_CNR)); 2177 break; 2178 2179 case XHCI_PAGESIZE: 2180 /* read only */ 2181 break; 2182 2183 case XHCI_DNCTRL: 2184 sc->opregs.dnctrl = value & 0xFFFF; 2185 break; 2186 2187 case 
XHCI_CRCR_LO: 2188 if (sc->opregs.crcr & XHCI_CRCR_LO_CRR) { 2189 sc->opregs.crcr &= ~(XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2190 sc->opregs.crcr |= value & 2191 (XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2192 } else { 2193 sc->opregs.crcr = MASK_64_HI(sc->opregs.crcr) | 2194 (value & (0xFFFFFFC0 | XHCI_CRCR_LO_RCS)); 2195 } 2196 break; 2197 2198 case XHCI_CRCR_HI: 2199 if (!(sc->opregs.crcr & XHCI_CRCR_LO_CRR)) { 2200 sc->opregs.crcr = MASK_64_LO(sc->opregs.crcr) | 2201 (value << 32); 2202 2203 sc->opregs.cr_p = XHCI_GADDR(sc, 2204 sc->opregs.crcr & ~0xF); 2205 } 2206 2207 if (sc->opregs.crcr & XHCI_CRCR_LO_CS) { 2208 /* Stop operation of Command Ring */ 2209 } 2210 2211 if (sc->opregs.crcr & XHCI_CRCR_LO_CA) { 2212 /* Abort command */ 2213 } 2214 2215 break; 2216 2217 case XHCI_DCBAAP_LO: 2218 sc->opregs.dcbaap = MASK_64_HI(sc->opregs.dcbaap) | 2219 (value & 0xFFFFFFC0); 2220 break; 2221 2222 case XHCI_DCBAAP_HI: 2223 sc->opregs.dcbaap = MASK_64_LO(sc->opregs.dcbaap) | 2224 (value << 32); 2225 sc->opregs.dcbaa_p = XHCI_GADDR(sc, sc->opregs.dcbaap & ~0x3FUL); 2226 2227 DPRINTF(("pci_xhci: opregs dcbaap = 0x%lx (vaddr 0x%lx)", 2228 sc->opregs.dcbaap, (uint64_t)sc->opregs.dcbaa_p)); 2229 break; 2230 2231 case XHCI_CONFIG: 2232 sc->opregs.config = value & 0x03FF; 2233 break; 2234 2235 default: 2236 if (offset >= 0x400) 2237 pci_xhci_portregs_write(sc, offset, value); 2238 2239 break; 2240 } 2241 } 2242 2243 2244 static void 2245 pci_xhci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, 2246 int baridx, uint64_t offset, int size, uint64_t value) 2247 { 2248 struct pci_xhci_softc *sc; 2249 2250 sc = pi->pi_arg; 2251 2252 assert(baridx == 0); 2253 2254 2255 pthread_mutex_lock(&sc->mtx); 2256 if (offset < XHCI_CAPLEN) /* read only registers */ 2257 WPRINTF(("pci_xhci: write RO-CAPs offset %ld", offset)); 2258 else if (offset < sc->dboff) 2259 pci_xhci_hostop_write(sc, offset, value); 2260 else if (offset < sc->rtsoff) 2261 pci_xhci_dbregs_write(sc, offset, value); 2262 else if (offset < sc->regsend) 2263 pci_xhci_rtsregs_write(sc, offset, value); 2264 else 2265 WPRINTF(("pci_xhci: write invalid offset %ld", offset)); 2266 2267 pthread_mutex_unlock(&sc->mtx); 2268 } 2269 2270 static uint64_t 2271 pci_xhci_hostcap_read(struct pci_xhci_softc *sc, uint64_t offset) 2272 { 2273 uint64_t value; 2274 2275 switch (offset) { 2276 case XHCI_CAPLENGTH: /* 0x00 */ 2277 value = sc->caplength; 2278 break; 2279 2280 case XHCI_HCSPARAMS1: /* 0x04 */ 2281 value = sc->hcsparams1; 2282 break; 2283 2284 case XHCI_HCSPARAMS2: /* 0x08 */ 2285 value = sc->hcsparams2; 2286 break; 2287 2288 case XHCI_HCSPARAMS3: /* 0x0C */ 2289 value = sc->hcsparams3; 2290 break; 2291 2292 case XHCI_HCSPARAMS0: /* 0x10 */ 2293 value = sc->hccparams1; 2294 break; 2295 2296 case XHCI_DBOFF: /* 0x14 */ 2297 value = sc->dboff; 2298 break; 2299 2300 case XHCI_RTSOFF: /* 0x18 */ 2301 value = sc->rtsoff; 2302 break; 2303 2304 case XHCI_HCCPRAMS2: /* 0x1C */ 2305 value = sc->hccparams2; 2306 break; 2307 2308 default: 2309 value = 0; 2310 break; 2311 } 2312 2313 DPRINTF(("pci_xhci: hostcap read offset 0x%lx -> 0x%lx", 2314 offset, value)); 2315 2316 return (value); 2317 } 2318 2319 static uint64_t 2320 pci_xhci_hostop_read(struct pci_xhci_softc *sc, uint64_t offset) 2321 { 2322 uint64_t value; 2323 2324 offset = (offset - XHCI_CAPLEN); 2325 2326 switch (offset) { 2327 case XHCI_USBCMD: /* 0x00 */ 2328 value = sc->opregs.usbcmd; 2329 break; 2330 2331 case XHCI_USBSTS: /* 0x04 */ 2332 value = sc->opregs.usbsts; 2333 break; 2334 2335 case 
XHCI_PAGESIZE: /* 0x08 */ 2336 value = sc->opregs.pgsz; 2337 break; 2338 2339 case XHCI_DNCTRL: /* 0x14 */ 2340 value = sc->opregs.dnctrl; 2341 break; 2342 2343 case XHCI_CRCR_LO: /* 0x18 */ 2344 value = sc->opregs.crcr & XHCI_CRCR_LO_CRR; 2345 break; 2346 2347 case XHCI_CRCR_HI: /* 0x1C */ 2348 value = 0; 2349 break; 2350 2351 case XHCI_DCBAAP_LO: /* 0x30 */ 2352 value = sc->opregs.dcbaap & 0xFFFFFFFF; 2353 break; 2354 2355 case XHCI_DCBAAP_HI: /* 0x34 */ 2356 value = (sc->opregs.dcbaap >> 32) & 0xFFFFFFFF; 2357 break; 2358 2359 case XHCI_CONFIG: /* 0x38 */ 2360 value = sc->opregs.config; 2361 break; 2362 2363 default: 2364 if (offset >= 0x400) 2365 value = pci_xhci_portregs_read(sc, offset); 2366 else 2367 value = 0; 2368 2369 break; 2370 } 2371 2372 if (offset < 0x400) 2373 DPRINTF(("pci_xhci: hostop read offset 0x%lx -> 0x%lx", 2374 offset, value)); 2375 2376 return (value); 2377 } 2378 2379 static uint64_t 2380 pci_xhci_dbregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2381 { 2382 2383 /* read doorbell always returns 0 */ 2384 return (0); 2385 } 2386 2387 static uint64_t 2388 pci_xhci_rtsregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2389 { 2390 uint32_t value; 2391 2392 offset -= sc->rtsoff; 2393 value = 0; 2394 2395 if (offset == XHCI_MFINDEX) { 2396 value = sc->rtsregs.mfindex; 2397 } else if (offset >= 0x20) { 2398 int item; 2399 uint32_t *p; 2400 2401 offset -= 0x20; 2402 item = offset % 32; 2403 2404 assert(offset < sizeof(sc->rtsregs.intrreg)); 2405 2406 p = &sc->rtsregs.intrreg.iman; 2407 p += item / sizeof(uint32_t); 2408 value = *p; 2409 } 2410 2411 DPRINTF(("pci_xhci: rtsregs read offset 0x%lx -> 0x%x", 2412 offset, value)); 2413 2414 return (value); 2415 } 2416 2417 static uint64_t 2418 pci_xhci_xecp_read(struct pci_xhci_softc *sc, uint64_t offset) 2419 { 2420 uint32_t value; 2421 2422 offset -= sc->regsend; 2423 value = 0; 2424 2425 switch (offset) { 2426 case 0: 2427 /* rev major | rev minor | next-cap | cap-id */ 2428 value = (0x02 << 24) | (4 << 8) | XHCI_ID_PROTOCOLS; 2429 break; 2430 case 4: 2431 /* name string = "USB" */ 2432 value = 0x20425355; 2433 break; 2434 case 8: 2435 /* psic | proto-defined | compat # | compat offset */ 2436 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb2_port_start; 2437 break; 2438 case 12: 2439 break; 2440 case 16: 2441 /* rev major | rev minor | next-cap | cap-id */ 2442 value = (0x03 << 24) | XHCI_ID_PROTOCOLS; 2443 break; 2444 case 20: 2445 /* name string = "USB" */ 2446 value = 0x20425355; 2447 break; 2448 case 24: 2449 /* psic | proto-defined | compat # | compat offset */ 2450 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb3_port_start; 2451 break; 2452 case 28: 2453 break; 2454 default: 2455 DPRINTF(("pci_xhci: xecp invalid offset 0x%lx", offset)); 2456 break; 2457 } 2458 2459 DPRINTF(("pci_xhci: xecp read offset 0x%lx -> 0x%x", 2460 offset, value)); 2461 2462 return (value); 2463 } 2464 2465 2466 static uint64_t 2467 pci_xhci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 2468 uint64_t offset, int size) 2469 { 2470 struct pci_xhci_softc *sc; 2471 uint32_t value; 2472 2473 sc = pi->pi_arg; 2474 2475 assert(baridx == 0); 2476 2477 pthread_mutex_lock(&sc->mtx); 2478 if (offset < XHCI_CAPLEN) 2479 value = pci_xhci_hostcap_read(sc, offset); 2480 else if (offset < sc->dboff) 2481 value = pci_xhci_hostop_read(sc, offset); 2482 else if (offset < sc->rtsoff) 2483 value = pci_xhci_dbregs_read(sc, offset); 2484 else if (offset < sc->regsend) 2485 value = pci_xhci_rtsregs_read(sc, offset); 2486 else if (offset < 
(sc->regsend + 4*32)) 2487 value = pci_xhci_xecp_read(sc, offset); 2488 else { 2489 value = 0; 2490 WPRINTF(("pci_xhci: read invalid offset %ld", offset)); 2491 } 2492 2493 pthread_mutex_unlock(&sc->mtx); 2494 2495 switch (size) { 2496 case 1: 2497 value &= 0xFF; 2498 break; 2499 case 2: 2500 value &= 0xFFFF; 2501 break; 2502 case 4: 2503 value &= 0xFFFFFFFF; 2504 break; 2505 } 2506 2507 return (value); 2508 } 2509 2510 static void 2511 pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm) 2512 { 2513 struct pci_xhci_portregs *port; 2514 struct pci_xhci_dev_emu *dev; 2515 struct xhci_trb evtrb; 2516 int error; 2517 2518 assert(portn <= XHCI_MAX_DEVS); 2519 2520 DPRINTF(("xhci reset port %d", portn)); 2521 2522 port = XHCI_PORTREG_PTR(sc, portn); 2523 dev = XHCI_DEVINST_PTR(sc, portn); 2524 if (dev) { 2525 port->portsc &= ~(XHCI_PS_PLS_MASK | XHCI_PS_PR | XHCI_PS_PRC); 2526 port->portsc |= XHCI_PS_PED | 2527 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2528 2529 if (warm && dev->dev_ue->ue_usbver == 3) { 2530 port->portsc |= XHCI_PS_WRC; 2531 } 2532 2533 if ((port->portsc & XHCI_PS_PRC) == 0) { 2534 port->portsc |= XHCI_PS_PRC; 2535 2536 pci_xhci_set_evtrb(&evtrb, portn, 2537 XHCI_TRB_ERROR_SUCCESS, 2538 XHCI_TRB_EVENT_PORT_STS_CHANGE); 2539 error = pci_xhci_insert_event(sc, &evtrb, 1); 2540 if (error != XHCI_TRB_ERROR_SUCCESS) 2541 DPRINTF(("xhci reset port insert event " 2542 "failed")); 2543 } 2544 } 2545 } 2546 2547 static void 2548 pci_xhci_init_port(struct pci_xhci_softc *sc, int portn) 2549 { 2550 struct pci_xhci_portregs *port; 2551 struct pci_xhci_dev_emu *dev; 2552 2553 port = XHCI_PORTREG_PTR(sc, portn); 2554 dev = XHCI_DEVINST_PTR(sc, portn); 2555 if (dev) { 2556 port->portsc = XHCI_PS_CCS | /* connected */ 2557 XHCI_PS_PP; /* port power */ 2558 2559 if (dev->dev_ue->ue_usbver == 2) { 2560 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL) | 2561 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2562 } else { 2563 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0) | 2564 XHCI_PS_PED | /* enabled */ 2565 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2566 } 2567 2568 DPRINTF(("Init port %d 0x%x", portn, port->portsc)); 2569 } else { 2570 port->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP; 2571 DPRINTF(("Init empty port %d 0x%x", portn, port->portsc)); 2572 } 2573 } 2574 2575 static int 2576 pci_xhci_dev_intr(struct usb_hci *hci, int epctx) 2577 { 2578 struct pci_xhci_dev_emu *dev; 2579 struct xhci_dev_ctx *dev_ctx; 2580 struct xhci_trb evtrb; 2581 struct pci_xhci_softc *sc; 2582 struct pci_xhci_portregs *p; 2583 struct xhci_endp_ctx *ep_ctx; 2584 int error = 0; 2585 int dir_in; 2586 int epid; 2587 2588 dir_in = epctx & 0x80; 2589 epid = epctx & ~0x80; 2590 2591 /* HW endpoint contexts are 0-15; convert to epid based on dir */ 2592 epid = (epid * 2) + (dir_in ? 
1 : 0); 2593 2594 assert(epid >= 1 && epid <= 31); 2595 2596 dev = hci->hci_sc; 2597 sc = dev->xsc; 2598 2599 /* check if device is ready; OS has to initialise it */ 2600 if (sc->rtsregs.erstba_p == NULL || 2601 (sc->opregs.usbcmd & XHCI_CMD_RS) == 0 || 2602 dev->dev_ctx == NULL) 2603 return (0); 2604 2605 p = XHCI_PORTREG_PTR(sc, hci->hci_port); 2606 2607 /* raise event if link U3 (suspended) state */ 2608 if (XHCI_PS_PLS_GET(p->portsc) == 3) { 2609 p->portsc &= ~XHCI_PS_PLS_MASK; 2610 p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME); 2611 if ((p->portsc & XHCI_PS_PLC) != 0) 2612 return (0); 2613 2614 p->portsc |= XHCI_PS_PLC; 2615 2616 pci_xhci_set_evtrb(&evtrb, hci->hci_port, 2617 XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE); 2618 error = pci_xhci_insert_event(sc, &evtrb, 0); 2619 if (error != XHCI_TRB_ERROR_SUCCESS) 2620 goto done; 2621 } 2622 2623 dev_ctx = dev->dev_ctx; 2624 ep_ctx = &dev_ctx->ctx_ep[epid]; 2625 if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) { 2626 DPRINTF(("xhci device interrupt on disabled endpoint %d", 2627 epid)); 2628 return (0); 2629 } 2630 2631 DPRINTF(("xhci device interrupt on endpoint %d", epid)); 2632 2633 pci_xhci_device_doorbell(sc, hci->hci_port, epid, 0); 2634 2635 done: 2636 return (error); 2637 } 2638 2639 static int 2640 pci_xhci_dev_event(struct usb_hci *hci, enum hci_usbev evid, void *param) 2641 { 2642 2643 DPRINTF(("xhci device event port %d", hci->hci_port)); 2644 return (0); 2645 } 2646 2647 2648 2649 static void 2650 pci_xhci_device_usage(char *opt) 2651 { 2652 2653 EPRINTLN("Invalid USB emulation \"%s\"", opt); 2654 } 2655 2656 static int 2657 pci_xhci_parse_opts(struct pci_xhci_softc *sc, char *opts) 2658 { 2659 struct pci_xhci_dev_emu **devices; 2660 struct pci_xhci_dev_emu *dev; 2661 struct usb_devemu *ue; 2662 void *devsc; 2663 char *uopt, *xopts, *config; 2664 int usb3_port, usb2_port, i; 2665 2666 uopt = NULL; 2667 usb3_port = sc->usb3_port_start - 1; 2668 usb2_port = sc->usb2_port_start - 1; 2669 devices = NULL; 2670 2671 if (opts == NULL) 2672 goto portsfinal; 2673 2674 devices = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_dev_emu *)); 2675 2676 sc->slots = calloc(XHCI_MAX_SLOTS, sizeof(struct pci_xhci_dev_emu *)); 2677 sc->devices = devices; 2678 sc->ndevices = 0; 2679 2680 uopt = strdup(opts); 2681 for (xopts = strtok(uopt, ","); 2682 xopts != NULL; 2683 xopts = strtok(NULL, ",")) { 2684 if (usb2_port == ((sc->usb2_port_start-1) + XHCI_MAX_DEVS/2) || 2685 usb3_port == ((sc->usb3_port_start-1) + XHCI_MAX_DEVS/2)) { 2686 WPRINTF(("pci_xhci max number of USB 2 or 3 " 2687 "devices reached, max %d", XHCI_MAX_DEVS/2)); 2688 usb2_port = usb3_port = -1; 2689 goto done; 2690 } 2691 2692 /* device[=<config>] */ 2693 if ((config = strchr(xopts, '=')) == NULL) 2694 config = ""; /* no config */ 2695 else 2696 *config++ = '\0'; 2697 2698 ue = usb_emu_finddev(xopts); 2699 if (ue == NULL) { 2700 pci_xhci_device_usage(xopts); 2701 DPRINTF(("pci_xhci device not found %s", xopts)); 2702 usb2_port = usb3_port = -1; 2703 goto done; 2704 } 2705 2706 DPRINTF(("pci_xhci adding device %s, opts \"%s\"", 2707 xopts, config)); 2708 2709 dev = calloc(1, sizeof(struct pci_xhci_dev_emu)); 2710 dev->xsc = sc; 2711 dev->hci.hci_sc = dev; 2712 dev->hci.hci_intr = pci_xhci_dev_intr; 2713 dev->hci.hci_event = pci_xhci_dev_event; 2714 2715 if (ue->ue_usbver == 2) { 2716 dev->hci.hci_port = usb2_port + 1; 2717 devices[usb2_port] = dev; 2718 usb2_port++; 2719 } else { 2720 dev->hci.hci_port = usb3_port + 1; 2721 devices[usb3_port] = dev; 2722 
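			/*
			 * hci_port numbering is 1-based, while usb3_port and
			 * usb2_port index the 0-based devices[] array here;
			 * the devices/portregs/slots arrays are rebased to
			 * 1-based indexing at "portsfinal" below.
			 */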
usb3_port++; 2723 } 2724 2725 dev->hci.hci_address = 0; 2726 devsc = ue->ue_init(&dev->hci, config); 2727 if (devsc == NULL) { 2728 pci_xhci_device_usage(xopts); 2729 usb2_port = usb3_port = -1; 2730 goto done; 2731 } 2732 2733 dev->dev_ue = ue; 2734 dev->dev_sc = devsc; 2735 2736 /* assign slot number to device */ 2737 sc->slots[sc->ndevices] = dev; 2738 2739 sc->ndevices++; 2740 } 2741 2742 portsfinal: 2743 sc->portregs = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_portregs)); 2744 2745 if (sc->ndevices > 0) { 2746 /* port and slot numbering start from 1 */ 2747 sc->devices--; 2748 sc->portregs--; 2749 sc->slots--; 2750 2751 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 2752 pci_xhci_init_port(sc, i); 2753 } 2754 } else { 2755 WPRINTF(("pci_xhci no USB devices configured")); 2756 sc->ndevices = 1; 2757 } 2758 2759 done: 2760 if (devices != NULL) { 2761 if (usb2_port <= 0 && usb3_port <= 0) { 2762 sc->devices = NULL; 2763 for (i = 0; devices[i] != NULL; i++) 2764 free(devices[i]); 2765 sc->ndevices = -1; 2766 2767 free(devices); 2768 } 2769 } 2770 free(uopt); 2771 return (sc->ndevices); 2772 } 2773 2774 static int 2775 pci_xhci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts) 2776 { 2777 struct pci_xhci_softc *sc; 2778 int error; 2779 2780 if (xhci_in_use) { 2781 WPRINTF(("pci_xhci controller already defined")); 2782 return (-1); 2783 } 2784 xhci_in_use = 1; 2785 2786 sc = calloc(1, sizeof(struct pci_xhci_softc)); 2787 pi->pi_arg = sc; 2788 sc->xsc_pi = pi; 2789 2790 sc->usb2_port_start = (XHCI_MAX_DEVS/2) + 1; 2791 sc->usb3_port_start = 1; 2792 2793 /* discover devices */ 2794 error = pci_xhci_parse_opts(sc, opts); 2795 if (error < 0) 2796 goto done; 2797 else 2798 error = 0; 2799 2800 sc->caplength = XHCI_SET_CAPLEN(XHCI_CAPLEN) | 2801 XHCI_SET_HCIVERSION(0x0100); 2802 sc->hcsparams1 = XHCI_SET_HCSP1_MAXPORTS(XHCI_MAX_DEVS) | 2803 XHCI_SET_HCSP1_MAXINTR(1) | /* interrupters */ 2804 XHCI_SET_HCSP1_MAXSLOTS(XHCI_MAX_SLOTS); 2805 sc->hcsparams2 = XHCI_SET_HCSP2_ERSTMAX(XHCI_ERST_MAX) | 2806 XHCI_SET_HCSP2_IST(0x04); 2807 sc->hcsparams3 = 0; /* no latency */ 2808 sc->hccparams1 = XHCI_SET_HCCP1_AC64(1) | /* 64-bit addrs */ 2809 XHCI_SET_HCCP1_NSS(1) | /* no 2nd-streams */ 2810 XHCI_SET_HCCP1_SPC(1) | /* short packet */ 2811 XHCI_SET_HCCP1_MAXPSA(XHCI_STREAMS_MAX); 2812 sc->hccparams2 = XHCI_SET_HCCP2_LEC(1) | 2813 XHCI_SET_HCCP2_U3C(1); 2814 sc->dboff = XHCI_SET_DOORBELL(XHCI_CAPLEN + XHCI_PORTREGS_START + 2815 XHCI_MAX_DEVS * sizeof(struct pci_xhci_portregs)); 2816 2817 /* dboff must be 32-bit aligned */ 2818 if (sc->dboff & 0x3) 2819 sc->dboff = (sc->dboff + 0x3) & ~0x3; 2820 2821 /* rtsoff must be 32-bytes aligned */ 2822 sc->rtsoff = XHCI_SET_RTSOFFSET(sc->dboff + (XHCI_MAX_SLOTS+1) * 32); 2823 if (sc->rtsoff & 0x1F) 2824 sc->rtsoff = (sc->rtsoff + 0x1F) & ~0x1F; 2825 2826 DPRINTF(("pci_xhci dboff: 0x%x, rtsoff: 0x%x", sc->dboff, 2827 sc->rtsoff)); 2828 2829 sc->opregs.usbsts = XHCI_STS_HCH; 2830 sc->opregs.pgsz = XHCI_PAGESIZE_4K; 2831 2832 pci_xhci_reset(sc); 2833 2834 sc->regsend = sc->rtsoff + 0x20 + 32; /* only 1 intrpter */ 2835 2836 /* 2837 * Set extended capabilities pointer to be after regsend; 2838 * value of xecp field is 32-bit offset. 
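	 * The /4 below converts the byte offset to 32-bit words; reads in
	 * that window are handled by pci_xhci_xecp_read(), which presents
	 * the USB 2.0 and USB 3.0 supported-protocol capabilities.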
2839 */ 2840 sc->hccparams1 |= XHCI_SET_HCCP1_XECP(sc->regsend/4); 2841 2842 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1E31); 2843 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086); 2844 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SERIALBUS); 2845 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_SERIALBUS_USB); 2846 pci_set_cfgdata8(pi, PCIR_PROGIF,PCIP_SERIALBUS_USB_XHCI); 2847 pci_set_cfgdata8(pi, PCI_USBREV, PCI_USB_REV_3_0); 2848 2849 pci_emul_add_msicap(pi, 1); 2850 2851 /* regsend + xecp registers */ 2852 pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, sc->regsend + 4*32); 2853 DPRINTF(("pci_xhci pci_emu_alloc: %d", sc->regsend + 4*32)); 2854 2855 2856 pci_lintr_request(pi); 2857 2858 pthread_mutex_init(&sc->mtx, NULL); 2859 2860 done: 2861 if (error) { 2862 free(sc); 2863 } 2864 2865 return (error); 2866 } 2867 2868 #ifdef BHYVE_SNAPSHOT 2869 static void 2870 pci_xhci_map_devs_slots(struct pci_xhci_softc *sc, int maps[]) 2871 { 2872 int i, j; 2873 struct pci_xhci_dev_emu *dev, *slot; 2874 2875 memset(maps, 0, sizeof(maps[0]) * XHCI_MAX_SLOTS); 2876 2877 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 2878 for (j = 1; j <= XHCI_MAX_DEVS; j++) { 2879 slot = XHCI_SLOTDEV_PTR(sc, i); 2880 dev = XHCI_DEVINST_PTR(sc, j); 2881 2882 if (slot == dev) 2883 maps[i] = j; 2884 } 2885 } 2886 } 2887 2888 static int 2889 pci_xhci_snapshot_ep(struct pci_xhci_softc *sc, struct pci_xhci_dev_emu *dev, 2890 int idx, struct vm_snapshot_meta *meta) 2891 { 2892 int k; 2893 int ret; 2894 struct usb_data_xfer *xfer; 2895 struct usb_data_xfer_block *xfer_block; 2896 2897 /* some sanity checks */ 2898 if (meta->op == VM_SNAPSHOT_SAVE) 2899 xfer = dev->eps[idx].ep_xfer; 2900 2901 SNAPSHOT_VAR_OR_LEAVE(xfer, meta, ret, done); 2902 if (xfer == NULL) { 2903 ret = 0; 2904 goto done; 2905 } 2906 2907 if (meta->op == VM_SNAPSHOT_RESTORE) { 2908 pci_xhci_init_ep(dev, idx); 2909 xfer = dev->eps[idx].ep_xfer; 2910 } 2911 2912 /* save / restore proper */ 2913 for (k = 0; k < USB_MAX_XFER_BLOCKS; k++) { 2914 xfer_block = &xfer->data[k]; 2915 2916 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(xfer_block->buf, 2917 XHCI_GADDR_SIZE(xfer_block->buf), true, meta, ret, 2918 done); 2919 SNAPSHOT_VAR_OR_LEAVE(xfer_block->blen, meta, ret, done); 2920 SNAPSHOT_VAR_OR_LEAVE(xfer_block->bdone, meta, ret, done); 2921 SNAPSHOT_VAR_OR_LEAVE(xfer_block->processed, meta, ret, done); 2922 SNAPSHOT_VAR_OR_LEAVE(xfer_block->hci_data, meta, ret, done); 2923 SNAPSHOT_VAR_OR_LEAVE(xfer_block->ccs, meta, ret, done); 2924 SNAPSHOT_VAR_OR_LEAVE(xfer_block->streamid, meta, ret, done); 2925 SNAPSHOT_VAR_OR_LEAVE(xfer_block->trbnext, meta, ret, done); 2926 } 2927 2928 SNAPSHOT_VAR_OR_LEAVE(xfer->ureq, meta, ret, done); 2929 if (xfer->ureq) { 2930 /* xfer->ureq is not allocated at restore time */ 2931 if (meta->op == VM_SNAPSHOT_RESTORE) 2932 xfer->ureq = malloc(sizeof(struct usb_device_request)); 2933 2934 SNAPSHOT_BUF_OR_LEAVE(xfer->ureq, 2935 sizeof(struct usb_device_request), 2936 meta, ret, done); 2937 } 2938 2939 SNAPSHOT_VAR_OR_LEAVE(xfer->ndata, meta, ret, done); 2940 SNAPSHOT_VAR_OR_LEAVE(xfer->head, meta, ret, done); 2941 SNAPSHOT_VAR_OR_LEAVE(xfer->tail, meta, ret, done); 2942 2943 done: 2944 return (ret); 2945 } 2946 2947 static int 2948 pci_xhci_snapshot(struct vm_snapshot_meta *meta) 2949 { 2950 int i, j; 2951 int ret; 2952 int restore_idx; 2953 struct pci_devinst *pi; 2954 struct pci_xhci_softc *sc; 2955 struct pci_xhci_portregs *port; 2956 struct pci_xhci_dev_emu *dev; 2957 char dname[SNAP_DEV_NAME_LEN]; 2958 int maps[XHCI_MAX_SLOTS + 1]; 2959 2960 pi = meta->dev_data; 2961 sc = 
pi->pi_arg; 2962 2963 SNAPSHOT_VAR_OR_LEAVE(sc->caplength, meta, ret, done); 2964 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams1, meta, ret, done); 2965 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams2, meta, ret, done); 2966 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams3, meta, ret, done); 2967 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams1, meta, ret, done); 2968 SNAPSHOT_VAR_OR_LEAVE(sc->dboff, meta, ret, done); 2969 SNAPSHOT_VAR_OR_LEAVE(sc->rtsoff, meta, ret, done); 2970 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams2, meta, ret, done); 2971 SNAPSHOT_VAR_OR_LEAVE(sc->regsend, meta, ret, done); 2972 2973 /* opregs */ 2974 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbcmd, meta, ret, done); 2975 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbsts, meta, ret, done); 2976 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.pgsz, meta, ret, done); 2977 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dnctrl, meta, ret, done); 2978 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.crcr, meta, ret, done); 2979 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dcbaap, meta, ret, done); 2980 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.config, meta, ret, done); 2981 2982 /* opregs.cr_p */ 2983 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.cr_p, 2984 XHCI_GADDR_SIZE(sc->opregs.cr_p), false, meta, ret, done); 2985 2986 /* opregs.dcbaa_p */ 2987 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.dcbaa_p, 2988 XHCI_GADDR_SIZE(sc->opregs.dcbaa_p), false, meta, ret, done); 2989 2990 /* rtsregs */ 2991 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.mfindex, meta, ret, done); 2992 2993 /* rtsregs.intrreg */ 2994 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.iman, meta, ret, done); 2995 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.imod, meta, ret, done); 2996 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstsz, meta, ret, done); 2997 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.rsvd, meta, ret, done); 2998 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstba, meta, ret, done); 2999 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erdp, meta, ret, done); 3000 3001 /* rtsregs.erstba_p */ 3002 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erstba_p, 3003 XHCI_GADDR_SIZE(sc->rtsregs.erstba_p), false, meta, ret, done); 3004 3005 /* rtsregs.erst_p */ 3006 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erst_p, 3007 XHCI_GADDR_SIZE(sc->rtsregs.erst_p), false, meta, ret, done); 3008 3009 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_deq_seg, meta, ret, done); 3010 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_idx, meta, ret, done); 3011 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_seg, meta, ret, done); 3012 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_events_cnt, meta, ret, done); 3013 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.event_pcs, meta, ret, done); 3014 3015 /* sanity checking */ 3016 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3017 dev = XHCI_DEVINST_PTR(sc, i); 3018 if (dev == NULL) 3019 continue; 3020 3021 if (meta->op == VM_SNAPSHOT_SAVE) 3022 restore_idx = i; 3023 SNAPSHOT_VAR_OR_LEAVE(restore_idx, meta, ret, done); 3024 3025 /* check if the restored device (when restoring) is sane */ 3026 if (restore_idx != i) { 3027 fprintf(stderr, "%s: idx not matching: actual: %d, " 3028 "expected: %d\r\n", __func__, restore_idx, i); 3029 ret = EINVAL; 3030 goto done; 3031 } 3032 3033 if (meta->op == VM_SNAPSHOT_SAVE) { 3034 memset(dname, 0, sizeof(dname)); 3035 strncpy(dname, dev->dev_ue->ue_emu, sizeof(dname) - 1); 3036 } 3037 3038 SNAPSHOT_BUF_OR_LEAVE(dname, sizeof(dname), meta, ret, done); 3039 3040 if (meta->op == VM_SNAPSHOT_RESTORE) { 3041 dname[sizeof(dname) - 1] = '\0'; 3042 if (strcmp(dev->dev_ue->ue_emu, dname)) { 3043 fprintf(stderr, "%s: device names mismatch: " 3044 "actual: %s, expected: %s\r\n", 3045 __func__, dname, 
dev->dev_ue->ue_emu); 3046 3047 ret = EINVAL; 3048 goto done; 3049 } 3050 } 3051 } 3052 3053 /* portregs */ 3054 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3055 port = XHCI_PORTREG_PTR(sc, i); 3056 dev = XHCI_DEVINST_PTR(sc, i); 3057 3058 if (dev == NULL) 3059 continue; 3060 3061 SNAPSHOT_VAR_OR_LEAVE(port->portsc, meta, ret, done); 3062 SNAPSHOT_VAR_OR_LEAVE(port->portpmsc, meta, ret, done); 3063 SNAPSHOT_VAR_OR_LEAVE(port->portli, meta, ret, done); 3064 SNAPSHOT_VAR_OR_LEAVE(port->porthlpmc, meta, ret, done); 3065 } 3066 3067 /* slots */ 3068 if (meta->op == VM_SNAPSHOT_SAVE) 3069 pci_xhci_map_devs_slots(sc, maps); 3070 3071 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 3072 SNAPSHOT_VAR_OR_LEAVE(maps[i], meta, ret, done); 3073 3074 if (meta->op == VM_SNAPSHOT_SAVE) { 3075 dev = XHCI_SLOTDEV_PTR(sc, i); 3076 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 3077 if (maps[i] != 0) 3078 dev = XHCI_DEVINST_PTR(sc, maps[i]); 3079 else 3080 dev = NULL; 3081 3082 XHCI_SLOTDEV_PTR(sc, i) = dev; 3083 } else { 3084 /* error */ 3085 ret = EINVAL; 3086 goto done; 3087 } 3088 3089 if (dev == NULL) 3090 continue; 3091 3092 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(dev->dev_ctx, 3093 XHCI_GADDR_SIZE(dev->dev_ctx), false, meta, ret, done); 3094 3095 for (j = 1; j < XHCI_MAX_ENDPOINTS; j++) { 3096 ret = pci_xhci_snapshot_ep(sc, dev, j, meta); 3097 if (ret != 0) 3098 goto done; 3099 } 3100 3101 SNAPSHOT_VAR_OR_LEAVE(dev->dev_slotstate, meta, ret, done); 3102 3103 /* devices[i]->dev_sc */ 3104 dev->dev_ue->ue_snapshot(dev->dev_sc, meta); 3105 3106 /* devices[i]->hci */ 3107 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_address, meta, ret, done); 3108 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_port, meta, ret, done); 3109 } 3110 3111 SNAPSHOT_VAR_OR_LEAVE(sc->ndevices, meta, ret, done); 3112 SNAPSHOT_VAR_OR_LEAVE(sc->usb2_port_start, meta, ret, done); 3113 SNAPSHOT_VAR_OR_LEAVE(sc->usb3_port_start, meta, ret, done); 3114 3115 done: 3116 return (ret); 3117 } 3118 #endif 3119 3120 struct pci_devemu pci_de_xhci = { 3121 .pe_emu = "xhci", 3122 .pe_init = pci_xhci_init, 3123 .pe_barwrite = pci_xhci_write, 3124 .pe_barread = pci_xhci_read, 3125 #ifdef BHYVE_SNAPSHOT 3126 .pe_snapshot = pci_xhci_snapshot, 3127 #endif 3128 }; 3129 PCI_EMUL_SET(pci_de_xhci); 3130
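
/*
 * Reference: BAR 0 layout as dispatched by pci_xhci_read()/pci_xhci_write()
 * and sized in pci_xhci_init():
 *
 *   [0, XHCI_CAPLEN)           capability registers (read-only)
 *   [XHCI_CAPLEN, dboff)       operational registers; the port register
 *                              sets begin 0x400 bytes into this region
 *   [dboff, rtsoff)            doorbell registers
 *   [rtsoff, regsend)          runtime registers (single interrupter)
 *   [regsend, regsend + 4*32)  extended capabilities (USB 2.0 and USB 3.0
 *                              supported-protocol entries)
 */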