1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2014 Leon Dang <ldang@nahannisys.com> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /* 29 XHCI options: 30 -s <n>,xhci,{devices} 31 32 devices: 33 tablet USB tablet mouse 34 */ 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include <sys/param.h> 39 #include <sys/uio.h> 40 #include <sys/types.h> 41 #include <sys/queue.h> 42 43 #include <stdio.h> 44 #include <stdlib.h> 45 #include <stdint.h> 46 #include <string.h> 47 #include <errno.h> 48 #include <pthread.h> 49 #include <unistd.h> 50 51 #include <machine/vmm_snapshot.h> 52 53 #include <dev/usb/usbdi.h> 54 #include <dev/usb/usb.h> 55 #include <dev/usb/usb_freebsd.h> 56 #include <xhcireg.h> 57 58 #include "bhyverun.h" 59 #include "debug.h" 60 #include "pci_emul.h" 61 #include "pci_xhci.h" 62 #include "usb_emul.h" 63 64 65 static int xhci_debug = 0; 66 #define DPRINTF(params) if (xhci_debug) PRINTLN params 67 #define WPRINTF(params) PRINTLN params 68 69 70 #define XHCI_NAME "xhci" 71 #define XHCI_MAX_DEVS 8 /* 4 USB3 + 4 USB2 devs */ 72 73 #define XHCI_MAX_SLOTS 64 /* min allowed by Windows drivers */ 74 75 /* 76 * XHCI data structures can be up to 64k, but limit paddr_guest2host mapping 77 * to 4k to avoid going over the guest physical memory barrier. 78 */ 79 #define XHCI_PADDR_SZ 4096 /* paddr_guest2host max size */ 80 81 #define XHCI_ERST_MAX 0 /* max 2^entries event ring seg tbl */ 82 83 #define XHCI_CAPLEN (4*8) /* offset of op register space */ 84 #define XHCI_HCCPRAMS2 0x1C /* offset of HCCPARAMS2 register */ 85 #define XHCI_PORTREGS_START 0x400 86 #define XHCI_DOORBELL_MAX 256 87 88 #define XHCI_STREAMS_MAX 1 /* 4-15 in XHCI spec */ 89 90 /* caplength and hci-version registers */ 91 #define XHCI_SET_CAPLEN(x) ((x) & 0xFF) 92 #define XHCI_SET_HCIVERSION(x) (((x) & 0xFFFF) << 16) 93 #define XHCI_GET_HCIVERSION(x) (((x) >> 16) & 0xFFFF) 94 95 /* hcsparams1 register */ 96 #define XHCI_SET_HCSP1_MAXSLOTS(x) ((x) & 0xFF) 97 #define XHCI_SET_HCSP1_MAXINTR(x) (((x) & 0x7FF) << 8) 98 #define XHCI_SET_HCSP1_MAXPORTS(x) (((x) & 0xFF) << 24) 99 100 /* hcsparams2 register */ 101 #define XHCI_SET_HCSP2_IST(x) ((x) & 0x0F) 102 #define XHCI_SET_HCSP2_ERSTMAX(x) (((x) & 0x0F) << 4) 103 #define XHCI_SET_HCSP2_MAXSCRATCH_HI(x) (((x) & 0x1F) << 21) 104 #define XHCI_SET_HCSP2_MAXSCRATCH_LO(x) (((x) & 0x1F) << 27) 105 106 /* hcsparams3 register */ 107 #define XHCI_SET_HCSP3_U1EXITLATENCY(x) ((x) & 0xFF) 108 #define XHCI_SET_HCSP3_U2EXITLATENCY(x) (((x) & 0xFFFF) << 16) 109 110 /* hccparams1 register */ 111 #define XHCI_SET_HCCP1_AC64(x) ((x) & 0x01) 112 #define XHCI_SET_HCCP1_BNC(x) (((x) & 0x01) << 1) 113 #define XHCI_SET_HCCP1_CSZ(x) (((x) & 0x01) << 2) 114 #define XHCI_SET_HCCP1_PPC(x) (((x) & 0x01) << 3) 115 #define XHCI_SET_HCCP1_PIND(x) (((x) & 0x01) << 4) 116 #define XHCI_SET_HCCP1_LHRC(x) (((x) & 0x01) << 5) 117 #define XHCI_SET_HCCP1_LTC(x) (((x) & 0x01) << 6) 118 #define XHCI_SET_HCCP1_NSS(x) (((x) & 0x01) << 7) 119 #define XHCI_SET_HCCP1_PAE(x) (((x) & 0x01) << 8) 120 #define XHCI_SET_HCCP1_SPC(x) (((x) & 0x01) << 9) 121 #define XHCI_SET_HCCP1_SEC(x) (((x) & 0x01) << 10) 122 #define XHCI_SET_HCCP1_CFC(x) (((x) & 0x01) << 11) 123 #define XHCI_SET_HCCP1_MAXPSA(x) (((x) & 0x0F) << 12) 124 #define XHCI_SET_HCCP1_XECP(x) (((x) & 0xFFFF) << 16) 125 126 /* hccparams2 register */ 127 #define XHCI_SET_HCCP2_U3C(x) ((x) & 0x01) 128 #define XHCI_SET_HCCP2_CMC(x) (((x) & 0x01) << 1) 129 #define XHCI_SET_HCCP2_FSC(x) (((x) & 0x01) << 2) 130 #define XHCI_SET_HCCP2_CTC(x) (((x) & 0x01) << 3) 131 #define XHCI_SET_HCCP2_LEC(x) (((x) & 0x01) << 4) 132 #define XHCI_SET_HCCP2_CIC(x) (((x) & 0x01) << 5) 133 134 /* other registers */ 135 #define XHCI_SET_DOORBELL(x) ((x) & ~0x03) 136 #define XHCI_SET_RTSOFFSET(x) ((x) & ~0x0F) 137 138 /* register masks */ 139 #define XHCI_PS_PLS_MASK (0xF << 5) /* port link state */ 140 #define XHCI_PS_SPEED_MASK (0xF << 10) /* port speed */ 141 #define XHCI_PS_PIC_MASK (0x3 << 14) /* port indicator */ 142 143 /* port register set */ 144 #define XHCI_PORTREGS_BASE 0x400 /* base offset */ 145 #define XHCI_PORTREGS_PORT0 0x3F0 146 #define XHCI_PORTREGS_SETSZ 0x10 /* size of a set */ 147 148 #define MASK_64_HI(x) ((x) & ~0xFFFFFFFFULL) 149 #define MASK_64_LO(x) ((x) & 0xFFFFFFFFULL) 150 151 #define FIELD_REPLACE(a,b,m,s) (((a) & ~((m) << (s))) | \ 152 (((b) & (m)) << (s))) 153 #define FIELD_COPY(a,b,m,s) (((a) & ~((m) << (s))) | \ 154 (((b) & ((m) << (s))))) 155 156 #define SNAP_DEV_NAME_LEN 128 157 158 struct pci_xhci_trb_ring { 159 uint64_t ringaddr; /* current dequeue guest address */ 160 uint32_t ccs; /* consumer cycle state */ 161 }; 162 163 /* device endpoint transfer/stream rings */ 164 struct pci_xhci_dev_ep { 165 union { 166 struct xhci_trb *_epu_tr; 167 struct xhci_stream_ctx *_epu_sctx; 168 } _ep_trbsctx; 169 #define ep_tr _ep_trbsctx._epu_tr 170 #define ep_sctx _ep_trbsctx._epu_sctx 171 172 union { 173 struct pci_xhci_trb_ring _epu_trb; 174 struct pci_xhci_trb_ring *_epu_sctx_trbs; 175 } _ep_trb_rings; 176 #define ep_ringaddr _ep_trb_rings._epu_trb.ringaddr 177 #define ep_ccs _ep_trb_rings._epu_trb.ccs 178 #define ep_sctx_trbs _ep_trb_rings._epu_sctx_trbs 179 180 struct usb_data_xfer *ep_xfer; /* transfer chain */ 181 }; 182 183 /* device context base address array: maps slot->device context */ 184 struct xhci_dcbaa { 185 uint64_t dcba[USB_MAX_DEVICES+1]; /* xhci_dev_ctx ptrs */ 186 }; 187 188 /* port status registers */ 189 struct pci_xhci_portregs { 190 uint32_t portsc; /* port status and control */ 191 uint32_t portpmsc; /* port pwr mgmt status & control */ 192 uint32_t portli; /* port link info */ 193 uint32_t porthlpmc; /* port hardware LPM control */ 194 } __packed; 195 #define XHCI_PS_SPEED_SET(x) (((x) & 0xF) << 10) 196 197 /* xHC operational registers */ 198 struct pci_xhci_opregs { 199 uint32_t usbcmd; /* usb command */ 200 uint32_t usbsts; /* usb status */ 201 uint32_t pgsz; /* page size */ 202 uint32_t dnctrl; /* device notification control */ 203 uint64_t crcr; /* command ring control */ 204 uint64_t dcbaap; /* device ctx base addr array ptr */ 205 uint32_t config; /* configure */ 206 207 /* guest mapped addresses: */ 208 struct xhci_trb *cr_p; /* crcr dequeue */ 209 struct xhci_dcbaa *dcbaa_p; /* dev ctx array ptr */ 210 }; 211 212 /* xHC runtime registers */ 213 struct pci_xhci_rtsregs { 214 uint32_t mfindex; /* microframe index */ 215 struct { /* interrupter register set */ 216 uint32_t iman; /* interrupter management */ 217 uint32_t imod; /* interrupter moderation */ 218 uint32_t erstsz; /* event ring segment table size */ 219 uint32_t rsvd; 220 uint64_t erstba; /* event ring seg-tbl base addr */ 221 uint64_t erdp; /* event ring dequeue ptr */ 222 } intrreg __packed; 223 224 /* guest mapped addresses */ 225 struct xhci_event_ring_seg *erstba_p; 226 struct xhci_trb *erst_p; /* event ring segment tbl */ 227 int er_deq_seg; /* event ring dequeue segment */ 228 int er_enq_idx; /* event ring enqueue index - xHCI */ 229 int er_enq_seg; /* event ring enqueue segment */ 230 uint32_t er_events_cnt; /* number of events in ER */ 231 uint32_t event_pcs; /* producer cycle state flag */ 232 }; 233 234 235 struct pci_xhci_softc; 236 237 238 /* 239 * USB device emulation container. 240 * This is referenced from usb_hci->hci_sc; 1 pci_xhci_dev_emu for each 241 * emulated device instance. 242 */ 243 struct pci_xhci_dev_emu { 244 struct pci_xhci_softc *xsc; 245 246 /* XHCI contexts */ 247 struct xhci_dev_ctx *dev_ctx; 248 struct pci_xhci_dev_ep eps[XHCI_MAX_ENDPOINTS]; 249 int dev_slotstate; 250 251 struct usb_devemu *dev_ue; /* USB emulated dev */ 252 void *dev_sc; /* device's softc */ 253 254 struct usb_hci hci; 255 }; 256 257 struct pci_xhci_softc { 258 struct pci_devinst *xsc_pi; 259 260 pthread_mutex_t mtx; 261 262 uint32_t caplength; /* caplen & hciversion */ 263 uint32_t hcsparams1; /* structural parameters 1 */ 264 uint32_t hcsparams2; /* structural parameters 2 */ 265 uint32_t hcsparams3; /* structural parameters 3 */ 266 uint32_t hccparams1; /* capability parameters 1 */ 267 uint32_t dboff; /* doorbell offset */ 268 uint32_t rtsoff; /* runtime register space offset */ 269 uint32_t hccparams2; /* capability parameters 2 */ 270 271 uint32_t regsend; /* end of configuration registers */ 272 273 struct pci_xhci_opregs opregs; 274 struct pci_xhci_rtsregs rtsregs; 275 276 struct pci_xhci_portregs *portregs; 277 struct pci_xhci_dev_emu **devices; /* XHCI[port] = device */ 278 struct pci_xhci_dev_emu **slots; /* slots assigned from 1 */ 279 int ndevices; 280 281 int usb2_port_start; 282 int usb3_port_start; 283 }; 284 285 286 /* portregs and devices arrays are set up to start from idx=1 */ 287 #define XHCI_PORTREG_PTR(x,n) &(x)->portregs[(n)] 288 #define XHCI_DEVINST_PTR(x,n) (x)->devices[(n)] 289 #define XHCI_SLOTDEV_PTR(x,n) (x)->slots[(n)] 290 291 #define XHCI_HALTED(sc) ((sc)->opregs.usbsts & XHCI_STS_HCH) 292 293 #define XHCI_GADDR_SIZE(a) (XHCI_PADDR_SZ - \ 294 (((uint64_t) (a)) & (XHCI_PADDR_SZ - 1))) 295 #define XHCI_GADDR(sc,a) paddr_guest2host((sc)->xsc_pi->pi_vmctx, \ 296 (a), XHCI_GADDR_SIZE(a)) 297 298 static int xhci_in_use; 299 300 /* map USB errors to XHCI */ 301 static const int xhci_usb_errors[USB_ERR_MAX] = { 302 [USB_ERR_NORMAL_COMPLETION] = XHCI_TRB_ERROR_SUCCESS, 303 [USB_ERR_PENDING_REQUESTS] = XHCI_TRB_ERROR_RESOURCE, 304 [USB_ERR_NOT_STARTED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 305 [USB_ERR_INVAL] = XHCI_TRB_ERROR_INVALID, 306 [USB_ERR_NOMEM] = XHCI_TRB_ERROR_RESOURCE, 307 [USB_ERR_CANCELLED] = XHCI_TRB_ERROR_STOPPED, 308 [USB_ERR_BAD_ADDRESS] = XHCI_TRB_ERROR_PARAMETER, 309 [USB_ERR_BAD_BUFSIZE] = XHCI_TRB_ERROR_PARAMETER, 310 [USB_ERR_BAD_FLAG] = XHCI_TRB_ERROR_PARAMETER, 311 [USB_ERR_NO_CALLBACK] = XHCI_TRB_ERROR_STALL, 312 [USB_ERR_IN_USE] = XHCI_TRB_ERROR_RESOURCE, 313 [USB_ERR_NO_ADDR] = XHCI_TRB_ERROR_RESOURCE, 314 [USB_ERR_NO_PIPE] = XHCI_TRB_ERROR_RESOURCE, 315 [USB_ERR_ZERO_NFRAMES] = XHCI_TRB_ERROR_UNDEFINED, 316 [USB_ERR_ZERO_MAXP] = XHCI_TRB_ERROR_UNDEFINED, 317 [USB_ERR_SET_ADDR_FAILED] = XHCI_TRB_ERROR_RESOURCE, 318 [USB_ERR_NO_POWER] = XHCI_TRB_ERROR_ENDP_NOT_ON, 319 [USB_ERR_TOO_DEEP] = XHCI_TRB_ERROR_RESOURCE, 320 [USB_ERR_IOERROR] = XHCI_TRB_ERROR_TRB, 321 [USB_ERR_NOT_CONFIGURED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 322 [USB_ERR_TIMEOUT] = XHCI_TRB_ERROR_CMD_ABORTED, 323 [USB_ERR_SHORT_XFER] = XHCI_TRB_ERROR_SHORT_PKT, 324 [USB_ERR_STALLED] = XHCI_TRB_ERROR_STALL, 325 [USB_ERR_INTERRUPTED] = XHCI_TRB_ERROR_CMD_ABORTED, 326 [USB_ERR_DMA_LOAD_FAILED] = XHCI_TRB_ERROR_DATA_BUF, 327 [USB_ERR_BAD_CONTEXT] = XHCI_TRB_ERROR_TRB, 328 [USB_ERR_NO_ROOT_HUB] = XHCI_TRB_ERROR_UNDEFINED, 329 [USB_ERR_NO_INTR_THREAD] = XHCI_TRB_ERROR_UNDEFINED, 330 [USB_ERR_NOT_LOCKED] = XHCI_TRB_ERROR_UNDEFINED, 331 }; 332 #define USB_TO_XHCI_ERR(e) ((e) < USB_ERR_MAX ? xhci_usb_errors[(e)] : \ 333 XHCI_TRB_ERROR_INVALID) 334 335 static int pci_xhci_insert_event(struct pci_xhci_softc *sc, 336 struct xhci_trb *evtrb, int do_intr); 337 static void pci_xhci_dump_trb(struct xhci_trb *trb); 338 static void pci_xhci_assert_interrupt(struct pci_xhci_softc *sc); 339 static void pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot); 340 static void pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm); 341 static void pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, 342 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 343 struct xhci_endp_ctx *ep_ctx, uint32_t streamid, 344 uint64_t ringaddr, int ccs); 345 346 static void 347 pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode, 348 uint32_t evtype) 349 { 350 evtrb->qwTrb0 = port << 24; 351 evtrb->dwTrb2 = XHCI_TRB_2_ERROR_SET(errcode); 352 evtrb->dwTrb3 = XHCI_TRB_3_TYPE_SET(evtype); 353 } 354 355 356 /* controller reset */ 357 static void 358 pci_xhci_reset(struct pci_xhci_softc *sc) 359 { 360 int i; 361 362 sc->rtsregs.er_enq_idx = 0; 363 sc->rtsregs.er_events_cnt = 0; 364 sc->rtsregs.event_pcs = 1; 365 366 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 367 pci_xhci_reset_slot(sc, i); 368 } 369 } 370 371 static uint32_t 372 pci_xhci_usbcmd_write(struct pci_xhci_softc *sc, uint32_t cmd) 373 { 374 int do_intr = 0; 375 int i; 376 377 if (cmd & XHCI_CMD_RS) { 378 do_intr = (sc->opregs.usbcmd & XHCI_CMD_RS) == 0; 379 380 sc->opregs.usbcmd |= XHCI_CMD_RS; 381 sc->opregs.usbsts &= ~XHCI_STS_HCH; 382 sc->opregs.usbsts |= XHCI_STS_PCD; 383 384 /* Queue port change event on controller run from stop */ 385 if (do_intr) 386 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 387 struct pci_xhci_dev_emu *dev; 388 struct pci_xhci_portregs *port; 389 struct xhci_trb evtrb; 390 391 if ((dev = XHCI_DEVINST_PTR(sc, i)) == NULL) 392 continue; 393 394 port = XHCI_PORTREG_PTR(sc, i); 395 port->portsc |= XHCI_PS_CSC | XHCI_PS_CCS; 396 port->portsc &= ~XHCI_PS_PLS_MASK; 397 398 /* 399 * XHCI 4.19.3 USB2 RxDetect->Polling, 400 * USB3 Polling->U0 401 */ 402 if (dev->dev_ue->ue_usbver == 2) 403 port->portsc |= 404 XHCI_PS_PLS_SET(UPS_PORT_LS_POLL); 405 else 406 port->portsc |= 407 XHCI_PS_PLS_SET(UPS_PORT_LS_U0); 408 409 pci_xhci_set_evtrb(&evtrb, i, 410 XHCI_TRB_ERROR_SUCCESS, 411 XHCI_TRB_EVENT_PORT_STS_CHANGE); 412 413 if (pci_xhci_insert_event(sc, &evtrb, 0) != 414 XHCI_TRB_ERROR_SUCCESS) 415 break; 416 } 417 } else { 418 sc->opregs.usbcmd &= ~XHCI_CMD_RS; 419 sc->opregs.usbsts |= XHCI_STS_HCH; 420 sc->opregs.usbsts &= ~XHCI_STS_PCD; 421 } 422 423 /* start execution of schedule; stop when set to 0 */ 424 cmd |= sc->opregs.usbcmd & XHCI_CMD_RS; 425 426 if (cmd & XHCI_CMD_HCRST) { 427 /* reset controller */ 428 pci_xhci_reset(sc); 429 cmd &= ~XHCI_CMD_HCRST; 430 } 431 432 cmd &= ~(XHCI_CMD_CSS | XHCI_CMD_CRS); 433 434 if (do_intr) 435 pci_xhci_assert_interrupt(sc); 436 437 return (cmd); 438 } 439 440 static void 441 pci_xhci_portregs_write(struct pci_xhci_softc *sc, uint64_t offset, 442 uint64_t value) 443 { 444 struct xhci_trb evtrb; 445 struct pci_xhci_portregs *p; 446 int port; 447 uint32_t oldpls, newpls; 448 449 if (sc->portregs == NULL) 450 return; 451 452 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ; 453 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ; 454 455 DPRINTF(("pci_xhci: portregs wr offset 0x%lx, port %u: 0x%lx", 456 offset, port, value)); 457 458 assert(port >= 0); 459 460 if (port > XHCI_MAX_DEVS) { 461 DPRINTF(("pci_xhci: portregs_write port %d > ndevices", 462 port)); 463 return; 464 } 465 466 if (XHCI_DEVINST_PTR(sc, port) == NULL) { 467 DPRINTF(("pci_xhci: portregs_write to unattached port %d", 468 port)); 469 } 470 471 p = XHCI_PORTREG_PTR(sc, port); 472 switch (offset) { 473 case 0: 474 /* port reset or warm reset */ 475 if (value & (XHCI_PS_PR | XHCI_PS_WPR)) { 476 pci_xhci_reset_port(sc, port, value & XHCI_PS_WPR); 477 break; 478 } 479 480 if ((p->portsc & XHCI_PS_PP) == 0) { 481 WPRINTF(("pci_xhci: portregs_write to unpowered " 482 "port %d", port)); 483 break; 484 } 485 486 /* Port status and control register */ 487 oldpls = XHCI_PS_PLS_GET(p->portsc); 488 newpls = XHCI_PS_PLS_GET(value); 489 490 p->portsc &= XHCI_PS_PED | XHCI_PS_PLS_MASK | 491 XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK; 492 493 if (XHCI_DEVINST_PTR(sc, port)) 494 p->portsc |= XHCI_PS_CCS; 495 496 p->portsc |= (value & 497 ~(XHCI_PS_OCA | 498 XHCI_PS_PR | 499 XHCI_PS_PED | 500 XHCI_PS_PLS_MASK | /* link state */ 501 XHCI_PS_SPEED_MASK | 502 XHCI_PS_PIC_MASK | /* port indicator */ 503 XHCI_PS_LWS | XHCI_PS_DR | XHCI_PS_WPR)); 504 505 /* clear control bits */ 506 p->portsc &= ~(value & 507 (XHCI_PS_CSC | 508 XHCI_PS_PEC | 509 XHCI_PS_WRC | 510 XHCI_PS_OCC | 511 XHCI_PS_PRC | 512 XHCI_PS_PLC | 513 XHCI_PS_CEC | 514 XHCI_PS_CAS)); 515 516 /* port disable request; for USB3, don't care */ 517 if (value & XHCI_PS_PED) 518 DPRINTF(("Disable port %d request", port)); 519 520 if (!(value & XHCI_PS_LWS)) 521 break; 522 523 DPRINTF(("Port new PLS: %d", newpls)); 524 switch (newpls) { 525 case 0: /* U0 */ 526 case 3: /* U3 */ 527 if (oldpls != newpls) { 528 p->portsc &= ~XHCI_PS_PLS_MASK; 529 p->portsc |= XHCI_PS_PLS_SET(newpls) | 530 XHCI_PS_PLC; 531 532 if (oldpls != 0 && newpls == 0) { 533 pci_xhci_set_evtrb(&evtrb, port, 534 XHCI_TRB_ERROR_SUCCESS, 535 XHCI_TRB_EVENT_PORT_STS_CHANGE); 536 537 pci_xhci_insert_event(sc, &evtrb, 1); 538 } 539 } 540 break; 541 542 default: 543 DPRINTF(("Unhandled change port %d PLS %u", 544 port, newpls)); 545 break; 546 } 547 break; 548 case 4: 549 /* Port power management status and control register */ 550 p->portpmsc = value; 551 break; 552 case 8: 553 /* Port link information register */ 554 DPRINTF(("pci_xhci attempted write to PORTLI, port %d", 555 port)); 556 break; 557 case 12: 558 /* 559 * Port hardware LPM control register. 560 * For USB3, this register is reserved. 561 */ 562 p->porthlpmc = value; 563 break; 564 } 565 } 566 567 struct xhci_dev_ctx * 568 pci_xhci_get_dev_ctx(struct pci_xhci_softc *sc, uint32_t slot) 569 { 570 uint64_t devctx_addr; 571 struct xhci_dev_ctx *devctx; 572 573 assert(slot > 0 && slot <= sc->ndevices); 574 assert(sc->opregs.dcbaa_p != NULL); 575 576 devctx_addr = sc->opregs.dcbaa_p->dcba[slot]; 577 578 if (devctx_addr == 0) { 579 DPRINTF(("get_dev_ctx devctx_addr == 0")); 580 return (NULL); 581 } 582 583 DPRINTF(("pci_xhci: get dev ctx, slot %u devctx addr %016lx", 584 slot, devctx_addr)); 585 devctx = XHCI_GADDR(sc, devctx_addr & ~0x3FUL); 586 587 return (devctx); 588 } 589 590 struct xhci_trb * 591 pci_xhci_trb_next(struct pci_xhci_softc *sc, struct xhci_trb *curtrb, 592 uint64_t *guestaddr) 593 { 594 struct xhci_trb *next; 595 596 assert(curtrb != NULL); 597 598 if (XHCI_TRB_3_TYPE_GET(curtrb->dwTrb3) == XHCI_TRB_TYPE_LINK) { 599 if (guestaddr) 600 *guestaddr = curtrb->qwTrb0 & ~0xFUL; 601 602 next = XHCI_GADDR(sc, curtrb->qwTrb0 & ~0xFUL); 603 } else { 604 if (guestaddr) 605 *guestaddr += sizeof(struct xhci_trb) & ~0xFUL; 606 607 next = curtrb + 1; 608 } 609 610 return (next); 611 } 612 613 static void 614 pci_xhci_assert_interrupt(struct pci_xhci_softc *sc) 615 { 616 617 sc->rtsregs.intrreg.erdp |= XHCI_ERDP_LO_BUSY; 618 sc->rtsregs.intrreg.iman |= XHCI_IMAN_INTR_PEND; 619 sc->opregs.usbsts |= XHCI_STS_EINT; 620 621 /* only trigger interrupt if permitted */ 622 if ((sc->opregs.usbcmd & XHCI_CMD_INTE) && 623 (sc->rtsregs.intrreg.iman & XHCI_IMAN_INTR_ENA)) { 624 if (pci_msi_enabled(sc->xsc_pi)) 625 pci_generate_msi(sc->xsc_pi, 0); 626 else 627 pci_lintr_assert(sc->xsc_pi); 628 } 629 } 630 631 static void 632 pci_xhci_deassert_interrupt(struct pci_xhci_softc *sc) 633 { 634 635 if (!pci_msi_enabled(sc->xsc_pi)) 636 pci_lintr_assert(sc->xsc_pi); 637 } 638 639 static void 640 pci_xhci_init_ep(struct pci_xhci_dev_emu *dev, int epid) 641 { 642 struct xhci_dev_ctx *dev_ctx; 643 struct pci_xhci_dev_ep *devep; 644 struct xhci_endp_ctx *ep_ctx; 645 uint32_t pstreams; 646 int i; 647 648 dev_ctx = dev->dev_ctx; 649 ep_ctx = &dev_ctx->ctx_ep[epid]; 650 devep = &dev->eps[epid]; 651 pstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0); 652 if (pstreams > 0) { 653 DPRINTF(("init_ep %d with pstreams %d", epid, pstreams)); 654 assert(devep->ep_sctx_trbs == NULL); 655 656 devep->ep_sctx = XHCI_GADDR(dev->xsc, ep_ctx->qwEpCtx2 & 657 XHCI_EPCTX_2_TR_DQ_PTR_MASK); 658 devep->ep_sctx_trbs = calloc(pstreams, 659 sizeof(struct pci_xhci_trb_ring)); 660 for (i = 0; i < pstreams; i++) { 661 devep->ep_sctx_trbs[i].ringaddr = 662 devep->ep_sctx[i].qwSctx0 & 663 XHCI_SCTX_0_TR_DQ_PTR_MASK; 664 devep->ep_sctx_trbs[i].ccs = 665 XHCI_SCTX_0_DCS_GET(devep->ep_sctx[i].qwSctx0); 666 } 667 } else { 668 DPRINTF(("init_ep %d with no pstreams", epid)); 669 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & 670 XHCI_EPCTX_2_TR_DQ_PTR_MASK; 671 devep->ep_ccs = XHCI_EPCTX_2_DCS_GET(ep_ctx->qwEpCtx2); 672 devep->ep_tr = XHCI_GADDR(dev->xsc, devep->ep_ringaddr); 673 DPRINTF(("init_ep tr DCS %x", devep->ep_ccs)); 674 } 675 676 if (devep->ep_xfer == NULL) { 677 devep->ep_xfer = malloc(sizeof(struct usb_data_xfer)); 678 USB_DATA_XFER_INIT(devep->ep_xfer); 679 } 680 } 681 682 static void 683 pci_xhci_disable_ep(struct pci_xhci_dev_emu *dev, int epid) 684 { 685 struct xhci_dev_ctx *dev_ctx; 686 struct pci_xhci_dev_ep *devep; 687 struct xhci_endp_ctx *ep_ctx; 688 689 DPRINTF(("pci_xhci disable_ep %d", epid)); 690 691 dev_ctx = dev->dev_ctx; 692 ep_ctx = &dev_ctx->ctx_ep[epid]; 693 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_DISABLED; 694 695 devep = &dev->eps[epid]; 696 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0 && 697 devep->ep_sctx_trbs != NULL) 698 free(devep->ep_sctx_trbs); 699 700 if (devep->ep_xfer != NULL) { 701 free(devep->ep_xfer); 702 devep->ep_xfer = NULL; 703 } 704 705 memset(devep, 0, sizeof(struct pci_xhci_dev_ep)); 706 } 707 708 709 /* reset device at slot and data structures related to it */ 710 static void 711 pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot) 712 { 713 struct pci_xhci_dev_emu *dev; 714 715 dev = XHCI_SLOTDEV_PTR(sc, slot); 716 717 if (!dev) { 718 DPRINTF(("xhci reset unassigned slot (%d)?", slot)); 719 } else { 720 dev->dev_slotstate = XHCI_ST_DISABLED; 721 } 722 723 /* TODO: reset ring buffer pointers */ 724 } 725 726 static int 727 pci_xhci_insert_event(struct pci_xhci_softc *sc, struct xhci_trb *evtrb, 728 int do_intr) 729 { 730 struct pci_xhci_rtsregs *rts; 731 uint64_t erdp; 732 int erdp_idx; 733 int err; 734 struct xhci_trb *evtrbptr; 735 736 err = XHCI_TRB_ERROR_SUCCESS; 737 738 rts = &sc->rtsregs; 739 740 erdp = rts->intrreg.erdp & ~0xF; 741 erdp_idx = (erdp - rts->erstba_p[rts->er_deq_seg].qwEvrsTablePtr) / 742 sizeof(struct xhci_trb); 743 744 DPRINTF(("pci_xhci: insert event 0[%lx] 2[%x] 3[%x]", 745 evtrb->qwTrb0, evtrb->dwTrb2, evtrb->dwTrb3)); 746 DPRINTF(("\terdp idx %d/seg %d, enq idx %d/seg %d, pcs %u", 747 erdp_idx, rts->er_deq_seg, rts->er_enq_idx, 748 rts->er_enq_seg, rts->event_pcs)); 749 DPRINTF(("\t(erdp=0x%lx, erst=0x%lx, tblsz=%u, do_intr %d)", 750 erdp, rts->erstba_p->qwEvrsTablePtr, 751 rts->erstba_p->dwEvrsTableSize, do_intr)); 752 753 evtrbptr = &rts->erst_p[rts->er_enq_idx]; 754 755 /* TODO: multi-segment table */ 756 if (rts->er_events_cnt >= rts->erstba_p->dwEvrsTableSize) { 757 DPRINTF(("pci_xhci[%d] cannot insert event; ring full", 758 __LINE__)); 759 err = XHCI_TRB_ERROR_EV_RING_FULL; 760 goto done; 761 } 762 763 if (rts->er_events_cnt == rts->erstba_p->dwEvrsTableSize - 1) { 764 struct xhci_trb errev; 765 766 if ((evtrbptr->dwTrb3 & 0x1) == (rts->event_pcs & 0x1)) { 767 768 DPRINTF(("pci_xhci[%d] insert evt err: ring full", 769 __LINE__)); 770 771 errev.qwTrb0 = 0; 772 errev.dwTrb2 = XHCI_TRB_2_ERROR_SET( 773 XHCI_TRB_ERROR_EV_RING_FULL); 774 errev.dwTrb3 = XHCI_TRB_3_TYPE_SET( 775 XHCI_TRB_EVENT_HOST_CTRL) | 776 rts->event_pcs; 777 rts->er_events_cnt++; 778 memcpy(&rts->erst_p[rts->er_enq_idx], &errev, 779 sizeof(struct xhci_trb)); 780 rts->er_enq_idx = (rts->er_enq_idx + 1) % 781 rts->erstba_p->dwEvrsTableSize; 782 err = XHCI_TRB_ERROR_EV_RING_FULL; 783 do_intr = 1; 784 785 goto done; 786 } 787 } else { 788 rts->er_events_cnt++; 789 } 790 791 evtrb->dwTrb3 &= ~XHCI_TRB_3_CYCLE_BIT; 792 evtrb->dwTrb3 |= rts->event_pcs; 793 794 memcpy(&rts->erst_p[rts->er_enq_idx], evtrb, sizeof(struct xhci_trb)); 795 rts->er_enq_idx = (rts->er_enq_idx + 1) % 796 rts->erstba_p->dwEvrsTableSize; 797 798 if (rts->er_enq_idx == 0) 799 rts->event_pcs ^= 1; 800 801 done: 802 if (do_intr) 803 pci_xhci_assert_interrupt(sc); 804 805 return (err); 806 } 807 808 static uint32_t 809 pci_xhci_cmd_enable_slot(struct pci_xhci_softc *sc, uint32_t *slot) 810 { 811 struct pci_xhci_dev_emu *dev; 812 uint32_t cmderr; 813 int i; 814 815 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 816 if (sc->portregs != NULL) 817 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 818 dev = XHCI_SLOTDEV_PTR(sc, i); 819 if (dev && dev->dev_slotstate == XHCI_ST_DISABLED) { 820 *slot = i; 821 dev->dev_slotstate = XHCI_ST_ENABLED; 822 cmderr = XHCI_TRB_ERROR_SUCCESS; 823 dev->hci.hci_address = i; 824 break; 825 } 826 } 827 828 DPRINTF(("pci_xhci enable slot (error=%d) slot %u", 829 cmderr != XHCI_TRB_ERROR_SUCCESS, *slot)); 830 831 return (cmderr); 832 } 833 834 static uint32_t 835 pci_xhci_cmd_disable_slot(struct pci_xhci_softc *sc, uint32_t slot) 836 { 837 struct pci_xhci_dev_emu *dev; 838 uint32_t cmderr; 839 840 DPRINTF(("pci_xhci disable slot %u", slot)); 841 842 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 843 if (sc->portregs == NULL) 844 goto done; 845 846 if (slot > sc->ndevices) { 847 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 848 goto done; 849 } 850 851 dev = XHCI_SLOTDEV_PTR(sc, slot); 852 if (dev) { 853 if (dev->dev_slotstate == XHCI_ST_DISABLED) { 854 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 855 } else { 856 dev->dev_slotstate = XHCI_ST_DISABLED; 857 cmderr = XHCI_TRB_ERROR_SUCCESS; 858 /* TODO: reset events and endpoints */ 859 } 860 } 861 862 done: 863 return (cmderr); 864 } 865 866 static uint32_t 867 pci_xhci_cmd_reset_device(struct pci_xhci_softc *sc, uint32_t slot) 868 { 869 struct pci_xhci_dev_emu *dev; 870 struct xhci_dev_ctx *dev_ctx; 871 struct xhci_endp_ctx *ep_ctx; 872 uint32_t cmderr; 873 int i; 874 875 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 876 if (sc->portregs == NULL) 877 goto done; 878 879 DPRINTF(("pci_xhci reset device slot %u", slot)); 880 881 dev = XHCI_SLOTDEV_PTR(sc, slot); 882 if (!dev || dev->dev_slotstate == XHCI_ST_DISABLED) 883 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 884 else { 885 dev->dev_slotstate = XHCI_ST_DEFAULT; 886 887 dev->hci.hci_address = 0; 888 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 889 890 /* slot state */ 891 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 892 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_DEFAULT, 893 0x1F, 27); 894 895 /* number of contexts */ 896 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE( 897 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); 898 899 /* reset all eps other than ep-0 */ 900 for (i = 2; i <= 31; i++) { 901 ep_ctx = &dev_ctx->ctx_ep[i]; 902 ep_ctx->dwEpCtx0 = FIELD_REPLACE( ep_ctx->dwEpCtx0, 903 XHCI_ST_EPCTX_DISABLED, 0x7, 0); 904 } 905 906 cmderr = XHCI_TRB_ERROR_SUCCESS; 907 } 908 909 pci_xhci_reset_slot(sc, slot); 910 911 done: 912 return (cmderr); 913 } 914 915 static uint32_t 916 pci_xhci_cmd_address_device(struct pci_xhci_softc *sc, uint32_t slot, 917 struct xhci_trb *trb) 918 { 919 struct pci_xhci_dev_emu *dev; 920 struct xhci_input_dev_ctx *input_ctx; 921 struct xhci_slot_ctx *islot_ctx; 922 struct xhci_dev_ctx *dev_ctx; 923 struct xhci_endp_ctx *ep0_ctx; 924 uint32_t cmderr; 925 926 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 927 islot_ctx = &input_ctx->ctx_slot; 928 ep0_ctx = &input_ctx->ctx_ep[1]; 929 930 cmderr = XHCI_TRB_ERROR_SUCCESS; 931 932 DPRINTF(("pci_xhci: address device, input ctl: D 0x%08x A 0x%08x,", 933 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1)); 934 DPRINTF((" slot %08x %08x %08x %08x", 935 islot_ctx->dwSctx0, islot_ctx->dwSctx1, 936 islot_ctx->dwSctx2, islot_ctx->dwSctx3)); 937 DPRINTF((" ep0 %08x %08x %016lx %08x", 938 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 939 ep0_ctx->dwEpCtx4)); 940 941 /* when setting address: drop-ctx=0, add-ctx=slot+ep0 */ 942 if ((input_ctx->ctx_input.dwInCtx0 != 0) || 943 (input_ctx->ctx_input.dwInCtx1 & 0x03) != 0x03) { 944 DPRINTF(("pci_xhci: address device, input ctl invalid")); 945 cmderr = XHCI_TRB_ERROR_TRB; 946 goto done; 947 } 948 949 /* assign address to slot */ 950 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 951 952 DPRINTF(("pci_xhci: address device, dev ctx")); 953 DPRINTF((" slot %08x %08x %08x %08x", 954 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 955 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 956 957 dev = XHCI_SLOTDEV_PTR(sc, slot); 958 assert(dev != NULL); 959 960 dev->hci.hci_address = slot; 961 dev->dev_ctx = dev_ctx; 962 963 if (dev->dev_ue->ue_reset == NULL || 964 dev->dev_ue->ue_reset(dev->dev_sc) < 0) { 965 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 966 goto done; 967 } 968 969 memcpy(&dev_ctx->ctx_slot, islot_ctx, sizeof(struct xhci_slot_ctx)); 970 971 dev_ctx->ctx_slot.dwSctx3 = 972 XHCI_SCTX_3_SLOT_STATE_SET(XHCI_ST_SLCTX_ADDRESSED) | 973 XHCI_SCTX_3_DEV_ADDR_SET(slot); 974 975 memcpy(&dev_ctx->ctx_ep[1], ep0_ctx, sizeof(struct xhci_endp_ctx)); 976 ep0_ctx = &dev_ctx->ctx_ep[1]; 977 ep0_ctx->dwEpCtx0 = (ep0_ctx->dwEpCtx0 & ~0x7) | 978 XHCI_EPCTX_0_EPSTATE_SET(XHCI_ST_EPCTX_RUNNING); 979 980 pci_xhci_init_ep(dev, 1); 981 982 dev->dev_slotstate = XHCI_ST_ADDRESSED; 983 984 DPRINTF(("pci_xhci: address device, output ctx")); 985 DPRINTF((" slot %08x %08x %08x %08x", 986 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 987 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 988 DPRINTF((" ep0 %08x %08x %016lx %08x", 989 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 990 ep0_ctx->dwEpCtx4)); 991 992 done: 993 return (cmderr); 994 } 995 996 static uint32_t 997 pci_xhci_cmd_config_ep(struct pci_xhci_softc *sc, uint32_t slot, 998 struct xhci_trb *trb) 999 { 1000 struct xhci_input_dev_ctx *input_ctx; 1001 struct pci_xhci_dev_emu *dev; 1002 struct xhci_dev_ctx *dev_ctx; 1003 struct xhci_endp_ctx *ep_ctx, *iep_ctx; 1004 uint32_t cmderr; 1005 int i; 1006 1007 cmderr = XHCI_TRB_ERROR_SUCCESS; 1008 1009 DPRINTF(("pci_xhci config_ep slot %u", slot)); 1010 1011 dev = XHCI_SLOTDEV_PTR(sc, slot); 1012 assert(dev != NULL); 1013 1014 if ((trb->dwTrb3 & XHCI_TRB_3_DCEP_BIT) != 0) { 1015 DPRINTF(("pci_xhci config_ep - deconfigure ep slot %u", 1016 slot)); 1017 if (dev->dev_ue->ue_stop != NULL) 1018 dev->dev_ue->ue_stop(dev->dev_sc); 1019 1020 dev->dev_slotstate = XHCI_ST_ADDRESSED; 1021 1022 dev->hci.hci_address = 0; 1023 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1024 1025 /* number of contexts */ 1026 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE( 1027 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); 1028 1029 /* slot state */ 1030 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1031 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_ADDRESSED, 1032 0x1F, 27); 1033 1034 /* disable endpoints */ 1035 for (i = 2; i < 32; i++) 1036 pci_xhci_disable_ep(dev, i); 1037 1038 cmderr = XHCI_TRB_ERROR_SUCCESS; 1039 1040 goto done; 1041 } 1042 1043 if (dev->dev_slotstate < XHCI_ST_ADDRESSED) { 1044 DPRINTF(("pci_xhci: config_ep slotstate x%x != addressed", 1045 dev->dev_slotstate)); 1046 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 1047 goto done; 1048 } 1049 1050 /* In addressed/configured state; 1051 * for each drop endpoint ctx flag: 1052 * ep->state = DISABLED 1053 * for each add endpoint ctx flag: 1054 * cp(ep-in, ep-out) 1055 * ep->state = RUNNING 1056 * for each drop+add endpoint flag: 1057 * reset ep resources 1058 * cp(ep-in, ep-out) 1059 * ep->state = RUNNING 1060 * if input->DisabledCtx[2-31] < 30: (at least 1 ep not disabled) 1061 * slot->state = configured 1062 */ 1063 1064 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1065 dev_ctx = dev->dev_ctx; 1066 DPRINTF(("pci_xhci: config_ep inputctx: D:x%08x A:x%08x 7:x%08x", 1067 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1, 1068 input_ctx->ctx_input.dwInCtx7)); 1069 1070 for (i = 2; i <= 31; i++) { 1071 ep_ctx = &dev_ctx->ctx_ep[i]; 1072 1073 if (input_ctx->ctx_input.dwInCtx0 & 1074 XHCI_INCTX_0_DROP_MASK(i)) { 1075 DPRINTF((" config ep - dropping ep %d", i)); 1076 pci_xhci_disable_ep(dev, i); 1077 } 1078 1079 if (input_ctx->ctx_input.dwInCtx1 & 1080 XHCI_INCTX_1_ADD_MASK(i)) { 1081 iep_ctx = &input_ctx->ctx_ep[i]; 1082 1083 DPRINTF((" enable ep[%d] %08x %08x %016lx %08x", 1084 i, iep_ctx->dwEpCtx0, iep_ctx->dwEpCtx1, 1085 iep_ctx->qwEpCtx2, iep_ctx->dwEpCtx4)); 1086 1087 memcpy(ep_ctx, iep_ctx, sizeof(struct xhci_endp_ctx)); 1088 1089 pci_xhci_init_ep(dev, i); 1090 1091 /* ep state */ 1092 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1093 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1094 } 1095 } 1096 1097 /* slot state to configured */ 1098 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1099 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_CONFIGURED, 0x1F, 27); 1100 dev_ctx->ctx_slot.dwSctx0 = FIELD_COPY( 1101 dev_ctx->ctx_slot.dwSctx0, input_ctx->ctx_slot.dwSctx0, 0x1F, 27); 1102 dev->dev_slotstate = XHCI_ST_CONFIGURED; 1103 1104 DPRINTF(("EP configured; slot %u [0]=0x%08x [1]=0x%08x [2]=0x%08x " 1105 "[3]=0x%08x", 1106 slot, dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1107 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1108 1109 done: 1110 return (cmderr); 1111 } 1112 1113 static uint32_t 1114 pci_xhci_cmd_reset_ep(struct pci_xhci_softc *sc, uint32_t slot, 1115 struct xhci_trb *trb) 1116 { 1117 struct pci_xhci_dev_emu *dev; 1118 struct pci_xhci_dev_ep *devep; 1119 struct xhci_dev_ctx *dev_ctx; 1120 struct xhci_endp_ctx *ep_ctx; 1121 uint32_t cmderr, epid; 1122 uint32_t type; 1123 1124 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1125 1126 DPRINTF(("pci_xhci: reset ep %u: slot %u", epid, slot)); 1127 1128 cmderr = XHCI_TRB_ERROR_SUCCESS; 1129 1130 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1131 1132 dev = XHCI_SLOTDEV_PTR(sc, slot); 1133 assert(dev != NULL); 1134 1135 if (type == XHCI_TRB_TYPE_STOP_EP && 1136 (trb->dwTrb3 & XHCI_TRB_3_SUSP_EP_BIT) != 0) { 1137 /* XXX suspend endpoint for 10ms */ 1138 } 1139 1140 if (epid < 1 || epid > 31) { 1141 DPRINTF(("pci_xhci: reset ep: invalid epid %u", epid)); 1142 cmderr = XHCI_TRB_ERROR_TRB; 1143 goto done; 1144 } 1145 1146 devep = &dev->eps[epid]; 1147 if (devep->ep_xfer != NULL) 1148 USB_DATA_XFER_RESET(devep->ep_xfer); 1149 1150 dev_ctx = dev->dev_ctx; 1151 assert(dev_ctx != NULL); 1152 1153 ep_ctx = &dev_ctx->ctx_ep[epid]; 1154 1155 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1156 1157 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) == 0) 1158 ep_ctx->qwEpCtx2 = devep->ep_ringaddr | devep->ep_ccs; 1159 1160 DPRINTF(("pci_xhci: reset ep[%u] %08x %08x %016lx %08x", 1161 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1162 ep_ctx->dwEpCtx4)); 1163 1164 if (type == XHCI_TRB_TYPE_RESET_EP && 1165 (dev->dev_ue->ue_reset == NULL || 1166 dev->dev_ue->ue_reset(dev->dev_sc) < 0)) { 1167 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 1168 goto done; 1169 } 1170 1171 done: 1172 return (cmderr); 1173 } 1174 1175 1176 static uint32_t 1177 pci_xhci_find_stream(struct pci_xhci_softc *sc, struct xhci_endp_ctx *ep, 1178 uint32_t streamid, struct xhci_stream_ctx **osctx) 1179 { 1180 struct xhci_stream_ctx *sctx; 1181 uint32_t maxpstreams; 1182 1183 maxpstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep->dwEpCtx0); 1184 if (maxpstreams == 0) 1185 return (XHCI_TRB_ERROR_TRB); 1186 1187 if (maxpstreams > XHCI_STREAMS_MAX) 1188 return (XHCI_TRB_ERROR_INVALID_SID); 1189 1190 if (XHCI_EPCTX_0_LSA_GET(ep->dwEpCtx0) == 0) { 1191 DPRINTF(("pci_xhci: find_stream; LSA bit not set")); 1192 return (XHCI_TRB_ERROR_INVALID_SID); 1193 } 1194 1195 /* only support primary stream */ 1196 if (streamid > maxpstreams) 1197 return (XHCI_TRB_ERROR_STREAM_TYPE); 1198 1199 sctx = XHCI_GADDR(sc, ep->qwEpCtx2 & ~0xFUL) + streamid; 1200 if (!XHCI_SCTX_0_SCT_GET(sctx->qwSctx0)) 1201 return (XHCI_TRB_ERROR_STREAM_TYPE); 1202 1203 *osctx = sctx; 1204 1205 return (XHCI_TRB_ERROR_SUCCESS); 1206 } 1207 1208 1209 static uint32_t 1210 pci_xhci_cmd_set_tr(struct pci_xhci_softc *sc, uint32_t slot, 1211 struct xhci_trb *trb) 1212 { 1213 struct pci_xhci_dev_emu *dev; 1214 struct pci_xhci_dev_ep *devep; 1215 struct xhci_dev_ctx *dev_ctx; 1216 struct xhci_endp_ctx *ep_ctx; 1217 uint32_t cmderr, epid; 1218 uint32_t streamid; 1219 1220 cmderr = XHCI_TRB_ERROR_SUCCESS; 1221 1222 dev = XHCI_SLOTDEV_PTR(sc, slot); 1223 assert(dev != NULL); 1224 1225 DPRINTF(("pci_xhci set_tr: new-tr x%016lx, SCT %u DCS %u", 1226 (trb->qwTrb0 & ~0xF), (uint32_t)((trb->qwTrb0 >> 1) & 0x7), 1227 (uint32_t)(trb->qwTrb0 & 0x1))); 1228 DPRINTF((" stream-id %u, slot %u, epid %u, C %u", 1229 (trb->dwTrb2 >> 16) & 0xFFFF, 1230 XHCI_TRB_3_SLOT_GET(trb->dwTrb3), 1231 XHCI_TRB_3_EP_GET(trb->dwTrb3), trb->dwTrb3 & 0x1)); 1232 1233 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1234 if (epid < 1 || epid > 31) { 1235 DPRINTF(("pci_xhci: set_tr_deq: invalid epid %u", epid)); 1236 cmderr = XHCI_TRB_ERROR_TRB; 1237 goto done; 1238 } 1239 1240 dev_ctx = dev->dev_ctx; 1241 assert(dev_ctx != NULL); 1242 1243 ep_ctx = &dev_ctx->ctx_ep[epid]; 1244 devep = &dev->eps[epid]; 1245 1246 switch (XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)) { 1247 case XHCI_ST_EPCTX_STOPPED: 1248 case XHCI_ST_EPCTX_ERROR: 1249 break; 1250 default: 1251 DPRINTF(("pci_xhci cmd set_tr invalid state %x", 1252 XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0))); 1253 cmderr = XHCI_TRB_ERROR_CONTEXT_STATE; 1254 goto done; 1255 } 1256 1257 streamid = XHCI_TRB_2_STREAM_GET(trb->dwTrb2); 1258 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0) { 1259 struct xhci_stream_ctx *sctx; 1260 1261 sctx = NULL; 1262 cmderr = pci_xhci_find_stream(sc, ep_ctx, streamid, &sctx); 1263 if (sctx != NULL) { 1264 assert(devep->ep_sctx != NULL); 1265 1266 devep->ep_sctx[streamid].qwSctx0 = trb->qwTrb0; 1267 devep->ep_sctx_trbs[streamid].ringaddr = 1268 trb->qwTrb0 & ~0xF; 1269 devep->ep_sctx_trbs[streamid].ccs = 1270 XHCI_EPCTX_2_DCS_GET(trb->qwTrb0); 1271 } 1272 } else { 1273 if (streamid != 0) { 1274 DPRINTF(("pci_xhci cmd set_tr streamid %x != 0", 1275 streamid)); 1276 } 1277 ep_ctx->qwEpCtx2 = trb->qwTrb0 & ~0xFUL; 1278 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & ~0xFUL; 1279 devep->ep_ccs = trb->qwTrb0 & 0x1; 1280 devep->ep_tr = XHCI_GADDR(sc, devep->ep_ringaddr); 1281 1282 DPRINTF(("pci_xhci set_tr first TRB:")); 1283 pci_xhci_dump_trb(devep->ep_tr); 1284 } 1285 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1286 1287 done: 1288 return (cmderr); 1289 } 1290 1291 static uint32_t 1292 pci_xhci_cmd_eval_ctx(struct pci_xhci_softc *sc, uint32_t slot, 1293 struct xhci_trb *trb) 1294 { 1295 struct xhci_input_dev_ctx *input_ctx; 1296 struct xhci_slot_ctx *islot_ctx; 1297 struct xhci_dev_ctx *dev_ctx; 1298 struct xhci_endp_ctx *ep0_ctx; 1299 uint32_t cmderr; 1300 1301 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1302 islot_ctx = &input_ctx->ctx_slot; 1303 ep0_ctx = &input_ctx->ctx_ep[1]; 1304 1305 cmderr = XHCI_TRB_ERROR_SUCCESS; 1306 DPRINTF(("pci_xhci: eval ctx, input ctl: D 0x%08x A 0x%08x,", 1307 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1)); 1308 DPRINTF((" slot %08x %08x %08x %08x", 1309 islot_ctx->dwSctx0, islot_ctx->dwSctx1, 1310 islot_ctx->dwSctx2, islot_ctx->dwSctx3)); 1311 DPRINTF((" ep0 %08x %08x %016lx %08x", 1312 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1313 ep0_ctx->dwEpCtx4)); 1314 1315 /* this command expects drop-ctx=0 & add-ctx=slot+ep0 */ 1316 if ((input_ctx->ctx_input.dwInCtx0 != 0) || 1317 (input_ctx->ctx_input.dwInCtx1 & 0x03) == 0) { 1318 DPRINTF(("pci_xhci: eval ctx, input ctl invalid")); 1319 cmderr = XHCI_TRB_ERROR_TRB; 1320 goto done; 1321 } 1322 1323 /* assign address to slot; in this emulation, slot_id = address */ 1324 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1325 1326 DPRINTF(("pci_xhci: eval ctx, dev ctx")); 1327 DPRINTF((" slot %08x %08x %08x %08x", 1328 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1329 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1330 1331 if (input_ctx->ctx_input.dwInCtx1 & 0x01) { /* slot ctx */ 1332 /* set max exit latency */ 1333 dev_ctx->ctx_slot.dwSctx1 = FIELD_COPY( 1334 dev_ctx->ctx_slot.dwSctx1, input_ctx->ctx_slot.dwSctx1, 1335 0xFFFF, 0); 1336 1337 /* set interrupter target */ 1338 dev_ctx->ctx_slot.dwSctx2 = FIELD_COPY( 1339 dev_ctx->ctx_slot.dwSctx2, input_ctx->ctx_slot.dwSctx2, 1340 0x3FF, 22); 1341 } 1342 if (input_ctx->ctx_input.dwInCtx1 & 0x02) { /* control ctx */ 1343 /* set max packet size */ 1344 dev_ctx->ctx_ep[1].dwEpCtx1 = FIELD_COPY( 1345 dev_ctx->ctx_ep[1].dwEpCtx1, ep0_ctx->dwEpCtx1, 1346 0xFFFF, 16); 1347 1348 ep0_ctx = &dev_ctx->ctx_ep[1]; 1349 } 1350 1351 DPRINTF(("pci_xhci: eval ctx, output ctx")); 1352 DPRINTF((" slot %08x %08x %08x %08x", 1353 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1354 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1355 DPRINTF((" ep0 %08x %08x %016lx %08x", 1356 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1357 ep0_ctx->dwEpCtx4)); 1358 1359 done: 1360 return (cmderr); 1361 } 1362 1363 static int 1364 pci_xhci_complete_commands(struct pci_xhci_softc *sc) 1365 { 1366 struct xhci_trb evtrb; 1367 struct xhci_trb *trb; 1368 uint64_t crcr; 1369 uint32_t ccs; /* cycle state (XHCI 4.9.2) */ 1370 uint32_t type; 1371 uint32_t slot; 1372 uint32_t cmderr; 1373 int error; 1374 1375 error = 0; 1376 sc->opregs.crcr |= XHCI_CRCR_LO_CRR; 1377 1378 trb = sc->opregs.cr_p; 1379 ccs = sc->opregs.crcr & XHCI_CRCR_LO_RCS; 1380 crcr = sc->opregs.crcr & ~0xF; 1381 1382 while (1) { 1383 sc->opregs.cr_p = trb; 1384 1385 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1386 1387 if ((trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT) != 1388 (ccs & XHCI_TRB_3_CYCLE_BIT)) 1389 break; 1390 1391 DPRINTF(("pci_xhci: cmd type 0x%x, Trb0 x%016lx dwTrb2 x%08x" 1392 " dwTrb3 x%08x, TRB_CYCLE %u/ccs %u", 1393 type, trb->qwTrb0, trb->dwTrb2, trb->dwTrb3, 1394 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT, ccs)); 1395 1396 cmderr = XHCI_TRB_ERROR_SUCCESS; 1397 evtrb.dwTrb2 = 0; 1398 evtrb.dwTrb3 = (ccs & XHCI_TRB_3_CYCLE_BIT) | 1399 XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_CMD_COMPLETE); 1400 slot = 0; 1401 1402 switch (type) { 1403 case XHCI_TRB_TYPE_LINK: /* 0x06 */ 1404 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1405 ccs ^= XHCI_CRCR_LO_RCS; 1406 break; 1407 1408 case XHCI_TRB_TYPE_ENABLE_SLOT: /* 0x09 */ 1409 cmderr = pci_xhci_cmd_enable_slot(sc, &slot); 1410 break; 1411 1412 case XHCI_TRB_TYPE_DISABLE_SLOT: /* 0x0A */ 1413 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1414 cmderr = pci_xhci_cmd_disable_slot(sc, slot); 1415 break; 1416 1417 case XHCI_TRB_TYPE_ADDRESS_DEVICE: /* 0x0B */ 1418 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1419 cmderr = pci_xhci_cmd_address_device(sc, slot, trb); 1420 break; 1421 1422 case XHCI_TRB_TYPE_CONFIGURE_EP: /* 0x0C */ 1423 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1424 cmderr = pci_xhci_cmd_config_ep(sc, slot, trb); 1425 break; 1426 1427 case XHCI_TRB_TYPE_EVALUATE_CTX: /* 0x0D */ 1428 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1429 cmderr = pci_xhci_cmd_eval_ctx(sc, slot, trb); 1430 break; 1431 1432 case XHCI_TRB_TYPE_RESET_EP: /* 0x0E */ 1433 DPRINTF(("Reset Endpoint on slot %d", slot)); 1434 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1435 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1436 break; 1437 1438 case XHCI_TRB_TYPE_STOP_EP: /* 0x0F */ 1439 DPRINTF(("Stop Endpoint on slot %d", slot)); 1440 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1441 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1442 break; 1443 1444 case XHCI_TRB_TYPE_SET_TR_DEQUEUE: /* 0x10 */ 1445 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1446 cmderr = pci_xhci_cmd_set_tr(sc, slot, trb); 1447 break; 1448 1449 case XHCI_TRB_TYPE_RESET_DEVICE: /* 0x11 */ 1450 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1451 cmderr = pci_xhci_cmd_reset_device(sc, slot); 1452 break; 1453 1454 case XHCI_TRB_TYPE_FORCE_EVENT: /* 0x12 */ 1455 /* TODO: */ 1456 break; 1457 1458 case XHCI_TRB_TYPE_NEGOTIATE_BW: /* 0x13 */ 1459 break; 1460 1461 case XHCI_TRB_TYPE_SET_LATENCY_TOL: /* 0x14 */ 1462 break; 1463 1464 case XHCI_TRB_TYPE_GET_PORT_BW: /* 0x15 */ 1465 break; 1466 1467 case XHCI_TRB_TYPE_FORCE_HEADER: /* 0x16 */ 1468 break; 1469 1470 case XHCI_TRB_TYPE_NOOP_CMD: /* 0x17 */ 1471 break; 1472 1473 default: 1474 DPRINTF(("pci_xhci: unsupported cmd %x", type)); 1475 break; 1476 } 1477 1478 if (type != XHCI_TRB_TYPE_LINK) { 1479 /* 1480 * insert command completion event and assert intr 1481 */ 1482 evtrb.qwTrb0 = crcr; 1483 evtrb.dwTrb2 |= XHCI_TRB_2_ERROR_SET(cmderr); 1484 evtrb.dwTrb3 |= XHCI_TRB_3_SLOT_SET(slot); 1485 DPRINTF(("pci_xhci: command 0x%x result: 0x%x", 1486 type, cmderr)); 1487 pci_xhci_insert_event(sc, &evtrb, 1); 1488 } 1489 1490 trb = pci_xhci_trb_next(sc, trb, &crcr); 1491 } 1492 1493 sc->opregs.crcr = crcr | (sc->opregs.crcr & XHCI_CRCR_LO_CA) | ccs; 1494 sc->opregs.crcr &= ~XHCI_CRCR_LO_CRR; 1495 return (error); 1496 } 1497 1498 static void 1499 pci_xhci_dump_trb(struct xhci_trb *trb) 1500 { 1501 static const char *trbtypes[] = { 1502 "RESERVED", 1503 "NORMAL", 1504 "SETUP_STAGE", 1505 "DATA_STAGE", 1506 "STATUS_STAGE", 1507 "ISOCH", 1508 "LINK", 1509 "EVENT_DATA", 1510 "NOOP", 1511 "ENABLE_SLOT", 1512 "DISABLE_SLOT", 1513 "ADDRESS_DEVICE", 1514 "CONFIGURE_EP", 1515 "EVALUATE_CTX", 1516 "RESET_EP", 1517 "STOP_EP", 1518 "SET_TR_DEQUEUE", 1519 "RESET_DEVICE", 1520 "FORCE_EVENT", 1521 "NEGOTIATE_BW", 1522 "SET_LATENCY_TOL", 1523 "GET_PORT_BW", 1524 "FORCE_HEADER", 1525 "NOOP_CMD" 1526 }; 1527 uint32_t type; 1528 1529 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1530 DPRINTF(("pci_xhci: trb[@%p] type x%02x %s 0:x%016lx 2:x%08x 3:x%08x", 1531 trb, type, 1532 type <= XHCI_TRB_TYPE_NOOP_CMD ? trbtypes[type] : "INVALID", 1533 trb->qwTrb0, trb->dwTrb2, trb->dwTrb3)); 1534 } 1535 1536 static int 1537 pci_xhci_xfer_complete(struct pci_xhci_softc *sc, struct usb_data_xfer *xfer, 1538 uint32_t slot, uint32_t epid, int *do_intr) 1539 { 1540 struct pci_xhci_dev_emu *dev; 1541 struct pci_xhci_dev_ep *devep; 1542 struct xhci_dev_ctx *dev_ctx; 1543 struct xhci_endp_ctx *ep_ctx; 1544 struct xhci_trb *trb; 1545 struct xhci_trb evtrb; 1546 uint32_t trbflags; 1547 uint32_t edtla; 1548 int i, err; 1549 1550 dev = XHCI_SLOTDEV_PTR(sc, slot); 1551 devep = &dev->eps[epid]; 1552 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1553 1554 assert(dev_ctx != NULL); 1555 1556 ep_ctx = &dev_ctx->ctx_ep[epid]; 1557 1558 err = XHCI_TRB_ERROR_SUCCESS; 1559 *do_intr = 0; 1560 edtla = 0; 1561 1562 /* go through list of TRBs and insert event(s) */ 1563 for (i = xfer->head; xfer->ndata > 0; ) { 1564 evtrb.qwTrb0 = (uint64_t)xfer->data[i].hci_data; 1565 trb = XHCI_GADDR(sc, evtrb.qwTrb0); 1566 trbflags = trb->dwTrb3; 1567 1568 DPRINTF(("pci_xhci: xfer[%d] done?%u:%d trb %x %016lx %x " 1569 "(err %d) IOC?%d", 1570 i, xfer->data[i].processed, xfer->data[i].blen, 1571 XHCI_TRB_3_TYPE_GET(trbflags), evtrb.qwTrb0, 1572 trbflags, err, 1573 trb->dwTrb3 & XHCI_TRB_3_IOC_BIT ? 1 : 0)); 1574 1575 if (!xfer->data[i].processed) { 1576 xfer->head = i; 1577 break; 1578 } 1579 1580 xfer->ndata--; 1581 edtla += xfer->data[i].bdone; 1582 1583 trb->dwTrb3 = (trb->dwTrb3 & ~0x1) | (xfer->data[i].ccs); 1584 1585 pci_xhci_update_ep_ring(sc, dev, devep, ep_ctx, 1586 xfer->data[i].streamid, xfer->data[i].trbnext, 1587 xfer->data[i].ccs); 1588 1589 /* Only interrupt if IOC or short packet */ 1590 if (!(trb->dwTrb3 & XHCI_TRB_3_IOC_BIT) && 1591 !((err == XHCI_TRB_ERROR_SHORT_PKT) && 1592 (trb->dwTrb3 & XHCI_TRB_3_ISP_BIT))) { 1593 1594 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1595 continue; 1596 } 1597 1598 evtrb.dwTrb2 = XHCI_TRB_2_ERROR_SET(err) | 1599 XHCI_TRB_2_REM_SET(xfer->data[i].blen); 1600 1601 evtrb.dwTrb3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_TRANSFER) | 1602 XHCI_TRB_3_SLOT_SET(slot) | XHCI_TRB_3_EP_SET(epid); 1603 1604 if (XHCI_TRB_3_TYPE_GET(trbflags) == XHCI_TRB_TYPE_EVENT_DATA) { 1605 DPRINTF(("pci_xhci EVENT_DATA edtla %u", edtla)); 1606 evtrb.qwTrb0 = trb->qwTrb0; 1607 evtrb.dwTrb2 = (edtla & 0xFFFFF) | 1608 XHCI_TRB_2_ERROR_SET(err); 1609 evtrb.dwTrb3 |= XHCI_TRB_3_ED_BIT; 1610 edtla = 0; 1611 } 1612 1613 *do_intr = 1; 1614 1615 err = pci_xhci_insert_event(sc, &evtrb, 0); 1616 if (err != XHCI_TRB_ERROR_SUCCESS) { 1617 break; 1618 } 1619 1620 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1621 } 1622 1623 return (err); 1624 } 1625 1626 static void 1627 pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, struct pci_xhci_dev_emu *dev, 1628 struct pci_xhci_dev_ep *devep, struct xhci_endp_ctx *ep_ctx, 1629 uint32_t streamid, uint64_t ringaddr, int ccs) 1630 { 1631 1632 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) { 1633 devep->ep_sctx[streamid].qwSctx0 = (ringaddr & ~0xFUL) | 1634 (ccs & 0x1); 1635 1636 devep->ep_sctx_trbs[streamid].ringaddr = ringaddr & ~0xFUL; 1637 devep->ep_sctx_trbs[streamid].ccs = ccs & 0x1; 1638 ep_ctx->qwEpCtx2 = (ep_ctx->qwEpCtx2 & ~0x1) | (ccs & 0x1); 1639 1640 DPRINTF(("xhci update ep-ring stream %d, addr %lx", 1641 streamid, devep->ep_sctx[streamid].qwSctx0)); 1642 } else { 1643 devep->ep_ringaddr = ringaddr & ~0xFUL; 1644 devep->ep_ccs = ccs & 0x1; 1645 devep->ep_tr = XHCI_GADDR(sc, ringaddr & ~0xFUL); 1646 ep_ctx->qwEpCtx2 = (ringaddr & ~0xFUL) | (ccs & 0x1); 1647 1648 DPRINTF(("xhci update ep-ring, addr %lx", 1649 (devep->ep_ringaddr | devep->ep_ccs))); 1650 } 1651 } 1652 1653 /* 1654 * Outstanding transfer still in progress (device NAK'd earlier) so retry 1655 * the transfer again to see if it succeeds. 1656 */ 1657 static int 1658 pci_xhci_try_usb_xfer(struct pci_xhci_softc *sc, 1659 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1660 struct xhci_endp_ctx *ep_ctx, uint32_t slot, uint32_t epid) 1661 { 1662 struct usb_data_xfer *xfer; 1663 int err; 1664 int do_intr; 1665 1666 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1667 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1668 1669 err = 0; 1670 do_intr = 0; 1671 1672 xfer = devep->ep_xfer; 1673 USB_DATA_XFER_LOCK(xfer); 1674 1675 /* outstanding requests queued up */ 1676 if (dev->dev_ue->ue_data != NULL) { 1677 err = dev->dev_ue->ue_data(dev->dev_sc, xfer, 1678 epid & 0x1 ? USB_XFER_IN : USB_XFER_OUT, epid/2); 1679 if (err == USB_ERR_CANCELLED) { 1680 if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) == 1681 USB_NAK) 1682 err = XHCI_TRB_ERROR_SUCCESS; 1683 } else { 1684 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, 1685 &do_intr); 1686 if (err == XHCI_TRB_ERROR_SUCCESS && do_intr) { 1687 pci_xhci_assert_interrupt(sc); 1688 } 1689 1690 1691 /* XXX should not do it if error? */ 1692 USB_DATA_XFER_RESET(xfer); 1693 } 1694 } 1695 1696 USB_DATA_XFER_UNLOCK(xfer); 1697 1698 1699 return (err); 1700 } 1701 1702 1703 static int 1704 pci_xhci_handle_transfer(struct pci_xhci_softc *sc, 1705 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1706 struct xhci_endp_ctx *ep_ctx, struct xhci_trb *trb, uint32_t slot, 1707 uint32_t epid, uint64_t addr, uint32_t ccs, uint32_t streamid) 1708 { 1709 struct xhci_trb *setup_trb; 1710 struct usb_data_xfer *xfer; 1711 struct usb_data_xfer_block *xfer_block; 1712 uint64_t val; 1713 uint32_t trbflags; 1714 int do_intr, err; 1715 int do_retry; 1716 1717 ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0, 1718 XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1719 1720 xfer = devep->ep_xfer; 1721 USB_DATA_XFER_LOCK(xfer); 1722 1723 DPRINTF(("pci_xhci handle_transfer slot %u", slot)); 1724 1725 retry: 1726 err = 0; 1727 do_retry = 0; 1728 do_intr = 0; 1729 setup_trb = NULL; 1730 1731 while (1) { 1732 pci_xhci_dump_trb(trb); 1733 1734 trbflags = trb->dwTrb3; 1735 1736 if (XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK && 1737 (trbflags & XHCI_TRB_3_CYCLE_BIT) != 1738 (ccs & XHCI_TRB_3_CYCLE_BIT)) { 1739 DPRINTF(("Cycle-bit changed trbflags %x, ccs %x", 1740 trbflags & XHCI_TRB_3_CYCLE_BIT, ccs)); 1741 break; 1742 } 1743 1744 xfer_block = NULL; 1745 1746 switch (XHCI_TRB_3_TYPE_GET(trbflags)) { 1747 case XHCI_TRB_TYPE_LINK: 1748 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1749 ccs ^= 0x1; 1750 1751 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1752 (void *)addr, ccs); 1753 xfer_block->processed = 1; 1754 break; 1755 1756 case XHCI_TRB_TYPE_SETUP_STAGE: 1757 if ((trbflags & XHCI_TRB_3_IDT_BIT) == 0 || 1758 XHCI_TRB_2_BYTES_GET(trb->dwTrb2) != 8) { 1759 DPRINTF(("pci_xhci: invalid setup trb")); 1760 err = XHCI_TRB_ERROR_TRB; 1761 goto errout; 1762 } 1763 setup_trb = trb; 1764 1765 val = trb->qwTrb0; 1766 if (!xfer->ureq) 1767 xfer->ureq = malloc( 1768 sizeof(struct usb_device_request)); 1769 memcpy(xfer->ureq, &val, 1770 sizeof(struct usb_device_request)); 1771 1772 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1773 (void *)addr, ccs); 1774 xfer_block->processed = 1; 1775 break; 1776 1777 case XHCI_TRB_TYPE_NORMAL: 1778 case XHCI_TRB_TYPE_ISOCH: 1779 if (setup_trb != NULL) { 1780 DPRINTF(("pci_xhci: trb not supposed to be in " 1781 "ctl scope")); 1782 err = XHCI_TRB_ERROR_TRB; 1783 goto errout; 1784 } 1785 /* fall through */ 1786 1787 case XHCI_TRB_TYPE_DATA_STAGE: 1788 xfer_block = usb_data_xfer_append(xfer, 1789 (void *)(trbflags & XHCI_TRB_3_IDT_BIT ? 1790 &trb->qwTrb0 : XHCI_GADDR(sc, trb->qwTrb0)), 1791 trb->dwTrb2 & 0x1FFFF, (void *)addr, ccs); 1792 break; 1793 1794 case XHCI_TRB_TYPE_STATUS_STAGE: 1795 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1796 (void *)addr, ccs); 1797 break; 1798 1799 case XHCI_TRB_TYPE_NOOP: 1800 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1801 (void *)addr, ccs); 1802 xfer_block->processed = 1; 1803 break; 1804 1805 case XHCI_TRB_TYPE_EVENT_DATA: 1806 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1807 (void *)addr, ccs); 1808 if ((epid > 1) && (trbflags & XHCI_TRB_3_IOC_BIT)) { 1809 xfer_block->processed = 1; 1810 } 1811 break; 1812 1813 default: 1814 DPRINTF(("pci_xhci: handle xfer unexpected trb type " 1815 "0x%x", 1816 XHCI_TRB_3_TYPE_GET(trbflags))); 1817 err = XHCI_TRB_ERROR_TRB; 1818 goto errout; 1819 } 1820 1821 trb = pci_xhci_trb_next(sc, trb, &addr); 1822 1823 DPRINTF(("pci_xhci: next trb: 0x%lx", (uint64_t)trb)); 1824 1825 if (xfer_block) { 1826 xfer_block->trbnext = addr; 1827 xfer_block->streamid = streamid; 1828 } 1829 1830 if (!setup_trb && !(trbflags & XHCI_TRB_3_CHAIN_BIT) && 1831 XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK) { 1832 break; 1833 } 1834 1835 /* handle current batch that requires interrupt on complete */ 1836 if (trbflags & XHCI_TRB_3_IOC_BIT) { 1837 DPRINTF(("pci_xhci: trb IOC bit set")); 1838 if (epid == 1) 1839 do_retry = 1; 1840 break; 1841 } 1842 } 1843 1844 DPRINTF(("pci_xhci[%d]: xfer->ndata %u", __LINE__, xfer->ndata)); 1845 1846 if (epid == 1) { 1847 err = USB_ERR_NOT_STARTED; 1848 if (dev->dev_ue->ue_request != NULL) 1849 err = dev->dev_ue->ue_request(dev->dev_sc, xfer); 1850 setup_trb = NULL; 1851 } else { 1852 /* handle data transfer */ 1853 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1854 err = XHCI_TRB_ERROR_SUCCESS; 1855 goto errout; 1856 } 1857 1858 err = USB_TO_XHCI_ERR(err); 1859 if ((err == XHCI_TRB_ERROR_SUCCESS) || 1860 (err == XHCI_TRB_ERROR_SHORT_PKT)) { 1861 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, &do_intr); 1862 if (err != XHCI_TRB_ERROR_SUCCESS) 1863 do_retry = 0; 1864 } 1865 1866 errout: 1867 if (err == XHCI_TRB_ERROR_EV_RING_FULL) 1868 DPRINTF(("pci_xhci[%d]: event ring full", __LINE__)); 1869 1870 if (!do_retry) 1871 USB_DATA_XFER_UNLOCK(xfer); 1872 1873 if (do_intr) 1874 pci_xhci_assert_interrupt(sc); 1875 1876 if (do_retry) { 1877 USB_DATA_XFER_RESET(xfer); 1878 DPRINTF(("pci_xhci[%d]: retry:continuing with next TRBs", 1879 __LINE__)); 1880 goto retry; 1881 } 1882 1883 if (epid == 1) 1884 USB_DATA_XFER_RESET(xfer); 1885 1886 return (err); 1887 } 1888 1889 static void 1890 pci_xhci_device_doorbell(struct pci_xhci_softc *sc, uint32_t slot, 1891 uint32_t epid, uint32_t streamid) 1892 { 1893 struct pci_xhci_dev_emu *dev; 1894 struct pci_xhci_dev_ep *devep; 1895 struct xhci_dev_ctx *dev_ctx; 1896 struct xhci_endp_ctx *ep_ctx; 1897 struct pci_xhci_trb_ring *sctx_tr; 1898 struct xhci_trb *trb; 1899 uint64_t ringaddr; 1900 uint32_t ccs; 1901 1902 DPRINTF(("pci_xhci doorbell slot %u epid %u stream %u", 1903 slot, epid, streamid)); 1904 1905 if (slot == 0 || slot > sc->ndevices) { 1906 DPRINTF(("pci_xhci: invalid doorbell slot %u", slot)); 1907 return; 1908 } 1909 1910 if (epid == 0 || epid >= XHCI_MAX_ENDPOINTS) { 1911 DPRINTF(("pci_xhci: invalid endpoint %u", epid)); 1912 return; 1913 } 1914 1915 dev = XHCI_SLOTDEV_PTR(sc, slot); 1916 devep = &dev->eps[epid]; 1917 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1918 if (!dev_ctx) { 1919 return; 1920 } 1921 ep_ctx = &dev_ctx->ctx_ep[epid]; 1922 1923 sctx_tr = NULL; 1924 1925 DPRINTF(("pci_xhci: device doorbell ep[%u] %08x %08x %016lx %08x", 1926 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1927 ep_ctx->dwEpCtx4)); 1928 1929 if (ep_ctx->qwEpCtx2 == 0) 1930 return; 1931 1932 /* handle pending transfers */ 1933 if (devep->ep_xfer->ndata > 0) { 1934 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1935 return; 1936 } 1937 1938 /* get next trb work item */ 1939 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) { 1940 struct xhci_stream_ctx *sctx; 1941 1942 /* 1943 * Stream IDs of 0, 65535 (any stream), and 65534 1944 * (prime) are invalid. 1945 */ 1946 if (streamid == 0 || streamid == 65534 || streamid == 65535) { 1947 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1948 return; 1949 } 1950 1951 sctx = NULL; 1952 pci_xhci_find_stream(sc, ep_ctx, streamid, &sctx); 1953 if (sctx == NULL) { 1954 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1955 return; 1956 } 1957 sctx_tr = &devep->ep_sctx_trbs[streamid]; 1958 ringaddr = sctx_tr->ringaddr; 1959 ccs = sctx_tr->ccs; 1960 trb = XHCI_GADDR(sc, sctx_tr->ringaddr & ~0xFUL); 1961 DPRINTF(("doorbell, stream %u, ccs %lx, trb ccs %x", 1962 streamid, ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1963 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1964 } else { 1965 if (streamid != 0) { 1966 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1967 return; 1968 } 1969 ringaddr = devep->ep_ringaddr; 1970 ccs = devep->ep_ccs; 1971 trb = devep->ep_tr; 1972 DPRINTF(("doorbell, ccs %lx, trb ccs %x", 1973 ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1974 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1975 } 1976 1977 if (XHCI_TRB_3_TYPE_GET(trb->dwTrb3) == 0) { 1978 DPRINTF(("pci_xhci: ring %lx trb[%lx] EP %u is RESERVED?", 1979 ep_ctx->qwEpCtx2, devep->ep_ringaddr, epid)); 1980 return; 1981 } 1982 1983 pci_xhci_handle_transfer(sc, dev, devep, ep_ctx, trb, slot, epid, 1984 ringaddr, ccs, streamid); 1985 } 1986 1987 static void 1988 pci_xhci_dbregs_write(struct pci_xhci_softc *sc, uint64_t offset, 1989 uint64_t value) 1990 { 1991 1992 offset = (offset - sc->dboff) / sizeof(uint32_t); 1993 1994 DPRINTF(("pci_xhci: doorbell write offset 0x%lx: 0x%lx", 1995 offset, value)); 1996 1997 if (XHCI_HALTED(sc)) { 1998 DPRINTF(("pci_xhci: controller halted")); 1999 return; 2000 } 2001 2002 if (offset == 0) 2003 pci_xhci_complete_commands(sc); 2004 else if (sc->portregs != NULL) 2005 pci_xhci_device_doorbell(sc, offset, 2006 XHCI_DB_TARGET_GET(value), XHCI_DB_SID_GET(value)); 2007 } 2008 2009 static void 2010 pci_xhci_rtsregs_write(struct pci_xhci_softc *sc, uint64_t offset, 2011 uint64_t value) 2012 { 2013 struct pci_xhci_rtsregs *rts; 2014 2015 offset -= sc->rtsoff; 2016 2017 if (offset == 0) { 2018 DPRINTF(("pci_xhci attempted write to MFINDEX")); 2019 return; 2020 } 2021 2022 DPRINTF(("pci_xhci: runtime regs write offset 0x%lx: 0x%lx", 2023 offset, value)); 2024 2025 offset -= 0x20; /* start of intrreg */ 2026 2027 rts = &sc->rtsregs; 2028 2029 switch (offset) { 2030 case 0x00: 2031 if (value & XHCI_IMAN_INTR_PEND) 2032 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2033 rts->intrreg.iman = (value & XHCI_IMAN_INTR_ENA) | 2034 (rts->intrreg.iman & XHCI_IMAN_INTR_PEND); 2035 2036 if (!(value & XHCI_IMAN_INTR_ENA)) 2037 pci_xhci_deassert_interrupt(sc); 2038 2039 break; 2040 2041 case 0x04: 2042 rts->intrreg.imod = value; 2043 break; 2044 2045 case 0x08: 2046 rts->intrreg.erstsz = value & 0xFFFF; 2047 break; 2048 2049 case 0x10: 2050 /* ERSTBA low bits */ 2051 rts->intrreg.erstba = MASK_64_HI(sc->rtsregs.intrreg.erstba) | 2052 (value & ~0x3F); 2053 break; 2054 2055 case 0x14: 2056 /* ERSTBA high bits */ 2057 rts->intrreg.erstba = (value << 32) | 2058 MASK_64_LO(sc->rtsregs.intrreg.erstba); 2059 2060 rts->erstba_p = XHCI_GADDR(sc, 2061 sc->rtsregs.intrreg.erstba & ~0x3FUL); 2062 2063 rts->erst_p = XHCI_GADDR(sc, 2064 sc->rtsregs.erstba_p->qwEvrsTablePtr & ~0x3FUL); 2065 2066 rts->er_enq_idx = 0; 2067 rts->er_events_cnt = 0; 2068 2069 DPRINTF(("pci_xhci: wr erstba erst (%p) ptr 0x%lx, sz %u", 2070 rts->erstba_p, 2071 rts->erstba_p->qwEvrsTablePtr, 2072 rts->erstba_p->dwEvrsTableSize)); 2073 break; 2074 2075 case 0x18: 2076 /* ERDP low bits */ 2077 rts->intrreg.erdp = 2078 MASK_64_HI(sc->rtsregs.intrreg.erdp) | 2079 (rts->intrreg.erdp & XHCI_ERDP_LO_BUSY) | 2080 (value & ~0xF); 2081 if (value & XHCI_ERDP_LO_BUSY) { 2082 rts->intrreg.erdp &= ~XHCI_ERDP_LO_BUSY; 2083 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2084 } 2085 2086 rts->er_deq_seg = XHCI_ERDP_LO_SINDEX(value); 2087 2088 break; 2089 2090 case 0x1C: 2091 /* ERDP high bits */ 2092 rts->intrreg.erdp = (value << 32) | 2093 MASK_64_LO(sc->rtsregs.intrreg.erdp); 2094 2095 if (rts->er_events_cnt > 0) { 2096 uint64_t erdp; 2097 uint32_t erdp_i; 2098 2099 erdp = rts->intrreg.erdp & ~0xF; 2100 erdp_i = (erdp - rts->erstba_p->qwEvrsTablePtr) / 2101 sizeof(struct xhci_trb); 2102 2103 if (erdp_i <= rts->er_enq_idx) 2104 rts->er_events_cnt = rts->er_enq_idx - erdp_i; 2105 else 2106 rts->er_events_cnt = 2107 rts->erstba_p->dwEvrsTableSize - 2108 (erdp_i - rts->er_enq_idx); 2109 2110 DPRINTF(("pci_xhci: erdp 0x%lx, events cnt %u", 2111 erdp, rts->er_events_cnt)); 2112 } 2113 2114 break; 2115 2116 default: 2117 DPRINTF(("pci_xhci attempted write to RTS offset 0x%lx", 2118 offset)); 2119 break; 2120 } 2121 } 2122 2123 static uint64_t 2124 pci_xhci_portregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2125 { 2126 int port; 2127 uint32_t *p; 2128 2129 if (sc->portregs == NULL) 2130 return (0); 2131 2132 port = (offset - 0x3F0) / 0x10; 2133 2134 if (port > XHCI_MAX_DEVS) { 2135 DPRINTF(("pci_xhci: portregs_read port %d >= XHCI_MAX_DEVS", 2136 port)); 2137 2138 /* return default value for unused port */ 2139 return (XHCI_PS_SPEED_SET(3)); 2140 } 2141 2142 offset = (offset - 0x3F0) % 0x10; 2143 2144 p = &sc->portregs[port].portsc; 2145 p += offset / sizeof(uint32_t); 2146 2147 DPRINTF(("pci_xhci: portregs read offset 0x%lx port %u -> 0x%x", 2148 offset, port, *p)); 2149 2150 return (*p); 2151 } 2152 2153 static void 2154 pci_xhci_hostop_write(struct pci_xhci_softc *sc, uint64_t offset, 2155 uint64_t value) 2156 { 2157 offset -= XHCI_CAPLEN; 2158 2159 if (offset < 0x400) 2160 DPRINTF(("pci_xhci: hostop write offset 0x%lx: 0x%lx", 2161 offset, value)); 2162 2163 switch (offset) { 2164 case XHCI_USBCMD: 2165 sc->opregs.usbcmd = pci_xhci_usbcmd_write(sc, value & 0x3F0F); 2166 break; 2167 2168 case XHCI_USBSTS: 2169 /* clear bits on write */ 2170 sc->opregs.usbsts &= ~(value & 2171 (XHCI_STS_HSE|XHCI_STS_EINT|XHCI_STS_PCD|XHCI_STS_SSS| 2172 XHCI_STS_RSS|XHCI_STS_SRE|XHCI_STS_CNR)); 2173 break; 2174 2175 case XHCI_PAGESIZE: 2176 /* read only */ 2177 break; 2178 2179 case XHCI_DNCTRL: 2180 sc->opregs.dnctrl = value & 0xFFFF; 2181 break; 2182 2183 case XHCI_CRCR_LO: 2184 if (sc->opregs.crcr & XHCI_CRCR_LO_CRR) { 2185 sc->opregs.crcr &= ~(XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2186 sc->opregs.crcr |= value & 2187 (XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2188 } else { 2189 sc->opregs.crcr = MASK_64_HI(sc->opregs.crcr) | 2190 (value & (0xFFFFFFC0 | XHCI_CRCR_LO_RCS)); 2191 } 2192 break; 2193 2194 case XHCI_CRCR_HI: 2195 if (!(sc->opregs.crcr & XHCI_CRCR_LO_CRR)) { 2196 sc->opregs.crcr = MASK_64_LO(sc->opregs.crcr) | 2197 (value << 32); 2198 2199 sc->opregs.cr_p = XHCI_GADDR(sc, 2200 sc->opregs.crcr & ~0xF); 2201 } 2202 2203 if (sc->opregs.crcr & XHCI_CRCR_LO_CS) { 2204 /* Stop operation of Command Ring */ 2205 } 2206 2207 if (sc->opregs.crcr & XHCI_CRCR_LO_CA) { 2208 /* Abort command */ 2209 } 2210 2211 break; 2212 2213 case XHCI_DCBAAP_LO: 2214 sc->opregs.dcbaap = MASK_64_HI(sc->opregs.dcbaap) | 2215 (value & 0xFFFFFFC0); 2216 break; 2217 2218 case XHCI_DCBAAP_HI: 2219 sc->opregs.dcbaap = MASK_64_LO(sc->opregs.dcbaap) | 2220 (value << 32); 2221 sc->opregs.dcbaa_p = XHCI_GADDR(sc, sc->opregs.dcbaap & ~0x3FUL); 2222 2223 DPRINTF(("pci_xhci: opregs dcbaap = 0x%lx (vaddr 0x%lx)", 2224 sc->opregs.dcbaap, (uint64_t)sc->opregs.dcbaa_p)); 2225 break; 2226 2227 case XHCI_CONFIG: 2228 sc->opregs.config = value & 0x03FF; 2229 break; 2230 2231 default: 2232 if (offset >= 0x400) 2233 pci_xhci_portregs_write(sc, offset, value); 2234 2235 break; 2236 } 2237 } 2238 2239 2240 static void 2241 pci_xhci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, 2242 int baridx, uint64_t offset, int size, uint64_t value) 2243 { 2244 struct pci_xhci_softc *sc; 2245 2246 sc = pi->pi_arg; 2247 2248 assert(baridx == 0); 2249 2250 2251 pthread_mutex_lock(&sc->mtx); 2252 if (offset < XHCI_CAPLEN) /* read only registers */ 2253 WPRINTF(("pci_xhci: write RO-CAPs offset %ld", offset)); 2254 else if (offset < sc->dboff) 2255 pci_xhci_hostop_write(sc, offset, value); 2256 else if (offset < sc->rtsoff) 2257 pci_xhci_dbregs_write(sc, offset, value); 2258 else if (offset < sc->regsend) 2259 pci_xhci_rtsregs_write(sc, offset, value); 2260 else 2261 WPRINTF(("pci_xhci: write invalid offset %ld", offset)); 2262 2263 pthread_mutex_unlock(&sc->mtx); 2264 } 2265 2266 static uint64_t 2267 pci_xhci_hostcap_read(struct pci_xhci_softc *sc, uint64_t offset) 2268 { 2269 uint64_t value; 2270 2271 switch (offset) { 2272 case XHCI_CAPLENGTH: /* 0x00 */ 2273 value = sc->caplength; 2274 break; 2275 2276 case XHCI_HCSPARAMS1: /* 0x04 */ 2277 value = sc->hcsparams1; 2278 break; 2279 2280 case XHCI_HCSPARAMS2: /* 0x08 */ 2281 value = sc->hcsparams2; 2282 break; 2283 2284 case XHCI_HCSPARAMS3: /* 0x0C */ 2285 value = sc->hcsparams3; 2286 break; 2287 2288 case XHCI_HCSPARAMS0: /* 0x10 */ 2289 value = sc->hccparams1; 2290 break; 2291 2292 case XHCI_DBOFF: /* 0x14 */ 2293 value = sc->dboff; 2294 break; 2295 2296 case XHCI_RTSOFF: /* 0x18 */ 2297 value = sc->rtsoff; 2298 break; 2299 2300 case XHCI_HCCPRAMS2: /* 0x1C */ 2301 value = sc->hccparams2; 2302 break; 2303 2304 default: 2305 value = 0; 2306 break; 2307 } 2308 2309 DPRINTF(("pci_xhci: hostcap read offset 0x%lx -> 0x%lx", 2310 offset, value)); 2311 2312 return (value); 2313 } 2314 2315 static uint64_t 2316 pci_xhci_hostop_read(struct pci_xhci_softc *sc, uint64_t offset) 2317 { 2318 uint64_t value; 2319 2320 offset = (offset - XHCI_CAPLEN); 2321 2322 switch (offset) { 2323 case XHCI_USBCMD: /* 0x00 */ 2324 value = sc->opregs.usbcmd; 2325 break; 2326 2327 case XHCI_USBSTS: /* 0x04 */ 2328 value = sc->opregs.usbsts; 2329 break; 2330 2331 case XHCI_PAGESIZE: /* 0x08 */ 2332 value = sc->opregs.pgsz; 2333 break; 2334 2335 case XHCI_DNCTRL: /* 0x14 */ 2336 value = sc->opregs.dnctrl; 2337 break; 2338 2339 case XHCI_CRCR_LO: /* 0x18 */ 2340 value = sc->opregs.crcr & XHCI_CRCR_LO_CRR; 2341 break; 2342 2343 case XHCI_CRCR_HI: /* 0x1C */ 2344 value = 0; 2345 break; 2346 2347 case XHCI_DCBAAP_LO: /* 0x30 */ 2348 value = sc->opregs.dcbaap & 0xFFFFFFFF; 2349 break; 2350 2351 case XHCI_DCBAAP_HI: /* 0x34 */ 2352 value = (sc->opregs.dcbaap >> 32) & 0xFFFFFFFF; 2353 break; 2354 2355 case XHCI_CONFIG: /* 0x38 */ 2356 value = sc->opregs.config; 2357 break; 2358 2359 default: 2360 if (offset >= 0x400) 2361 value = pci_xhci_portregs_read(sc, offset); 2362 else 2363 value = 0; 2364 2365 break; 2366 } 2367 2368 if (offset < 0x400) 2369 DPRINTF(("pci_xhci: hostop read offset 0x%lx -> 0x%lx", 2370 offset, value)); 2371 2372 return (value); 2373 } 2374 2375 static uint64_t 2376 pci_xhci_dbregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2377 { 2378 2379 /* read doorbell always returns 0 */ 2380 return (0); 2381 } 2382 2383 static uint64_t 2384 pci_xhci_rtsregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2385 { 2386 uint32_t value; 2387 2388 offset -= sc->rtsoff; 2389 value = 0; 2390 2391 if (offset == XHCI_MFINDEX) { 2392 value = sc->rtsregs.mfindex; 2393 } else if (offset >= 0x20) { 2394 int item; 2395 uint32_t *p; 2396 2397 offset -= 0x20; 2398 item = offset % 32; 2399 2400 assert(offset < sizeof(sc->rtsregs.intrreg)); 2401 2402 p = &sc->rtsregs.intrreg.iman; 2403 p += item / sizeof(uint32_t); 2404 value = *p; 2405 } 2406 2407 DPRINTF(("pci_xhci: rtsregs read offset 0x%lx -> 0x%x", 2408 offset, value)); 2409 2410 return (value); 2411 } 2412 2413 static uint64_t 2414 pci_xhci_xecp_read(struct pci_xhci_softc *sc, uint64_t offset) 2415 { 2416 uint32_t value; 2417 2418 offset -= sc->regsend; 2419 value = 0; 2420 2421 switch (offset) { 2422 case 0: 2423 /* rev major | rev minor | next-cap | cap-id */ 2424 value = (0x02 << 24) | (4 << 8) | XHCI_ID_PROTOCOLS; 2425 break; 2426 case 4: 2427 /* name string = "USB" */ 2428 value = 0x20425355; 2429 break; 2430 case 8: 2431 /* psic | proto-defined | compat # | compat offset */ 2432 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb2_port_start; 2433 break; 2434 case 12: 2435 break; 2436 case 16: 2437 /* rev major | rev minor | next-cap | cap-id */ 2438 value = (0x03 << 24) | XHCI_ID_PROTOCOLS; 2439 break; 2440 case 20: 2441 /* name string = "USB" */ 2442 value = 0x20425355; 2443 break; 2444 case 24: 2445 /* psic | proto-defined | compat # | compat offset */ 2446 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb3_port_start; 2447 break; 2448 case 28: 2449 break; 2450 default: 2451 DPRINTF(("pci_xhci: xecp invalid offset 0x%lx", offset)); 2452 break; 2453 } 2454 2455 DPRINTF(("pci_xhci: xecp read offset 0x%lx -> 0x%x", 2456 offset, value)); 2457 2458 return (value); 2459 } 2460 2461 2462 static uint64_t 2463 pci_xhci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 2464 uint64_t offset, int size) 2465 { 2466 struct pci_xhci_softc *sc; 2467 uint32_t value; 2468 2469 sc = pi->pi_arg; 2470 2471 assert(baridx == 0); 2472 2473 pthread_mutex_lock(&sc->mtx); 2474 if (offset < XHCI_CAPLEN) 2475 value = pci_xhci_hostcap_read(sc, offset); 2476 else if (offset < sc->dboff) 2477 value = pci_xhci_hostop_read(sc, offset); 2478 else if (offset < sc->rtsoff) 2479 value = pci_xhci_dbregs_read(sc, offset); 2480 else if (offset < sc->regsend) 2481 value = pci_xhci_rtsregs_read(sc, offset); 2482 else if (offset < (sc->regsend + 4*32)) 2483 value = pci_xhci_xecp_read(sc, offset); 2484 else { 2485 value = 0; 2486 WPRINTF(("pci_xhci: read invalid offset %ld", offset)); 2487 } 2488 2489 pthread_mutex_unlock(&sc->mtx); 2490 2491 switch (size) { 2492 case 1: 2493 value &= 0xFF; 2494 break; 2495 case 2: 2496 value &= 0xFFFF; 2497 break; 2498 case 4: 2499 value &= 0xFFFFFFFF; 2500 break; 2501 } 2502 2503 return (value); 2504 } 2505 2506 static void 2507 pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm) 2508 { 2509 struct pci_xhci_portregs *port; 2510 struct pci_xhci_dev_emu *dev; 2511 struct xhci_trb evtrb; 2512 int error; 2513 2514 assert(portn <= XHCI_MAX_DEVS); 2515 2516 DPRINTF(("xhci reset port %d", portn)); 2517 2518 port = XHCI_PORTREG_PTR(sc, portn); 2519 dev = XHCI_DEVINST_PTR(sc, portn); 2520 if (dev) { 2521 port->portsc &= ~(XHCI_PS_PLS_MASK | XHCI_PS_PR | XHCI_PS_PRC); 2522 port->portsc |= XHCI_PS_PED | 2523 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2524 2525 if (warm && dev->dev_ue->ue_usbver == 3) { 2526 port->portsc |= XHCI_PS_WRC; 2527 } 2528 2529 if ((port->portsc & XHCI_PS_PRC) == 0) { 2530 port->portsc |= XHCI_PS_PRC; 2531 2532 pci_xhci_set_evtrb(&evtrb, portn, 2533 XHCI_TRB_ERROR_SUCCESS, 2534 XHCI_TRB_EVENT_PORT_STS_CHANGE); 2535 error = pci_xhci_insert_event(sc, &evtrb, 1); 2536 if (error != XHCI_TRB_ERROR_SUCCESS) 2537 DPRINTF(("xhci reset port insert event " 2538 "failed")); 2539 } 2540 } 2541 } 2542 2543 static void 2544 pci_xhci_init_port(struct pci_xhci_softc *sc, int portn) 2545 { 2546 struct pci_xhci_portregs *port; 2547 struct pci_xhci_dev_emu *dev; 2548 2549 port = XHCI_PORTREG_PTR(sc, portn); 2550 dev = XHCI_DEVINST_PTR(sc, portn); 2551 if (dev) { 2552 port->portsc = XHCI_PS_CCS | /* connected */ 2553 XHCI_PS_PP; /* port power */ 2554 2555 if (dev->dev_ue->ue_usbver == 2) { 2556 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL) | 2557 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2558 } else { 2559 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0) | 2560 XHCI_PS_PED | /* enabled */ 2561 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2562 } 2563 2564 DPRINTF(("Init port %d 0x%x", portn, port->portsc)); 2565 } else { 2566 port->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP; 2567 DPRINTF(("Init empty port %d 0x%x", portn, port->portsc)); 2568 } 2569 } 2570 2571 static int 2572 pci_xhci_dev_intr(struct usb_hci *hci, int epctx) 2573 { 2574 struct pci_xhci_dev_emu *dev; 2575 struct xhci_dev_ctx *dev_ctx; 2576 struct xhci_trb evtrb; 2577 struct pci_xhci_softc *sc; 2578 struct pci_xhci_portregs *p; 2579 struct xhci_endp_ctx *ep_ctx; 2580 int error = 0; 2581 int dir_in; 2582 int epid; 2583 2584 dir_in = epctx & 0x80; 2585 epid = epctx & ~0x80; 2586 2587 /* HW endpoint contexts are 0-15; convert to epid based on dir */ 2588 epid = (epid * 2) + (dir_in ? 1 : 0); 2589 2590 assert(epid >= 1 && epid <= 31); 2591 2592 dev = hci->hci_sc; 2593 sc = dev->xsc; 2594 2595 /* check if device is ready; OS has to initialise it */ 2596 if (sc->rtsregs.erstba_p == NULL || 2597 (sc->opregs.usbcmd & XHCI_CMD_RS) == 0 || 2598 dev->dev_ctx == NULL) 2599 return (0); 2600 2601 p = XHCI_PORTREG_PTR(sc, hci->hci_port); 2602 2603 /* raise event if link U3 (suspended) state */ 2604 if (XHCI_PS_PLS_GET(p->portsc) == 3) { 2605 p->portsc &= ~XHCI_PS_PLS_MASK; 2606 p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME); 2607 if ((p->portsc & XHCI_PS_PLC) != 0) 2608 return (0); 2609 2610 p->portsc |= XHCI_PS_PLC; 2611 2612 pci_xhci_set_evtrb(&evtrb, hci->hci_port, 2613 XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE); 2614 error = pci_xhci_insert_event(sc, &evtrb, 0); 2615 if (error != XHCI_TRB_ERROR_SUCCESS) 2616 goto done; 2617 } 2618 2619 dev_ctx = dev->dev_ctx; 2620 ep_ctx = &dev_ctx->ctx_ep[epid]; 2621 if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) { 2622 DPRINTF(("xhci device interrupt on disabled endpoint %d", 2623 epid)); 2624 return (0); 2625 } 2626 2627 DPRINTF(("xhci device interrupt on endpoint %d", epid)); 2628 2629 pci_xhci_device_doorbell(sc, hci->hci_port, epid, 0); 2630 2631 done: 2632 return (error); 2633 } 2634 2635 static int 2636 pci_xhci_dev_event(struct usb_hci *hci, enum hci_usbev evid, void *param) 2637 { 2638 2639 DPRINTF(("xhci device event port %d", hci->hci_port)); 2640 return (0); 2641 } 2642 2643 2644 2645 static void 2646 pci_xhci_device_usage(char *opt) 2647 { 2648 2649 EPRINTLN("Invalid USB emulation \"%s\"", opt); 2650 } 2651 2652 static int 2653 pci_xhci_parse_opts(struct pci_xhci_softc *sc, char *opts) 2654 { 2655 struct pci_xhci_dev_emu **devices; 2656 struct pci_xhci_dev_emu *dev; 2657 struct usb_devemu *ue; 2658 void *devsc; 2659 char *uopt, *xopts, *config; 2660 int usb3_port, usb2_port, i; 2661 2662 uopt = NULL; 2663 usb3_port = sc->usb3_port_start - 1; 2664 usb2_port = sc->usb2_port_start - 1; 2665 devices = NULL; 2666 2667 if (opts == NULL) 2668 goto portsfinal; 2669 2670 devices = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_dev_emu *)); 2671 2672 sc->slots = calloc(XHCI_MAX_SLOTS, sizeof(struct pci_xhci_dev_emu *)); 2673 sc->devices = devices; 2674 sc->ndevices = 0; 2675 2676 uopt = strdup(opts); 2677 for (xopts = strtok(uopt, ","); 2678 xopts != NULL; 2679 xopts = strtok(NULL, ",")) { 2680 if (usb2_port == ((sc->usb2_port_start-1) + XHCI_MAX_DEVS/2) || 2681 usb3_port == ((sc->usb3_port_start-1) + XHCI_MAX_DEVS/2)) { 2682 WPRINTF(("pci_xhci max number of USB 2 or 3 " 2683 "devices reached, max %d", XHCI_MAX_DEVS/2)); 2684 usb2_port = usb3_port = -1; 2685 goto done; 2686 } 2687 2688 /* device[=<config>] */ 2689 if ((config = strchr(xopts, '=')) == NULL) 2690 config = ""; /* no config */ 2691 else 2692 *config++ = '\0'; 2693 2694 ue = usb_emu_finddev(xopts); 2695 if (ue == NULL) { 2696 pci_xhci_device_usage(xopts); 2697 DPRINTF(("pci_xhci device not found %s", xopts)); 2698 usb2_port = usb3_port = -1; 2699 goto done; 2700 } 2701 2702 DPRINTF(("pci_xhci adding device %s, opts \"%s\"", 2703 xopts, config)); 2704 2705 dev = calloc(1, sizeof(struct pci_xhci_dev_emu)); 2706 dev->xsc = sc; 2707 dev->hci.hci_sc = dev; 2708 dev->hci.hci_intr = pci_xhci_dev_intr; 2709 dev->hci.hci_event = pci_xhci_dev_event; 2710 2711 if (ue->ue_usbver == 2) { 2712 dev->hci.hci_port = usb2_port + 1; 2713 devices[usb2_port] = dev; 2714 usb2_port++; 2715 } else { 2716 dev->hci.hci_port = usb3_port + 1; 2717 devices[usb3_port] = dev; 2718 usb3_port++; 2719 } 2720 2721 dev->hci.hci_address = 0; 2722 devsc = ue->ue_init(&dev->hci, config); 2723 if (devsc == NULL) { 2724 pci_xhci_device_usage(xopts); 2725 usb2_port = usb3_port = -1; 2726 goto done; 2727 } 2728 2729 dev->dev_ue = ue; 2730 dev->dev_sc = devsc; 2731 2732 /* assign slot number to device */ 2733 sc->slots[sc->ndevices] = dev; 2734 2735 sc->ndevices++; 2736 } 2737 2738 portsfinal: 2739 sc->portregs = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_portregs)); 2740 2741 if (sc->ndevices > 0) { 2742 /* port and slot numbering start from 1 */ 2743 sc->devices--; 2744 sc->portregs--; 2745 sc->slots--; 2746 2747 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 2748 pci_xhci_init_port(sc, i); 2749 } 2750 } else { 2751 WPRINTF(("pci_xhci no USB devices configured")); 2752 sc->ndevices = 1; 2753 } 2754 2755 done: 2756 if (devices != NULL) { 2757 if (usb2_port <= 0 && usb3_port <= 0) { 2758 sc->devices = NULL; 2759 for (i = 0; devices[i] != NULL; i++) 2760 free(devices[i]); 2761 sc->ndevices = -1; 2762 2763 free(devices); 2764 } 2765 } 2766 free(uopt); 2767 return (sc->ndevices); 2768 } 2769 2770 static int 2771 pci_xhci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts) 2772 { 2773 struct pci_xhci_softc *sc; 2774 int error; 2775 2776 if (xhci_in_use) { 2777 WPRINTF(("pci_xhci controller already defined")); 2778 return (-1); 2779 } 2780 xhci_in_use = 1; 2781 2782 sc = calloc(1, sizeof(struct pci_xhci_softc)); 2783 pi->pi_arg = sc; 2784 sc->xsc_pi = pi; 2785 2786 sc->usb2_port_start = (XHCI_MAX_DEVS/2) + 1; 2787 sc->usb3_port_start = 1; 2788 2789 /* discover devices */ 2790 error = pci_xhci_parse_opts(sc, opts); 2791 if (error < 0) 2792 goto done; 2793 else 2794 error = 0; 2795 2796 sc->caplength = XHCI_SET_CAPLEN(XHCI_CAPLEN) | 2797 XHCI_SET_HCIVERSION(0x0100); 2798 sc->hcsparams1 = XHCI_SET_HCSP1_MAXPORTS(XHCI_MAX_DEVS) | 2799 XHCI_SET_HCSP1_MAXINTR(1) | /* interrupters */ 2800 XHCI_SET_HCSP1_MAXSLOTS(XHCI_MAX_SLOTS); 2801 sc->hcsparams2 = XHCI_SET_HCSP2_ERSTMAX(XHCI_ERST_MAX) | 2802 XHCI_SET_HCSP2_IST(0x04); 2803 sc->hcsparams3 = 0; /* no latency */ 2804 sc->hccparams1 = XHCI_SET_HCCP1_NSS(1) | /* no 2nd-streams */ 2805 XHCI_SET_HCCP1_SPC(1) | /* short packet */ 2806 XHCI_SET_HCCP1_MAXPSA(XHCI_STREAMS_MAX); 2807 sc->hccparams2 = XHCI_SET_HCCP2_LEC(1) | 2808 XHCI_SET_HCCP2_U3C(1); 2809 sc->dboff = XHCI_SET_DOORBELL(XHCI_CAPLEN + XHCI_PORTREGS_START + 2810 XHCI_MAX_DEVS * sizeof(struct pci_xhci_portregs)); 2811 2812 /* dboff must be 32-bit aligned */ 2813 if (sc->dboff & 0x3) 2814 sc->dboff = (sc->dboff + 0x3) & ~0x3; 2815 2816 /* rtsoff must be 32-bytes aligned */ 2817 sc->rtsoff = XHCI_SET_RTSOFFSET(sc->dboff + (XHCI_MAX_SLOTS+1) * 32); 2818 if (sc->rtsoff & 0x1F) 2819 sc->rtsoff = (sc->rtsoff + 0x1F) & ~0x1F; 2820 2821 DPRINTF(("pci_xhci dboff: 0x%x, rtsoff: 0x%x", sc->dboff, 2822 sc->rtsoff)); 2823 2824 sc->opregs.usbsts = XHCI_STS_HCH; 2825 sc->opregs.pgsz = XHCI_PAGESIZE_4K; 2826 2827 pci_xhci_reset(sc); 2828 2829 sc->regsend = sc->rtsoff + 0x20 + 32; /* only 1 intrpter */ 2830 2831 /* 2832 * Set extended capabilities pointer to be after regsend; 2833 * value of xecp field is 32-bit offset. 2834 */ 2835 sc->hccparams1 |= XHCI_SET_HCCP1_XECP(sc->regsend/4); 2836 2837 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1E31); 2838 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086); 2839 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SERIALBUS); 2840 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_SERIALBUS_USB); 2841 pci_set_cfgdata8(pi, PCIR_PROGIF,PCIP_SERIALBUS_USB_XHCI); 2842 pci_set_cfgdata8(pi, PCI_USBREV, PCI_USB_REV_3_0); 2843 2844 pci_emul_add_msicap(pi, 1); 2845 2846 /* regsend + xecp registers */ 2847 pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, sc->regsend + 4*32); 2848 DPRINTF(("pci_xhci pci_emu_alloc: %d", sc->regsend + 4*32)); 2849 2850 2851 pci_lintr_request(pi); 2852 2853 pthread_mutex_init(&sc->mtx, NULL); 2854 2855 done: 2856 if (error) { 2857 free(sc); 2858 } 2859 2860 return (error); 2861 } 2862 2863 #ifdef BHYVE_SNAPSHOT 2864 static void 2865 pci_xhci_map_devs_slots(struct pci_xhci_softc *sc, int maps[]) 2866 { 2867 int i, j; 2868 struct pci_xhci_dev_emu *dev, *slot; 2869 2870 memset(maps, 0, sizeof(maps[0]) * XHCI_MAX_SLOTS); 2871 2872 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 2873 for (j = 1; j <= XHCI_MAX_DEVS; j++) { 2874 slot = XHCI_SLOTDEV_PTR(sc, i); 2875 dev = XHCI_DEVINST_PTR(sc, j); 2876 2877 if (slot == dev) 2878 maps[i] = j; 2879 } 2880 } 2881 } 2882 2883 static int 2884 pci_xhci_snapshot_ep(struct pci_xhci_softc *sc, struct pci_xhci_dev_emu *dev, 2885 int idx, struct vm_snapshot_meta *meta) 2886 { 2887 int k; 2888 int ret; 2889 struct usb_data_xfer *xfer; 2890 struct usb_data_xfer_block *xfer_block; 2891 2892 /* some sanity checks */ 2893 if (meta->op == VM_SNAPSHOT_SAVE) 2894 xfer = dev->eps[idx].ep_xfer; 2895 2896 SNAPSHOT_VAR_OR_LEAVE(xfer, meta, ret, done); 2897 if (xfer == NULL) { 2898 ret = 0; 2899 goto done; 2900 } 2901 2902 if (meta->op == VM_SNAPSHOT_RESTORE) { 2903 pci_xhci_init_ep(dev, idx); 2904 xfer = dev->eps[idx].ep_xfer; 2905 } 2906 2907 /* save / restore proper */ 2908 for (k = 0; k < USB_MAX_XFER_BLOCKS; k++) { 2909 xfer_block = &xfer->data[k]; 2910 2911 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(xfer_block->buf, 2912 XHCI_GADDR_SIZE(xfer_block->buf), true, meta, ret, 2913 done); 2914 SNAPSHOT_VAR_OR_LEAVE(xfer_block->blen, meta, ret, done); 2915 SNAPSHOT_VAR_OR_LEAVE(xfer_block->bdone, meta, ret, done); 2916 SNAPSHOT_VAR_OR_LEAVE(xfer_block->processed, meta, ret, done); 2917 SNAPSHOT_VAR_OR_LEAVE(xfer_block->hci_data, meta, ret, done); 2918 SNAPSHOT_VAR_OR_LEAVE(xfer_block->ccs, meta, ret, done); 2919 SNAPSHOT_VAR_OR_LEAVE(xfer_block->streamid, meta, ret, done); 2920 SNAPSHOT_VAR_OR_LEAVE(xfer_block->trbnext, meta, ret, done); 2921 } 2922 2923 SNAPSHOT_VAR_OR_LEAVE(xfer->ureq, meta, ret, done); 2924 if (xfer->ureq) { 2925 /* xfer->ureq is not allocated at restore time */ 2926 if (meta->op == VM_SNAPSHOT_RESTORE) 2927 xfer->ureq = malloc(sizeof(struct usb_device_request)); 2928 2929 SNAPSHOT_BUF_OR_LEAVE(xfer->ureq, 2930 sizeof(struct usb_device_request), 2931 meta, ret, done); 2932 } 2933 2934 SNAPSHOT_VAR_OR_LEAVE(xfer->ndata, meta, ret, done); 2935 SNAPSHOT_VAR_OR_LEAVE(xfer->head, meta, ret, done); 2936 SNAPSHOT_VAR_OR_LEAVE(xfer->tail, meta, ret, done); 2937 2938 done: 2939 return (ret); 2940 } 2941 2942 static int 2943 pci_xhci_snapshot(struct vm_snapshot_meta *meta) 2944 { 2945 int i, j; 2946 int ret; 2947 int restore_idx; 2948 struct pci_devinst *pi; 2949 struct pci_xhci_softc *sc; 2950 struct pci_xhci_portregs *port; 2951 struct pci_xhci_dev_emu *dev; 2952 char dname[SNAP_DEV_NAME_LEN]; 2953 int maps[XHCI_MAX_SLOTS + 1]; 2954 2955 pi = meta->dev_data; 2956 sc = pi->pi_arg; 2957 2958 SNAPSHOT_VAR_OR_LEAVE(sc->caplength, meta, ret, done); 2959 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams1, meta, ret, done); 2960 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams2, meta, ret, done); 2961 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams3, meta, ret, done); 2962 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams1, meta, ret, done); 2963 SNAPSHOT_VAR_OR_LEAVE(sc->dboff, meta, ret, done); 2964 SNAPSHOT_VAR_OR_LEAVE(sc->rtsoff, meta, ret, done); 2965 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams2, meta, ret, done); 2966 SNAPSHOT_VAR_OR_LEAVE(sc->regsend, meta, ret, done); 2967 2968 /* opregs */ 2969 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbcmd, meta, ret, done); 2970 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbsts, meta, ret, done); 2971 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.pgsz, meta, ret, done); 2972 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dnctrl, meta, ret, done); 2973 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.crcr, meta, ret, done); 2974 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dcbaap, meta, ret, done); 2975 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.config, meta, ret, done); 2976 2977 /* opregs.cr_p */ 2978 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.cr_p, 2979 XHCI_GADDR_SIZE(sc->opregs.cr_p), false, meta, ret, done); 2980 2981 /* opregs.dcbaa_p */ 2982 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.dcbaa_p, 2983 XHCI_GADDR_SIZE(sc->opregs.dcbaa_p), false, meta, ret, done); 2984 2985 /* rtsregs */ 2986 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.mfindex, meta, ret, done); 2987 2988 /* rtsregs.intrreg */ 2989 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.iman, meta, ret, done); 2990 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.imod, meta, ret, done); 2991 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstsz, meta, ret, done); 2992 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.rsvd, meta, ret, done); 2993 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstba, meta, ret, done); 2994 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erdp, meta, ret, done); 2995 2996 /* rtsregs.erstba_p */ 2997 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erstba_p, 2998 XHCI_GADDR_SIZE(sc->rtsregs.erstba_p), false, meta, ret, done); 2999 3000 /* rtsregs.erst_p */ 3001 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erst_p, 3002 XHCI_GADDR_SIZE(sc->rtsregs.erst_p), false, meta, ret, done); 3003 3004 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_deq_seg, meta, ret, done); 3005 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_idx, meta, ret, done); 3006 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_seg, meta, ret, done); 3007 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_events_cnt, meta, ret, done); 3008 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.event_pcs, meta, ret, done); 3009 3010 /* sanity checking */ 3011 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3012 dev = XHCI_DEVINST_PTR(sc, i); 3013 if (dev == NULL) 3014 continue; 3015 3016 if (meta->op == VM_SNAPSHOT_SAVE) 3017 restore_idx = i; 3018 SNAPSHOT_VAR_OR_LEAVE(restore_idx, meta, ret, done); 3019 3020 /* check if the restored device (when restoring) is sane */ 3021 if (restore_idx != i) { 3022 fprintf(stderr, "%s: idx not matching: actual: %d, " 3023 "expected: %d\r\n", __func__, restore_idx, i); 3024 ret = EINVAL; 3025 goto done; 3026 } 3027 3028 if (meta->op == VM_SNAPSHOT_SAVE) { 3029 memset(dname, 0, sizeof(dname)); 3030 strncpy(dname, dev->dev_ue->ue_emu, sizeof(dname) - 1); 3031 } 3032 3033 SNAPSHOT_BUF_OR_LEAVE(dname, sizeof(dname), meta, ret, done); 3034 3035 if (meta->op == VM_SNAPSHOT_RESTORE) { 3036 dname[sizeof(dname) - 1] = '\0'; 3037 if (strcmp(dev->dev_ue->ue_emu, dname)) { 3038 fprintf(stderr, "%s: device names mismatch: " 3039 "actual: %s, expected: %s\r\n", 3040 __func__, dname, dev->dev_ue->ue_emu); 3041 3042 ret = EINVAL; 3043 goto done; 3044 } 3045 } 3046 } 3047 3048 /* portregs */ 3049 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3050 port = XHCI_PORTREG_PTR(sc, i); 3051 dev = XHCI_DEVINST_PTR(sc, i); 3052 3053 if (dev == NULL) 3054 continue; 3055 3056 SNAPSHOT_VAR_OR_LEAVE(port->portsc, meta, ret, done); 3057 SNAPSHOT_VAR_OR_LEAVE(port->portpmsc, meta, ret, done); 3058 SNAPSHOT_VAR_OR_LEAVE(port->portli, meta, ret, done); 3059 SNAPSHOT_VAR_OR_LEAVE(port->porthlpmc, meta, ret, done); 3060 } 3061 3062 /* slots */ 3063 if (meta->op == VM_SNAPSHOT_SAVE) 3064 pci_xhci_map_devs_slots(sc, maps); 3065 3066 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 3067 SNAPSHOT_VAR_OR_LEAVE(maps[i], meta, ret, done); 3068 3069 if (meta->op == VM_SNAPSHOT_SAVE) { 3070 dev = XHCI_SLOTDEV_PTR(sc, i); 3071 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 3072 if (maps[i] != 0) 3073 dev = XHCI_DEVINST_PTR(sc, maps[i]); 3074 else 3075 dev = NULL; 3076 3077 XHCI_SLOTDEV_PTR(sc, i) = dev; 3078 } else { 3079 /* error */ 3080 ret = EINVAL; 3081 goto done; 3082 } 3083 3084 if (dev == NULL) 3085 continue; 3086 3087 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(dev->dev_ctx, 3088 XHCI_GADDR_SIZE(dev->dev_ctx), false, meta, ret, done); 3089 3090 for (j = 1; j < XHCI_MAX_ENDPOINTS; j++) { 3091 ret = pci_xhci_snapshot_ep(sc, dev, j, meta); 3092 if (ret != 0) 3093 goto done; 3094 } 3095 3096 SNAPSHOT_VAR_OR_LEAVE(dev->dev_slotstate, meta, ret, done); 3097 3098 /* devices[i]->dev_sc */ 3099 dev->dev_ue->ue_snapshot(dev->dev_sc, meta); 3100 3101 /* devices[i]->hci */ 3102 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_address, meta, ret, done); 3103 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_port, meta, ret, done); 3104 } 3105 3106 SNAPSHOT_VAR_OR_LEAVE(sc->ndevices, meta, ret, done); 3107 SNAPSHOT_VAR_OR_LEAVE(sc->usb2_port_start, meta, ret, done); 3108 SNAPSHOT_VAR_OR_LEAVE(sc->usb3_port_start, meta, ret, done); 3109 3110 done: 3111 return (ret); 3112 } 3113 #endif 3114 3115 struct pci_devemu pci_de_xhci = { 3116 .pe_emu = "xhci", 3117 .pe_init = pci_xhci_init, 3118 .pe_barwrite = pci_xhci_write, 3119 .pe_barread = pci_xhci_read, 3120 #ifdef BHYVE_SNAPSHOT 3121 .pe_snapshot = pci_xhci_snapshot, 3122 #endif 3123 }; 3124 PCI_EMUL_SET(pci_de_xhci); 3125