1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2014 Leon Dang <ldang@nahannisys.com> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /* 29 XHCI options: 30 -s <n>,xhci,{devices} 31 32 devices: 33 tablet USB tablet mouse 34 */ 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include <sys/param.h> 39 #include <sys/uio.h> 40 #include <sys/types.h> 41 #include <sys/queue.h> 42 43 #include <stdio.h> 44 #include <stdlib.h> 45 #include <stdint.h> 46 #include <string.h> 47 #include <errno.h> 48 #include <pthread.h> 49 #include <unistd.h> 50 51 #include <machine/vmm_snapshot.h> 52 53 #include <dev/usb/usbdi.h> 54 #include <dev/usb/usb.h> 55 #include <dev/usb/usb_freebsd.h> 56 #include <xhcireg.h> 57 58 #include "bhyverun.h" 59 #include "config.h" 60 #include "debug.h" 61 #include "pci_emul.h" 62 #include "pci_xhci.h" 63 #include "usb_emul.h" 64 65 66 static int xhci_debug = 0; 67 #define DPRINTF(params) if (xhci_debug) PRINTLN params 68 #define WPRINTF(params) PRINTLN params 69 70 71 #define XHCI_NAME "xhci" 72 #define XHCI_MAX_DEVS 8 /* 4 USB3 + 4 USB2 devs */ 73 74 #define XHCI_MAX_SLOTS 64 /* min allowed by Windows drivers */ 75 76 /* 77 * XHCI data structures can be up to 64k, but limit paddr_guest2host mapping 78 * to 4k to avoid going over the guest physical memory barrier. 79 */ 80 #define XHCI_PADDR_SZ 4096 /* paddr_guest2host max size */ 81 82 #define XHCI_ERST_MAX 0 /* max 2^entries event ring seg tbl */ 83 84 #define XHCI_CAPLEN (4*8) /* offset of op register space */ 85 #define XHCI_HCCPRAMS2 0x1C /* offset of HCCPARAMS2 register */ 86 #define XHCI_PORTREGS_START 0x400 87 #define XHCI_DOORBELL_MAX 256 88 89 #define XHCI_STREAMS_MAX 1 /* 4-15 in XHCI spec */ 90 91 /* caplength and hci-version registers */ 92 #define XHCI_SET_CAPLEN(x) ((x) & 0xFF) 93 #define XHCI_SET_HCIVERSION(x) (((x) & 0xFFFF) << 16) 94 #define XHCI_GET_HCIVERSION(x) (((x) >> 16) & 0xFFFF) 95 96 /* hcsparams1 register */ 97 #define XHCI_SET_HCSP1_MAXSLOTS(x) ((x) & 0xFF) 98 #define XHCI_SET_HCSP1_MAXINTR(x) (((x) & 0x7FF) << 8) 99 #define XHCI_SET_HCSP1_MAXPORTS(x) (((x) & 0xFF) << 24) 100 101 /* hcsparams2 register */ 102 #define XHCI_SET_HCSP2_IST(x) ((x) & 0x0F) 103 #define XHCI_SET_HCSP2_ERSTMAX(x) (((x) & 0x0F) << 4) 104 #define XHCI_SET_HCSP2_MAXSCRATCH_HI(x) (((x) & 0x1F) << 21) 105 #define XHCI_SET_HCSP2_MAXSCRATCH_LO(x) (((x) & 0x1F) << 27) 106 107 /* hcsparams3 register */ 108 #define XHCI_SET_HCSP3_U1EXITLATENCY(x) ((x) & 0xFF) 109 #define XHCI_SET_HCSP3_U2EXITLATENCY(x) (((x) & 0xFFFF) << 16) 110 111 /* hccparams1 register */ 112 #define XHCI_SET_HCCP1_AC64(x) ((x) & 0x01) 113 #define XHCI_SET_HCCP1_BNC(x) (((x) & 0x01) << 1) 114 #define XHCI_SET_HCCP1_CSZ(x) (((x) & 0x01) << 2) 115 #define XHCI_SET_HCCP1_PPC(x) (((x) & 0x01) << 3) 116 #define XHCI_SET_HCCP1_PIND(x) (((x) & 0x01) << 4) 117 #define XHCI_SET_HCCP1_LHRC(x) (((x) & 0x01) << 5) 118 #define XHCI_SET_HCCP1_LTC(x) (((x) & 0x01) << 6) 119 #define XHCI_SET_HCCP1_NSS(x) (((x) & 0x01) << 7) 120 #define XHCI_SET_HCCP1_PAE(x) (((x) & 0x01) << 8) 121 #define XHCI_SET_HCCP1_SPC(x) (((x) & 0x01) << 9) 122 #define XHCI_SET_HCCP1_SEC(x) (((x) & 0x01) << 10) 123 #define XHCI_SET_HCCP1_CFC(x) (((x) & 0x01) << 11) 124 #define XHCI_SET_HCCP1_MAXPSA(x) (((x) & 0x0F) << 12) 125 #define XHCI_SET_HCCP1_XECP(x) (((x) & 0xFFFF) << 16) 126 127 /* hccparams2 register */ 128 #define XHCI_SET_HCCP2_U3C(x) ((x) & 0x01) 129 #define XHCI_SET_HCCP2_CMC(x) (((x) & 0x01) << 1) 130 #define XHCI_SET_HCCP2_FSC(x) (((x) & 0x01) << 2) 131 #define XHCI_SET_HCCP2_CTC(x) (((x) & 0x01) << 3) 132 #define XHCI_SET_HCCP2_LEC(x) (((x) & 0x01) << 4) 133 #define XHCI_SET_HCCP2_CIC(x) (((x) & 0x01) << 5) 134 135 /* other registers */ 136 #define XHCI_SET_DOORBELL(x) ((x) & ~0x03) 137 #define XHCI_SET_RTSOFFSET(x) ((x) & ~0x0F) 138 139 /* register masks */ 140 #define XHCI_PS_PLS_MASK (0xF << 5) /* port link state */ 141 #define XHCI_PS_SPEED_MASK (0xF << 10) /* port speed */ 142 #define XHCI_PS_PIC_MASK (0x3 << 14) /* port indicator */ 143 144 /* port register set */ 145 #define XHCI_PORTREGS_BASE 0x400 /* base offset */ 146 #define XHCI_PORTREGS_PORT0 0x3F0 147 #define XHCI_PORTREGS_SETSZ 0x10 /* size of a set */ 148 149 #define MASK_64_HI(x) ((x) & ~0xFFFFFFFFULL) 150 #define MASK_64_LO(x) ((x) & 0xFFFFFFFFULL) 151 152 #define FIELD_REPLACE(a,b,m,s) (((a) & ~((m) << (s))) | \ 153 (((b) & (m)) << (s))) 154 #define FIELD_COPY(a,b,m,s) (((a) & ~((m) << (s))) | \ 155 (((b) & ((m) << (s))))) 156 157 #define SNAP_DEV_NAME_LEN 128 158 159 struct pci_xhci_trb_ring { 160 uint64_t ringaddr; /* current dequeue guest address */ 161 uint32_t ccs; /* consumer cycle state */ 162 }; 163 164 /* device endpoint transfer/stream rings */ 165 struct pci_xhci_dev_ep { 166 union { 167 struct xhci_trb *_epu_tr; 168 struct xhci_stream_ctx *_epu_sctx; 169 } _ep_trbsctx; 170 #define ep_tr _ep_trbsctx._epu_tr 171 #define ep_sctx _ep_trbsctx._epu_sctx 172 173 union { 174 struct pci_xhci_trb_ring _epu_trb; 175 struct pci_xhci_trb_ring *_epu_sctx_trbs; 176 } _ep_trb_rings; 177 #define ep_ringaddr _ep_trb_rings._epu_trb.ringaddr 178 #define ep_ccs _ep_trb_rings._epu_trb.ccs 179 #define ep_sctx_trbs _ep_trb_rings._epu_sctx_trbs 180 181 struct usb_data_xfer *ep_xfer; /* transfer chain */ 182 }; 183 184 /* device context base address array: maps slot->device context */ 185 struct xhci_dcbaa { 186 uint64_t dcba[USB_MAX_DEVICES+1]; /* xhci_dev_ctx ptrs */ 187 }; 188 189 /* port status registers */ 190 struct pci_xhci_portregs { 191 uint32_t portsc; /* port status and control */ 192 uint32_t portpmsc; /* port pwr mgmt status & control */ 193 uint32_t portli; /* port link info */ 194 uint32_t porthlpmc; /* port hardware LPM control */ 195 } __packed; 196 #define XHCI_PS_SPEED_SET(x) (((x) & 0xF) << 10) 197 198 /* xHC operational registers */ 199 struct pci_xhci_opregs { 200 uint32_t usbcmd; /* usb command */ 201 uint32_t usbsts; /* usb status */ 202 uint32_t pgsz; /* page size */ 203 uint32_t dnctrl; /* device notification control */ 204 uint64_t crcr; /* command ring control */ 205 uint64_t dcbaap; /* device ctx base addr array ptr */ 206 uint32_t config; /* configure */ 207 208 /* guest mapped addresses: */ 209 struct xhci_trb *cr_p; /* crcr dequeue */ 210 struct xhci_dcbaa *dcbaa_p; /* dev ctx array ptr */ 211 }; 212 213 /* xHC runtime registers */ 214 struct pci_xhci_rtsregs { 215 uint32_t mfindex; /* microframe index */ 216 struct { /* interrupter register set */ 217 uint32_t iman; /* interrupter management */ 218 uint32_t imod; /* interrupter moderation */ 219 uint32_t erstsz; /* event ring segment table size */ 220 uint32_t rsvd; 221 uint64_t erstba; /* event ring seg-tbl base addr */ 222 uint64_t erdp; /* event ring dequeue ptr */ 223 } intrreg __packed; 224 225 /* guest mapped addresses */ 226 struct xhci_event_ring_seg *erstba_p; 227 struct xhci_trb *erst_p; /* event ring segment tbl */ 228 int er_deq_seg; /* event ring dequeue segment */ 229 int er_enq_idx; /* event ring enqueue index - xHCI */ 230 int er_enq_seg; /* event ring enqueue segment */ 231 uint32_t er_events_cnt; /* number of events in ER */ 232 uint32_t event_pcs; /* producer cycle state flag */ 233 }; 234 235 236 struct pci_xhci_softc; 237 238 239 /* 240 * USB device emulation container. 241 * This is referenced from usb_hci->hci_sc; 1 pci_xhci_dev_emu for each 242 * emulated device instance. 243 */ 244 struct pci_xhci_dev_emu { 245 struct pci_xhci_softc *xsc; 246 247 /* XHCI contexts */ 248 struct xhci_dev_ctx *dev_ctx; 249 struct pci_xhci_dev_ep eps[XHCI_MAX_ENDPOINTS]; 250 int dev_slotstate; 251 252 struct usb_devemu *dev_ue; /* USB emulated dev */ 253 void *dev_sc; /* device's softc */ 254 255 struct usb_hci hci; 256 }; 257 258 struct pci_xhci_softc { 259 struct pci_devinst *xsc_pi; 260 261 pthread_mutex_t mtx; 262 263 uint32_t caplength; /* caplen & hciversion */ 264 uint32_t hcsparams1; /* structural parameters 1 */ 265 uint32_t hcsparams2; /* structural parameters 2 */ 266 uint32_t hcsparams3; /* structural parameters 3 */ 267 uint32_t hccparams1; /* capability parameters 1 */ 268 uint32_t dboff; /* doorbell offset */ 269 uint32_t rtsoff; /* runtime register space offset */ 270 uint32_t hccparams2; /* capability parameters 2 */ 271 272 uint32_t regsend; /* end of configuration registers */ 273 274 struct pci_xhci_opregs opregs; 275 struct pci_xhci_rtsregs rtsregs; 276 277 struct pci_xhci_portregs *portregs; 278 struct pci_xhci_dev_emu **devices; /* XHCI[port] = device */ 279 struct pci_xhci_dev_emu **slots; /* slots assigned from 1 */ 280 281 int usb2_port_start; 282 int usb3_port_start; 283 }; 284 285 286 /* portregs and devices arrays are set up to start from idx=1 */ 287 #define XHCI_PORTREG_PTR(x,n) &(x)->portregs[(n)] 288 #define XHCI_DEVINST_PTR(x,n) (x)->devices[(n)] 289 #define XHCI_SLOTDEV_PTR(x,n) (x)->slots[(n)] 290 291 #define XHCI_HALTED(sc) ((sc)->opregs.usbsts & XHCI_STS_HCH) 292 293 #define XHCI_GADDR_SIZE(a) (XHCI_PADDR_SZ - \ 294 (((uint64_t) (a)) & (XHCI_PADDR_SZ - 1))) 295 #define XHCI_GADDR(sc,a) paddr_guest2host((sc)->xsc_pi->pi_vmctx, \ 296 (a), XHCI_GADDR_SIZE(a)) 297 298 static int xhci_in_use; 299 300 /* map USB errors to XHCI */ 301 static const int xhci_usb_errors[USB_ERR_MAX] = { 302 [USB_ERR_NORMAL_COMPLETION] = XHCI_TRB_ERROR_SUCCESS, 303 [USB_ERR_PENDING_REQUESTS] = XHCI_TRB_ERROR_RESOURCE, 304 [USB_ERR_NOT_STARTED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 305 [USB_ERR_INVAL] = XHCI_TRB_ERROR_INVALID, 306 [USB_ERR_NOMEM] = XHCI_TRB_ERROR_RESOURCE, 307 [USB_ERR_CANCELLED] = XHCI_TRB_ERROR_STOPPED, 308 [USB_ERR_BAD_ADDRESS] = XHCI_TRB_ERROR_PARAMETER, 309 [USB_ERR_BAD_BUFSIZE] = XHCI_TRB_ERROR_PARAMETER, 310 [USB_ERR_BAD_FLAG] = XHCI_TRB_ERROR_PARAMETER, 311 [USB_ERR_NO_CALLBACK] = XHCI_TRB_ERROR_STALL, 312 [USB_ERR_IN_USE] = XHCI_TRB_ERROR_RESOURCE, 313 [USB_ERR_NO_ADDR] = XHCI_TRB_ERROR_RESOURCE, 314 [USB_ERR_NO_PIPE] = XHCI_TRB_ERROR_RESOURCE, 315 [USB_ERR_ZERO_NFRAMES] = XHCI_TRB_ERROR_UNDEFINED, 316 [USB_ERR_ZERO_MAXP] = XHCI_TRB_ERROR_UNDEFINED, 317 [USB_ERR_SET_ADDR_FAILED] = XHCI_TRB_ERROR_RESOURCE, 318 [USB_ERR_NO_POWER] = XHCI_TRB_ERROR_ENDP_NOT_ON, 319 [USB_ERR_TOO_DEEP] = XHCI_TRB_ERROR_RESOURCE, 320 [USB_ERR_IOERROR] = XHCI_TRB_ERROR_TRB, 321 [USB_ERR_NOT_CONFIGURED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 322 [USB_ERR_TIMEOUT] = XHCI_TRB_ERROR_CMD_ABORTED, 323 [USB_ERR_SHORT_XFER] = XHCI_TRB_ERROR_SHORT_PKT, 324 [USB_ERR_STALLED] = XHCI_TRB_ERROR_STALL, 325 [USB_ERR_INTERRUPTED] = XHCI_TRB_ERROR_CMD_ABORTED, 326 [USB_ERR_DMA_LOAD_FAILED] = XHCI_TRB_ERROR_DATA_BUF, 327 [USB_ERR_BAD_CONTEXT] = XHCI_TRB_ERROR_TRB, 328 [USB_ERR_NO_ROOT_HUB] = XHCI_TRB_ERROR_UNDEFINED, 329 [USB_ERR_NO_INTR_THREAD] = XHCI_TRB_ERROR_UNDEFINED, 330 [USB_ERR_NOT_LOCKED] = XHCI_TRB_ERROR_UNDEFINED, 331 }; 332 #define USB_TO_XHCI_ERR(e) ((e) < USB_ERR_MAX ? xhci_usb_errors[(e)] : \ 333 XHCI_TRB_ERROR_INVALID) 334 335 static int pci_xhci_insert_event(struct pci_xhci_softc *sc, 336 struct xhci_trb *evtrb, int do_intr); 337 static void pci_xhci_dump_trb(struct xhci_trb *trb); 338 static void pci_xhci_assert_interrupt(struct pci_xhci_softc *sc); 339 static void pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot); 340 static void pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm); 341 static void pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, 342 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 343 struct xhci_endp_ctx *ep_ctx, uint32_t streamid, 344 uint64_t ringaddr, int ccs); 345 346 static void 347 pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode, 348 uint32_t evtype) 349 { 350 evtrb->qwTrb0 = port << 24; 351 evtrb->dwTrb2 = XHCI_TRB_2_ERROR_SET(errcode); 352 evtrb->dwTrb3 = XHCI_TRB_3_TYPE_SET(evtype); 353 } 354 355 356 /* controller reset */ 357 static void 358 pci_xhci_reset(struct pci_xhci_softc *sc) 359 { 360 int i; 361 362 sc->rtsregs.er_enq_idx = 0; 363 sc->rtsregs.er_events_cnt = 0; 364 sc->rtsregs.event_pcs = 1; 365 366 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 367 pci_xhci_reset_slot(sc, i); 368 } 369 } 370 371 static uint32_t 372 pci_xhci_usbcmd_write(struct pci_xhci_softc *sc, uint32_t cmd) 373 { 374 int do_intr = 0; 375 int i; 376 377 if (cmd & XHCI_CMD_RS) { 378 do_intr = (sc->opregs.usbcmd & XHCI_CMD_RS) == 0; 379 380 sc->opregs.usbcmd |= XHCI_CMD_RS; 381 sc->opregs.usbsts &= ~XHCI_STS_HCH; 382 sc->opregs.usbsts |= XHCI_STS_PCD; 383 384 /* Queue port change event on controller run from stop */ 385 if (do_intr) 386 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 387 struct pci_xhci_dev_emu *dev; 388 struct pci_xhci_portregs *port; 389 struct xhci_trb evtrb; 390 391 if ((dev = XHCI_DEVINST_PTR(sc, i)) == NULL) 392 continue; 393 394 port = XHCI_PORTREG_PTR(sc, i); 395 port->portsc |= XHCI_PS_CSC | XHCI_PS_CCS; 396 port->portsc &= ~XHCI_PS_PLS_MASK; 397 398 /* 399 * XHCI 4.19.3 USB2 RxDetect->Polling, 400 * USB3 Polling->U0 401 */ 402 if (dev->dev_ue->ue_usbver == 2) 403 port->portsc |= 404 XHCI_PS_PLS_SET(UPS_PORT_LS_POLL); 405 else 406 port->portsc |= 407 XHCI_PS_PLS_SET(UPS_PORT_LS_U0); 408 409 pci_xhci_set_evtrb(&evtrb, i, 410 XHCI_TRB_ERROR_SUCCESS, 411 XHCI_TRB_EVENT_PORT_STS_CHANGE); 412 413 if (pci_xhci_insert_event(sc, &evtrb, 0) != 414 XHCI_TRB_ERROR_SUCCESS) 415 break; 416 } 417 } else { 418 sc->opregs.usbcmd &= ~XHCI_CMD_RS; 419 sc->opregs.usbsts |= XHCI_STS_HCH; 420 sc->opregs.usbsts &= ~XHCI_STS_PCD; 421 } 422 423 /* start execution of schedule; stop when set to 0 */ 424 cmd |= sc->opregs.usbcmd & XHCI_CMD_RS; 425 426 if (cmd & XHCI_CMD_HCRST) { 427 /* reset controller */ 428 pci_xhci_reset(sc); 429 cmd &= ~XHCI_CMD_HCRST; 430 } 431 432 cmd &= ~(XHCI_CMD_CSS | XHCI_CMD_CRS); 433 434 if (do_intr) 435 pci_xhci_assert_interrupt(sc); 436 437 return (cmd); 438 } 439 440 static void 441 pci_xhci_portregs_write(struct pci_xhci_softc *sc, uint64_t offset, 442 uint64_t value) 443 { 444 struct xhci_trb evtrb; 445 struct pci_xhci_portregs *p; 446 int port; 447 uint32_t oldpls, newpls; 448 449 if (sc->portregs == NULL) 450 return; 451 452 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ; 453 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ; 454 455 DPRINTF(("pci_xhci: portregs wr offset 0x%lx, port %u: 0x%lx", 456 offset, port, value)); 457 458 assert(port >= 0); 459 460 if (port > XHCI_MAX_DEVS) { 461 DPRINTF(("pci_xhci: portregs_write port %d > ndevices", 462 port)); 463 return; 464 } 465 466 if (XHCI_DEVINST_PTR(sc, port) == NULL) { 467 DPRINTF(("pci_xhci: portregs_write to unattached port %d", 468 port)); 469 } 470 471 p = XHCI_PORTREG_PTR(sc, port); 472 switch (offset) { 473 case 0: 474 /* port reset or warm reset */ 475 if (value & (XHCI_PS_PR | XHCI_PS_WPR)) { 476 pci_xhci_reset_port(sc, port, value & XHCI_PS_WPR); 477 break; 478 } 479 480 if ((p->portsc & XHCI_PS_PP) == 0) { 481 WPRINTF(("pci_xhci: portregs_write to unpowered " 482 "port %d", port)); 483 break; 484 } 485 486 /* Port status and control register */ 487 oldpls = XHCI_PS_PLS_GET(p->portsc); 488 newpls = XHCI_PS_PLS_GET(value); 489 490 p->portsc &= XHCI_PS_PED | XHCI_PS_PLS_MASK | 491 XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK; 492 493 if (XHCI_DEVINST_PTR(sc, port)) 494 p->portsc |= XHCI_PS_CCS; 495 496 p->portsc |= (value & 497 ~(XHCI_PS_OCA | 498 XHCI_PS_PR | 499 XHCI_PS_PED | 500 XHCI_PS_PLS_MASK | /* link state */ 501 XHCI_PS_SPEED_MASK | 502 XHCI_PS_PIC_MASK | /* port indicator */ 503 XHCI_PS_LWS | XHCI_PS_DR | XHCI_PS_WPR)); 504 505 /* clear control bits */ 506 p->portsc &= ~(value & 507 (XHCI_PS_CSC | 508 XHCI_PS_PEC | 509 XHCI_PS_WRC | 510 XHCI_PS_OCC | 511 XHCI_PS_PRC | 512 XHCI_PS_PLC | 513 XHCI_PS_CEC | 514 XHCI_PS_CAS)); 515 516 /* port disable request; for USB3, don't care */ 517 if (value & XHCI_PS_PED) 518 DPRINTF(("Disable port %d request", port)); 519 520 if (!(value & XHCI_PS_LWS)) 521 break; 522 523 DPRINTF(("Port new PLS: %d", newpls)); 524 switch (newpls) { 525 case 0: /* U0 */ 526 case 3: /* U3 */ 527 if (oldpls != newpls) { 528 p->portsc &= ~XHCI_PS_PLS_MASK; 529 p->portsc |= XHCI_PS_PLS_SET(newpls) | 530 XHCI_PS_PLC; 531 532 if (oldpls != 0 && newpls == 0) { 533 pci_xhci_set_evtrb(&evtrb, port, 534 XHCI_TRB_ERROR_SUCCESS, 535 XHCI_TRB_EVENT_PORT_STS_CHANGE); 536 537 pci_xhci_insert_event(sc, &evtrb, 1); 538 } 539 } 540 break; 541 542 default: 543 DPRINTF(("Unhandled change port %d PLS %u", 544 port, newpls)); 545 break; 546 } 547 break; 548 case 4: 549 /* Port power management status and control register */ 550 p->portpmsc = value; 551 break; 552 case 8: 553 /* Port link information register */ 554 DPRINTF(("pci_xhci attempted write to PORTLI, port %d", 555 port)); 556 break; 557 case 12: 558 /* 559 * Port hardware LPM control register. 560 * For USB3, this register is reserved. 561 */ 562 p->porthlpmc = value; 563 break; 564 } 565 } 566 567 struct xhci_dev_ctx * 568 pci_xhci_get_dev_ctx(struct pci_xhci_softc *sc, uint32_t slot) 569 { 570 uint64_t devctx_addr; 571 struct xhci_dev_ctx *devctx; 572 573 assert(slot > 0 && slot <= XHCI_MAX_DEVS); 574 assert(XHCI_SLOTDEV_PTR(sc, slot) != NULL); 575 assert(sc->opregs.dcbaa_p != NULL); 576 577 devctx_addr = sc->opregs.dcbaa_p->dcba[slot]; 578 579 if (devctx_addr == 0) { 580 DPRINTF(("get_dev_ctx devctx_addr == 0")); 581 return (NULL); 582 } 583 584 DPRINTF(("pci_xhci: get dev ctx, slot %u devctx addr %016lx", 585 slot, devctx_addr)); 586 devctx = XHCI_GADDR(sc, devctx_addr & ~0x3FUL); 587 588 return (devctx); 589 } 590 591 struct xhci_trb * 592 pci_xhci_trb_next(struct pci_xhci_softc *sc, struct xhci_trb *curtrb, 593 uint64_t *guestaddr) 594 { 595 struct xhci_trb *next; 596 597 assert(curtrb != NULL); 598 599 if (XHCI_TRB_3_TYPE_GET(curtrb->dwTrb3) == XHCI_TRB_TYPE_LINK) { 600 if (guestaddr) 601 *guestaddr = curtrb->qwTrb0 & ~0xFUL; 602 603 next = XHCI_GADDR(sc, curtrb->qwTrb0 & ~0xFUL); 604 } else { 605 if (guestaddr) 606 *guestaddr += sizeof(struct xhci_trb) & ~0xFUL; 607 608 next = curtrb + 1; 609 } 610 611 return (next); 612 } 613 614 static void 615 pci_xhci_assert_interrupt(struct pci_xhci_softc *sc) 616 { 617 618 sc->rtsregs.intrreg.erdp |= XHCI_ERDP_LO_BUSY; 619 sc->rtsregs.intrreg.iman |= XHCI_IMAN_INTR_PEND; 620 sc->opregs.usbsts |= XHCI_STS_EINT; 621 622 /* only trigger interrupt if permitted */ 623 if ((sc->opregs.usbcmd & XHCI_CMD_INTE) && 624 (sc->rtsregs.intrreg.iman & XHCI_IMAN_INTR_ENA)) { 625 if (pci_msi_enabled(sc->xsc_pi)) 626 pci_generate_msi(sc->xsc_pi, 0); 627 else 628 pci_lintr_assert(sc->xsc_pi); 629 } 630 } 631 632 static void 633 pci_xhci_deassert_interrupt(struct pci_xhci_softc *sc) 634 { 635 636 if (!pci_msi_enabled(sc->xsc_pi)) 637 pci_lintr_assert(sc->xsc_pi); 638 } 639 640 static void 641 pci_xhci_init_ep(struct pci_xhci_dev_emu *dev, int epid) 642 { 643 struct xhci_dev_ctx *dev_ctx; 644 struct pci_xhci_dev_ep *devep; 645 struct xhci_endp_ctx *ep_ctx; 646 uint32_t pstreams; 647 int i; 648 649 dev_ctx = dev->dev_ctx; 650 ep_ctx = &dev_ctx->ctx_ep[epid]; 651 devep = &dev->eps[epid]; 652 pstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0); 653 if (pstreams > 0) { 654 DPRINTF(("init_ep %d with pstreams %d", epid, pstreams)); 655 assert(devep->ep_sctx_trbs == NULL); 656 657 devep->ep_sctx = XHCI_GADDR(dev->xsc, ep_ctx->qwEpCtx2 & 658 XHCI_EPCTX_2_TR_DQ_PTR_MASK); 659 devep->ep_sctx_trbs = calloc(pstreams, 660 sizeof(struct pci_xhci_trb_ring)); 661 for (i = 0; i < pstreams; i++) { 662 devep->ep_sctx_trbs[i].ringaddr = 663 devep->ep_sctx[i].qwSctx0 & 664 XHCI_SCTX_0_TR_DQ_PTR_MASK; 665 devep->ep_sctx_trbs[i].ccs = 666 XHCI_SCTX_0_DCS_GET(devep->ep_sctx[i].qwSctx0); 667 } 668 } else { 669 DPRINTF(("init_ep %d with no pstreams", epid)); 670 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & 671 XHCI_EPCTX_2_TR_DQ_PTR_MASK; 672 devep->ep_ccs = XHCI_EPCTX_2_DCS_GET(ep_ctx->qwEpCtx2); 673 devep->ep_tr = XHCI_GADDR(dev->xsc, devep->ep_ringaddr); 674 DPRINTF(("init_ep tr DCS %x", devep->ep_ccs)); 675 } 676 677 if (devep->ep_xfer == NULL) { 678 devep->ep_xfer = malloc(sizeof(struct usb_data_xfer)); 679 USB_DATA_XFER_INIT(devep->ep_xfer); 680 } 681 } 682 683 static void 684 pci_xhci_disable_ep(struct pci_xhci_dev_emu *dev, int epid) 685 { 686 struct xhci_dev_ctx *dev_ctx; 687 struct pci_xhci_dev_ep *devep; 688 struct xhci_endp_ctx *ep_ctx; 689 690 DPRINTF(("pci_xhci disable_ep %d", epid)); 691 692 dev_ctx = dev->dev_ctx; 693 ep_ctx = &dev_ctx->ctx_ep[epid]; 694 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_DISABLED; 695 696 devep = &dev->eps[epid]; 697 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0 && 698 devep->ep_sctx_trbs != NULL) 699 free(devep->ep_sctx_trbs); 700 701 if (devep->ep_xfer != NULL) { 702 free(devep->ep_xfer); 703 devep->ep_xfer = NULL; 704 } 705 706 memset(devep, 0, sizeof(struct pci_xhci_dev_ep)); 707 } 708 709 710 /* reset device at slot and data structures related to it */ 711 static void 712 pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot) 713 { 714 struct pci_xhci_dev_emu *dev; 715 716 dev = XHCI_SLOTDEV_PTR(sc, slot); 717 718 if (!dev) { 719 DPRINTF(("xhci reset unassigned slot (%d)?", slot)); 720 } else { 721 dev->dev_slotstate = XHCI_ST_DISABLED; 722 } 723 724 /* TODO: reset ring buffer pointers */ 725 } 726 727 static int 728 pci_xhci_insert_event(struct pci_xhci_softc *sc, struct xhci_trb *evtrb, 729 int do_intr) 730 { 731 struct pci_xhci_rtsregs *rts; 732 uint64_t erdp; 733 int erdp_idx; 734 int err; 735 struct xhci_trb *evtrbptr; 736 737 err = XHCI_TRB_ERROR_SUCCESS; 738 739 rts = &sc->rtsregs; 740 741 erdp = rts->intrreg.erdp & ~0xF; 742 erdp_idx = (erdp - rts->erstba_p[rts->er_deq_seg].qwEvrsTablePtr) / 743 sizeof(struct xhci_trb); 744 745 DPRINTF(("pci_xhci: insert event 0[%lx] 2[%x] 3[%x]", 746 evtrb->qwTrb0, evtrb->dwTrb2, evtrb->dwTrb3)); 747 DPRINTF(("\terdp idx %d/seg %d, enq idx %d/seg %d, pcs %u", 748 erdp_idx, rts->er_deq_seg, rts->er_enq_idx, 749 rts->er_enq_seg, rts->event_pcs)); 750 DPRINTF(("\t(erdp=0x%lx, erst=0x%lx, tblsz=%u, do_intr %d)", 751 erdp, rts->erstba_p->qwEvrsTablePtr, 752 rts->erstba_p->dwEvrsTableSize, do_intr)); 753 754 evtrbptr = &rts->erst_p[rts->er_enq_idx]; 755 756 /* TODO: multi-segment table */ 757 if (rts->er_events_cnt >= rts->erstba_p->dwEvrsTableSize) { 758 DPRINTF(("pci_xhci[%d] cannot insert event; ring full", 759 __LINE__)); 760 err = XHCI_TRB_ERROR_EV_RING_FULL; 761 goto done; 762 } 763 764 if (rts->er_events_cnt == rts->erstba_p->dwEvrsTableSize - 1) { 765 struct xhci_trb errev; 766 767 if ((evtrbptr->dwTrb3 & 0x1) == (rts->event_pcs & 0x1)) { 768 769 DPRINTF(("pci_xhci[%d] insert evt err: ring full", 770 __LINE__)); 771 772 errev.qwTrb0 = 0; 773 errev.dwTrb2 = XHCI_TRB_2_ERROR_SET( 774 XHCI_TRB_ERROR_EV_RING_FULL); 775 errev.dwTrb3 = XHCI_TRB_3_TYPE_SET( 776 XHCI_TRB_EVENT_HOST_CTRL) | 777 rts->event_pcs; 778 rts->er_events_cnt++; 779 memcpy(&rts->erst_p[rts->er_enq_idx], &errev, 780 sizeof(struct xhci_trb)); 781 rts->er_enq_idx = (rts->er_enq_idx + 1) % 782 rts->erstba_p->dwEvrsTableSize; 783 err = XHCI_TRB_ERROR_EV_RING_FULL; 784 do_intr = 1; 785 786 goto done; 787 } 788 } else { 789 rts->er_events_cnt++; 790 } 791 792 evtrb->dwTrb3 &= ~XHCI_TRB_3_CYCLE_BIT; 793 evtrb->dwTrb3 |= rts->event_pcs; 794 795 memcpy(&rts->erst_p[rts->er_enq_idx], evtrb, sizeof(struct xhci_trb)); 796 rts->er_enq_idx = (rts->er_enq_idx + 1) % 797 rts->erstba_p->dwEvrsTableSize; 798 799 if (rts->er_enq_idx == 0) 800 rts->event_pcs ^= 1; 801 802 done: 803 if (do_intr) 804 pci_xhci_assert_interrupt(sc); 805 806 return (err); 807 } 808 809 static uint32_t 810 pci_xhci_cmd_enable_slot(struct pci_xhci_softc *sc, uint32_t *slot) 811 { 812 struct pci_xhci_dev_emu *dev; 813 uint32_t cmderr; 814 int i; 815 816 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 817 if (sc->portregs != NULL) 818 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 819 dev = XHCI_SLOTDEV_PTR(sc, i); 820 if (dev && dev->dev_slotstate == XHCI_ST_DISABLED) { 821 *slot = i; 822 dev->dev_slotstate = XHCI_ST_ENABLED; 823 cmderr = XHCI_TRB_ERROR_SUCCESS; 824 dev->hci.hci_address = i; 825 break; 826 } 827 } 828 829 DPRINTF(("pci_xhci enable slot (error=%d) slot %u", 830 cmderr != XHCI_TRB_ERROR_SUCCESS, *slot)); 831 832 return (cmderr); 833 } 834 835 static uint32_t 836 pci_xhci_cmd_disable_slot(struct pci_xhci_softc *sc, uint32_t slot) 837 { 838 struct pci_xhci_dev_emu *dev; 839 uint32_t cmderr; 840 841 DPRINTF(("pci_xhci disable slot %u", slot)); 842 843 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 844 if (sc->portregs == NULL) 845 goto done; 846 847 if (slot > XHCI_MAX_SLOTS) { 848 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 849 goto done; 850 } 851 852 dev = XHCI_SLOTDEV_PTR(sc, slot); 853 if (dev) { 854 if (dev->dev_slotstate == XHCI_ST_DISABLED) { 855 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 856 } else { 857 dev->dev_slotstate = XHCI_ST_DISABLED; 858 cmderr = XHCI_TRB_ERROR_SUCCESS; 859 /* TODO: reset events and endpoints */ 860 } 861 } else 862 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 863 864 done: 865 return (cmderr); 866 } 867 868 static uint32_t 869 pci_xhci_cmd_reset_device(struct pci_xhci_softc *sc, uint32_t slot) 870 { 871 struct pci_xhci_dev_emu *dev; 872 struct xhci_dev_ctx *dev_ctx; 873 struct xhci_endp_ctx *ep_ctx; 874 uint32_t cmderr; 875 int i; 876 877 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 878 if (sc->portregs == NULL) 879 goto done; 880 881 DPRINTF(("pci_xhci reset device slot %u", slot)); 882 883 dev = XHCI_SLOTDEV_PTR(sc, slot); 884 if (!dev || dev->dev_slotstate == XHCI_ST_DISABLED) 885 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 886 else { 887 dev->dev_slotstate = XHCI_ST_DEFAULT; 888 889 dev->hci.hci_address = 0; 890 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 891 892 /* slot state */ 893 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 894 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_DEFAULT, 895 0x1F, 27); 896 897 /* number of contexts */ 898 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE( 899 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); 900 901 /* reset all eps other than ep-0 */ 902 for (i = 2; i <= 31; i++) { 903 ep_ctx = &dev_ctx->ctx_ep[i]; 904 ep_ctx->dwEpCtx0 = FIELD_REPLACE( ep_ctx->dwEpCtx0, 905 XHCI_ST_EPCTX_DISABLED, 0x7, 0); 906 } 907 908 cmderr = XHCI_TRB_ERROR_SUCCESS; 909 } 910 911 pci_xhci_reset_slot(sc, slot); 912 913 done: 914 return (cmderr); 915 } 916 917 static uint32_t 918 pci_xhci_cmd_address_device(struct pci_xhci_softc *sc, uint32_t slot, 919 struct xhci_trb *trb) 920 { 921 struct pci_xhci_dev_emu *dev; 922 struct xhci_input_dev_ctx *input_ctx; 923 struct xhci_slot_ctx *islot_ctx; 924 struct xhci_dev_ctx *dev_ctx; 925 struct xhci_endp_ctx *ep0_ctx; 926 uint32_t cmderr; 927 928 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 929 islot_ctx = &input_ctx->ctx_slot; 930 ep0_ctx = &input_ctx->ctx_ep[1]; 931 932 cmderr = XHCI_TRB_ERROR_SUCCESS; 933 934 DPRINTF(("pci_xhci: address device, input ctl: D 0x%08x A 0x%08x,", 935 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1)); 936 DPRINTF((" slot %08x %08x %08x %08x", 937 islot_ctx->dwSctx0, islot_ctx->dwSctx1, 938 islot_ctx->dwSctx2, islot_ctx->dwSctx3)); 939 DPRINTF((" ep0 %08x %08x %016lx %08x", 940 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 941 ep0_ctx->dwEpCtx4)); 942 943 /* when setting address: drop-ctx=0, add-ctx=slot+ep0 */ 944 if ((input_ctx->ctx_input.dwInCtx0 != 0) || 945 (input_ctx->ctx_input.dwInCtx1 & 0x03) != 0x03) { 946 DPRINTF(("pci_xhci: address device, input ctl invalid")); 947 cmderr = XHCI_TRB_ERROR_TRB; 948 goto done; 949 } 950 951 /* assign address to slot */ 952 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 953 954 DPRINTF(("pci_xhci: address device, dev ctx")); 955 DPRINTF((" slot %08x %08x %08x %08x", 956 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 957 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 958 959 dev = XHCI_SLOTDEV_PTR(sc, slot); 960 assert(dev != NULL); 961 962 dev->hci.hci_address = slot; 963 dev->dev_ctx = dev_ctx; 964 965 if (dev->dev_ue->ue_reset == NULL || 966 dev->dev_ue->ue_reset(dev->dev_sc) < 0) { 967 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 968 goto done; 969 } 970 971 memcpy(&dev_ctx->ctx_slot, islot_ctx, sizeof(struct xhci_slot_ctx)); 972 973 dev_ctx->ctx_slot.dwSctx3 = 974 XHCI_SCTX_3_SLOT_STATE_SET(XHCI_ST_SLCTX_ADDRESSED) | 975 XHCI_SCTX_3_DEV_ADDR_SET(slot); 976 977 memcpy(&dev_ctx->ctx_ep[1], ep0_ctx, sizeof(struct xhci_endp_ctx)); 978 ep0_ctx = &dev_ctx->ctx_ep[1]; 979 ep0_ctx->dwEpCtx0 = (ep0_ctx->dwEpCtx0 & ~0x7) | 980 XHCI_EPCTX_0_EPSTATE_SET(XHCI_ST_EPCTX_RUNNING); 981 982 pci_xhci_init_ep(dev, 1); 983 984 dev->dev_slotstate = XHCI_ST_ADDRESSED; 985 986 DPRINTF(("pci_xhci: address device, output ctx")); 987 DPRINTF((" slot %08x %08x %08x %08x", 988 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 989 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 990 DPRINTF((" ep0 %08x %08x %016lx %08x", 991 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 992 ep0_ctx->dwEpCtx4)); 993 994 done: 995 return (cmderr); 996 } 997 998 static uint32_t 999 pci_xhci_cmd_config_ep(struct pci_xhci_softc *sc, uint32_t slot, 1000 struct xhci_trb *trb) 1001 { 1002 struct xhci_input_dev_ctx *input_ctx; 1003 struct pci_xhci_dev_emu *dev; 1004 struct xhci_dev_ctx *dev_ctx; 1005 struct xhci_endp_ctx *ep_ctx, *iep_ctx; 1006 uint32_t cmderr; 1007 int i; 1008 1009 cmderr = XHCI_TRB_ERROR_SUCCESS; 1010 1011 DPRINTF(("pci_xhci config_ep slot %u", slot)); 1012 1013 dev = XHCI_SLOTDEV_PTR(sc, slot); 1014 assert(dev != NULL); 1015 1016 if ((trb->dwTrb3 & XHCI_TRB_3_DCEP_BIT) != 0) { 1017 DPRINTF(("pci_xhci config_ep - deconfigure ep slot %u", 1018 slot)); 1019 if (dev->dev_ue->ue_stop != NULL) 1020 dev->dev_ue->ue_stop(dev->dev_sc); 1021 1022 dev->dev_slotstate = XHCI_ST_ADDRESSED; 1023 1024 dev->hci.hci_address = 0; 1025 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1026 1027 /* number of contexts */ 1028 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE( 1029 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); 1030 1031 /* slot state */ 1032 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1033 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_ADDRESSED, 1034 0x1F, 27); 1035 1036 /* disable endpoints */ 1037 for (i = 2; i < 32; i++) 1038 pci_xhci_disable_ep(dev, i); 1039 1040 cmderr = XHCI_TRB_ERROR_SUCCESS; 1041 1042 goto done; 1043 } 1044 1045 if (dev->dev_slotstate < XHCI_ST_ADDRESSED) { 1046 DPRINTF(("pci_xhci: config_ep slotstate x%x != addressed", 1047 dev->dev_slotstate)); 1048 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 1049 goto done; 1050 } 1051 1052 /* In addressed/configured state; 1053 * for each drop endpoint ctx flag: 1054 * ep->state = DISABLED 1055 * for each add endpoint ctx flag: 1056 * cp(ep-in, ep-out) 1057 * ep->state = RUNNING 1058 * for each drop+add endpoint flag: 1059 * reset ep resources 1060 * cp(ep-in, ep-out) 1061 * ep->state = RUNNING 1062 * if input->DisabledCtx[2-31] < 30: (at least 1 ep not disabled) 1063 * slot->state = configured 1064 */ 1065 1066 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1067 dev_ctx = dev->dev_ctx; 1068 DPRINTF(("pci_xhci: config_ep inputctx: D:x%08x A:x%08x 7:x%08x", 1069 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1, 1070 input_ctx->ctx_input.dwInCtx7)); 1071 1072 for (i = 2; i <= 31; i++) { 1073 ep_ctx = &dev_ctx->ctx_ep[i]; 1074 1075 if (input_ctx->ctx_input.dwInCtx0 & 1076 XHCI_INCTX_0_DROP_MASK(i)) { 1077 DPRINTF((" config ep - dropping ep %d", i)); 1078 pci_xhci_disable_ep(dev, i); 1079 } 1080 1081 if (input_ctx->ctx_input.dwInCtx1 & 1082 XHCI_INCTX_1_ADD_MASK(i)) { 1083 iep_ctx = &input_ctx->ctx_ep[i]; 1084 1085 DPRINTF((" enable ep[%d] %08x %08x %016lx %08x", 1086 i, iep_ctx->dwEpCtx0, iep_ctx->dwEpCtx1, 1087 iep_ctx->qwEpCtx2, iep_ctx->dwEpCtx4)); 1088 1089 memcpy(ep_ctx, iep_ctx, sizeof(struct xhci_endp_ctx)); 1090 1091 pci_xhci_init_ep(dev, i); 1092 1093 /* ep state */ 1094 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1095 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1096 } 1097 } 1098 1099 /* slot state to configured */ 1100 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1101 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_CONFIGURED, 0x1F, 27); 1102 dev_ctx->ctx_slot.dwSctx0 = FIELD_COPY( 1103 dev_ctx->ctx_slot.dwSctx0, input_ctx->ctx_slot.dwSctx0, 0x1F, 27); 1104 dev->dev_slotstate = XHCI_ST_CONFIGURED; 1105 1106 DPRINTF(("EP configured; slot %u [0]=0x%08x [1]=0x%08x [2]=0x%08x " 1107 "[3]=0x%08x", 1108 slot, dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1109 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1110 1111 done: 1112 return (cmderr); 1113 } 1114 1115 static uint32_t 1116 pci_xhci_cmd_reset_ep(struct pci_xhci_softc *sc, uint32_t slot, 1117 struct xhci_trb *trb) 1118 { 1119 struct pci_xhci_dev_emu *dev; 1120 struct pci_xhci_dev_ep *devep; 1121 struct xhci_dev_ctx *dev_ctx; 1122 struct xhci_endp_ctx *ep_ctx; 1123 uint32_t cmderr, epid; 1124 uint32_t type; 1125 1126 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1127 1128 DPRINTF(("pci_xhci: reset ep %u: slot %u", epid, slot)); 1129 1130 cmderr = XHCI_TRB_ERROR_SUCCESS; 1131 1132 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1133 1134 dev = XHCI_SLOTDEV_PTR(sc, slot); 1135 assert(dev != NULL); 1136 1137 if (type == XHCI_TRB_TYPE_STOP_EP && 1138 (trb->dwTrb3 & XHCI_TRB_3_SUSP_EP_BIT) != 0) { 1139 /* XXX suspend endpoint for 10ms */ 1140 } 1141 1142 if (epid < 1 || epid > 31) { 1143 DPRINTF(("pci_xhci: reset ep: invalid epid %u", epid)); 1144 cmderr = XHCI_TRB_ERROR_TRB; 1145 goto done; 1146 } 1147 1148 devep = &dev->eps[epid]; 1149 if (devep->ep_xfer != NULL) 1150 USB_DATA_XFER_RESET(devep->ep_xfer); 1151 1152 dev_ctx = dev->dev_ctx; 1153 assert(dev_ctx != NULL); 1154 1155 ep_ctx = &dev_ctx->ctx_ep[epid]; 1156 1157 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1158 1159 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) == 0) 1160 ep_ctx->qwEpCtx2 = devep->ep_ringaddr | devep->ep_ccs; 1161 1162 DPRINTF(("pci_xhci: reset ep[%u] %08x %08x %016lx %08x", 1163 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1164 ep_ctx->dwEpCtx4)); 1165 1166 if (type == XHCI_TRB_TYPE_RESET_EP && 1167 (dev->dev_ue->ue_reset == NULL || 1168 dev->dev_ue->ue_reset(dev->dev_sc) < 0)) { 1169 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 1170 goto done; 1171 } 1172 1173 done: 1174 return (cmderr); 1175 } 1176 1177 1178 static uint32_t 1179 pci_xhci_find_stream(struct pci_xhci_softc *sc, struct xhci_endp_ctx *ep, 1180 uint32_t streamid, struct xhci_stream_ctx **osctx) 1181 { 1182 struct xhci_stream_ctx *sctx; 1183 uint32_t maxpstreams; 1184 1185 maxpstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep->dwEpCtx0); 1186 if (maxpstreams == 0) 1187 return (XHCI_TRB_ERROR_TRB); 1188 1189 if (maxpstreams > XHCI_STREAMS_MAX) 1190 return (XHCI_TRB_ERROR_INVALID_SID); 1191 1192 if (XHCI_EPCTX_0_LSA_GET(ep->dwEpCtx0) == 0) { 1193 DPRINTF(("pci_xhci: find_stream; LSA bit not set")); 1194 return (XHCI_TRB_ERROR_INVALID_SID); 1195 } 1196 1197 /* only support primary stream */ 1198 if (streamid > maxpstreams) 1199 return (XHCI_TRB_ERROR_STREAM_TYPE); 1200 1201 sctx = XHCI_GADDR(sc, ep->qwEpCtx2 & ~0xFUL) + streamid; 1202 if (!XHCI_SCTX_0_SCT_GET(sctx->qwSctx0)) 1203 return (XHCI_TRB_ERROR_STREAM_TYPE); 1204 1205 *osctx = sctx; 1206 1207 return (XHCI_TRB_ERROR_SUCCESS); 1208 } 1209 1210 1211 static uint32_t 1212 pci_xhci_cmd_set_tr(struct pci_xhci_softc *sc, uint32_t slot, 1213 struct xhci_trb *trb) 1214 { 1215 struct pci_xhci_dev_emu *dev; 1216 struct pci_xhci_dev_ep *devep; 1217 struct xhci_dev_ctx *dev_ctx; 1218 struct xhci_endp_ctx *ep_ctx; 1219 uint32_t cmderr, epid; 1220 uint32_t streamid; 1221 1222 cmderr = XHCI_TRB_ERROR_SUCCESS; 1223 1224 dev = XHCI_SLOTDEV_PTR(sc, slot); 1225 assert(dev != NULL); 1226 1227 DPRINTF(("pci_xhci set_tr: new-tr x%016lx, SCT %u DCS %u", 1228 (trb->qwTrb0 & ~0xF), (uint32_t)((trb->qwTrb0 >> 1) & 0x7), 1229 (uint32_t)(trb->qwTrb0 & 0x1))); 1230 DPRINTF((" stream-id %u, slot %u, epid %u, C %u", 1231 (trb->dwTrb2 >> 16) & 0xFFFF, 1232 XHCI_TRB_3_SLOT_GET(trb->dwTrb3), 1233 XHCI_TRB_3_EP_GET(trb->dwTrb3), trb->dwTrb3 & 0x1)); 1234 1235 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1236 if (epid < 1 || epid > 31) { 1237 DPRINTF(("pci_xhci: set_tr_deq: invalid epid %u", epid)); 1238 cmderr = XHCI_TRB_ERROR_TRB; 1239 goto done; 1240 } 1241 1242 dev_ctx = dev->dev_ctx; 1243 assert(dev_ctx != NULL); 1244 1245 ep_ctx = &dev_ctx->ctx_ep[epid]; 1246 devep = &dev->eps[epid]; 1247 1248 switch (XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)) { 1249 case XHCI_ST_EPCTX_STOPPED: 1250 case XHCI_ST_EPCTX_ERROR: 1251 break; 1252 default: 1253 DPRINTF(("pci_xhci cmd set_tr invalid state %x", 1254 XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0))); 1255 cmderr = XHCI_TRB_ERROR_CONTEXT_STATE; 1256 goto done; 1257 } 1258 1259 streamid = XHCI_TRB_2_STREAM_GET(trb->dwTrb2); 1260 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0) { 1261 struct xhci_stream_ctx *sctx; 1262 1263 sctx = NULL; 1264 cmderr = pci_xhci_find_stream(sc, ep_ctx, streamid, &sctx); 1265 if (sctx != NULL) { 1266 assert(devep->ep_sctx != NULL); 1267 1268 devep->ep_sctx[streamid].qwSctx0 = trb->qwTrb0; 1269 devep->ep_sctx_trbs[streamid].ringaddr = 1270 trb->qwTrb0 & ~0xF; 1271 devep->ep_sctx_trbs[streamid].ccs = 1272 XHCI_EPCTX_2_DCS_GET(trb->qwTrb0); 1273 } 1274 } else { 1275 if (streamid != 0) { 1276 DPRINTF(("pci_xhci cmd set_tr streamid %x != 0", 1277 streamid)); 1278 } 1279 ep_ctx->qwEpCtx2 = trb->qwTrb0 & ~0xFUL; 1280 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & ~0xFUL; 1281 devep->ep_ccs = trb->qwTrb0 & 0x1; 1282 devep->ep_tr = XHCI_GADDR(sc, devep->ep_ringaddr); 1283 1284 DPRINTF(("pci_xhci set_tr first TRB:")); 1285 pci_xhci_dump_trb(devep->ep_tr); 1286 } 1287 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1288 1289 done: 1290 return (cmderr); 1291 } 1292 1293 static uint32_t 1294 pci_xhci_cmd_eval_ctx(struct pci_xhci_softc *sc, uint32_t slot, 1295 struct xhci_trb *trb) 1296 { 1297 struct xhci_input_dev_ctx *input_ctx; 1298 struct xhci_slot_ctx *islot_ctx; 1299 struct xhci_dev_ctx *dev_ctx; 1300 struct xhci_endp_ctx *ep0_ctx; 1301 uint32_t cmderr; 1302 1303 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1304 islot_ctx = &input_ctx->ctx_slot; 1305 ep0_ctx = &input_ctx->ctx_ep[1]; 1306 1307 cmderr = XHCI_TRB_ERROR_SUCCESS; 1308 DPRINTF(("pci_xhci: eval ctx, input ctl: D 0x%08x A 0x%08x,", 1309 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1)); 1310 DPRINTF((" slot %08x %08x %08x %08x", 1311 islot_ctx->dwSctx0, islot_ctx->dwSctx1, 1312 islot_ctx->dwSctx2, islot_ctx->dwSctx3)); 1313 DPRINTF((" ep0 %08x %08x %016lx %08x", 1314 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1315 ep0_ctx->dwEpCtx4)); 1316 1317 /* this command expects drop-ctx=0 & add-ctx=slot+ep0 */ 1318 if ((input_ctx->ctx_input.dwInCtx0 != 0) || 1319 (input_ctx->ctx_input.dwInCtx1 & 0x03) == 0) { 1320 DPRINTF(("pci_xhci: eval ctx, input ctl invalid")); 1321 cmderr = XHCI_TRB_ERROR_TRB; 1322 goto done; 1323 } 1324 1325 /* assign address to slot; in this emulation, slot_id = address */ 1326 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1327 1328 DPRINTF(("pci_xhci: eval ctx, dev ctx")); 1329 DPRINTF((" slot %08x %08x %08x %08x", 1330 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1331 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1332 1333 if (input_ctx->ctx_input.dwInCtx1 & 0x01) { /* slot ctx */ 1334 /* set max exit latency */ 1335 dev_ctx->ctx_slot.dwSctx1 = FIELD_COPY( 1336 dev_ctx->ctx_slot.dwSctx1, input_ctx->ctx_slot.dwSctx1, 1337 0xFFFF, 0); 1338 1339 /* set interrupter target */ 1340 dev_ctx->ctx_slot.dwSctx2 = FIELD_COPY( 1341 dev_ctx->ctx_slot.dwSctx2, input_ctx->ctx_slot.dwSctx2, 1342 0x3FF, 22); 1343 } 1344 if (input_ctx->ctx_input.dwInCtx1 & 0x02) { /* control ctx */ 1345 /* set max packet size */ 1346 dev_ctx->ctx_ep[1].dwEpCtx1 = FIELD_COPY( 1347 dev_ctx->ctx_ep[1].dwEpCtx1, ep0_ctx->dwEpCtx1, 1348 0xFFFF, 16); 1349 1350 ep0_ctx = &dev_ctx->ctx_ep[1]; 1351 } 1352 1353 DPRINTF(("pci_xhci: eval ctx, output ctx")); 1354 DPRINTF((" slot %08x %08x %08x %08x", 1355 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1356 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1357 DPRINTF((" ep0 %08x %08x %016lx %08x", 1358 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1359 ep0_ctx->dwEpCtx4)); 1360 1361 done: 1362 return (cmderr); 1363 } 1364 1365 static int 1366 pci_xhci_complete_commands(struct pci_xhci_softc *sc) 1367 { 1368 struct xhci_trb evtrb; 1369 struct xhci_trb *trb; 1370 uint64_t crcr; 1371 uint32_t ccs; /* cycle state (XHCI 4.9.2) */ 1372 uint32_t type; 1373 uint32_t slot; 1374 uint32_t cmderr; 1375 int error; 1376 1377 error = 0; 1378 sc->opregs.crcr |= XHCI_CRCR_LO_CRR; 1379 1380 trb = sc->opregs.cr_p; 1381 ccs = sc->opregs.crcr & XHCI_CRCR_LO_RCS; 1382 crcr = sc->opregs.crcr & ~0xF; 1383 1384 while (1) { 1385 sc->opregs.cr_p = trb; 1386 1387 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1388 1389 if ((trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT) != 1390 (ccs & XHCI_TRB_3_CYCLE_BIT)) 1391 break; 1392 1393 DPRINTF(("pci_xhci: cmd type 0x%x, Trb0 x%016lx dwTrb2 x%08x" 1394 " dwTrb3 x%08x, TRB_CYCLE %u/ccs %u", 1395 type, trb->qwTrb0, trb->dwTrb2, trb->dwTrb3, 1396 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT, ccs)); 1397 1398 cmderr = XHCI_TRB_ERROR_SUCCESS; 1399 evtrb.dwTrb2 = 0; 1400 evtrb.dwTrb3 = (ccs & XHCI_TRB_3_CYCLE_BIT) | 1401 XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_CMD_COMPLETE); 1402 slot = 0; 1403 1404 switch (type) { 1405 case XHCI_TRB_TYPE_LINK: /* 0x06 */ 1406 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1407 ccs ^= XHCI_CRCR_LO_RCS; 1408 break; 1409 1410 case XHCI_TRB_TYPE_ENABLE_SLOT: /* 0x09 */ 1411 cmderr = pci_xhci_cmd_enable_slot(sc, &slot); 1412 break; 1413 1414 case XHCI_TRB_TYPE_DISABLE_SLOT: /* 0x0A */ 1415 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1416 cmderr = pci_xhci_cmd_disable_slot(sc, slot); 1417 break; 1418 1419 case XHCI_TRB_TYPE_ADDRESS_DEVICE: /* 0x0B */ 1420 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1421 cmderr = pci_xhci_cmd_address_device(sc, slot, trb); 1422 break; 1423 1424 case XHCI_TRB_TYPE_CONFIGURE_EP: /* 0x0C */ 1425 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1426 cmderr = pci_xhci_cmd_config_ep(sc, slot, trb); 1427 break; 1428 1429 case XHCI_TRB_TYPE_EVALUATE_CTX: /* 0x0D */ 1430 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1431 cmderr = pci_xhci_cmd_eval_ctx(sc, slot, trb); 1432 break; 1433 1434 case XHCI_TRB_TYPE_RESET_EP: /* 0x0E */ 1435 DPRINTF(("Reset Endpoint on slot %d", slot)); 1436 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1437 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1438 break; 1439 1440 case XHCI_TRB_TYPE_STOP_EP: /* 0x0F */ 1441 DPRINTF(("Stop Endpoint on slot %d", slot)); 1442 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1443 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1444 break; 1445 1446 case XHCI_TRB_TYPE_SET_TR_DEQUEUE: /* 0x10 */ 1447 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1448 cmderr = pci_xhci_cmd_set_tr(sc, slot, trb); 1449 break; 1450 1451 case XHCI_TRB_TYPE_RESET_DEVICE: /* 0x11 */ 1452 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1453 cmderr = pci_xhci_cmd_reset_device(sc, slot); 1454 break; 1455 1456 case XHCI_TRB_TYPE_FORCE_EVENT: /* 0x12 */ 1457 /* TODO: */ 1458 break; 1459 1460 case XHCI_TRB_TYPE_NEGOTIATE_BW: /* 0x13 */ 1461 break; 1462 1463 case XHCI_TRB_TYPE_SET_LATENCY_TOL: /* 0x14 */ 1464 break; 1465 1466 case XHCI_TRB_TYPE_GET_PORT_BW: /* 0x15 */ 1467 break; 1468 1469 case XHCI_TRB_TYPE_FORCE_HEADER: /* 0x16 */ 1470 break; 1471 1472 case XHCI_TRB_TYPE_NOOP_CMD: /* 0x17 */ 1473 break; 1474 1475 default: 1476 DPRINTF(("pci_xhci: unsupported cmd %x", type)); 1477 break; 1478 } 1479 1480 if (type != XHCI_TRB_TYPE_LINK) { 1481 /* 1482 * insert command completion event and assert intr 1483 */ 1484 evtrb.qwTrb0 = crcr; 1485 evtrb.dwTrb2 |= XHCI_TRB_2_ERROR_SET(cmderr); 1486 evtrb.dwTrb3 |= XHCI_TRB_3_SLOT_SET(slot); 1487 DPRINTF(("pci_xhci: command 0x%x result: 0x%x", 1488 type, cmderr)); 1489 pci_xhci_insert_event(sc, &evtrb, 1); 1490 } 1491 1492 trb = pci_xhci_trb_next(sc, trb, &crcr); 1493 } 1494 1495 sc->opregs.crcr = crcr | (sc->opregs.crcr & XHCI_CRCR_LO_CA) | ccs; 1496 sc->opregs.crcr &= ~XHCI_CRCR_LO_CRR; 1497 return (error); 1498 } 1499 1500 static void 1501 pci_xhci_dump_trb(struct xhci_trb *trb) 1502 { 1503 static const char *trbtypes[] = { 1504 "RESERVED", 1505 "NORMAL", 1506 "SETUP_STAGE", 1507 "DATA_STAGE", 1508 "STATUS_STAGE", 1509 "ISOCH", 1510 "LINK", 1511 "EVENT_DATA", 1512 "NOOP", 1513 "ENABLE_SLOT", 1514 "DISABLE_SLOT", 1515 "ADDRESS_DEVICE", 1516 "CONFIGURE_EP", 1517 "EVALUATE_CTX", 1518 "RESET_EP", 1519 "STOP_EP", 1520 "SET_TR_DEQUEUE", 1521 "RESET_DEVICE", 1522 "FORCE_EVENT", 1523 "NEGOTIATE_BW", 1524 "SET_LATENCY_TOL", 1525 "GET_PORT_BW", 1526 "FORCE_HEADER", 1527 "NOOP_CMD" 1528 }; 1529 uint32_t type; 1530 1531 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1532 DPRINTF(("pci_xhci: trb[@%p] type x%02x %s 0:x%016lx 2:x%08x 3:x%08x", 1533 trb, type, 1534 type <= XHCI_TRB_TYPE_NOOP_CMD ? trbtypes[type] : "INVALID", 1535 trb->qwTrb0, trb->dwTrb2, trb->dwTrb3)); 1536 } 1537 1538 static int 1539 pci_xhci_xfer_complete(struct pci_xhci_softc *sc, struct usb_data_xfer *xfer, 1540 uint32_t slot, uint32_t epid, int *do_intr) 1541 { 1542 struct pci_xhci_dev_emu *dev; 1543 struct pci_xhci_dev_ep *devep; 1544 struct xhci_dev_ctx *dev_ctx; 1545 struct xhci_endp_ctx *ep_ctx; 1546 struct xhci_trb *trb; 1547 struct xhci_trb evtrb; 1548 uint32_t trbflags; 1549 uint32_t edtla; 1550 int i, err; 1551 1552 dev = XHCI_SLOTDEV_PTR(sc, slot); 1553 devep = &dev->eps[epid]; 1554 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1555 1556 assert(dev_ctx != NULL); 1557 1558 ep_ctx = &dev_ctx->ctx_ep[epid]; 1559 1560 err = XHCI_TRB_ERROR_SUCCESS; 1561 *do_intr = 0; 1562 edtla = 0; 1563 1564 /* go through list of TRBs and insert event(s) */ 1565 for (i = xfer->head; xfer->ndata > 0; ) { 1566 evtrb.qwTrb0 = (uint64_t)xfer->data[i].hci_data; 1567 trb = XHCI_GADDR(sc, evtrb.qwTrb0); 1568 trbflags = trb->dwTrb3; 1569 1570 DPRINTF(("pci_xhci: xfer[%d] done?%u:%d trb %x %016lx %x " 1571 "(err %d) IOC?%d", 1572 i, xfer->data[i].processed, xfer->data[i].blen, 1573 XHCI_TRB_3_TYPE_GET(trbflags), evtrb.qwTrb0, 1574 trbflags, err, 1575 trb->dwTrb3 & XHCI_TRB_3_IOC_BIT ? 1 : 0)); 1576 1577 if (!xfer->data[i].processed) { 1578 xfer->head = i; 1579 break; 1580 } 1581 1582 xfer->ndata--; 1583 edtla += xfer->data[i].bdone; 1584 1585 trb->dwTrb3 = (trb->dwTrb3 & ~0x1) | (xfer->data[i].ccs); 1586 1587 pci_xhci_update_ep_ring(sc, dev, devep, ep_ctx, 1588 xfer->data[i].streamid, xfer->data[i].trbnext, 1589 xfer->data[i].ccs); 1590 1591 /* Only interrupt if IOC or short packet */ 1592 if (!(trb->dwTrb3 & XHCI_TRB_3_IOC_BIT) && 1593 !((err == XHCI_TRB_ERROR_SHORT_PKT) && 1594 (trb->dwTrb3 & XHCI_TRB_3_ISP_BIT))) { 1595 1596 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1597 continue; 1598 } 1599 1600 evtrb.dwTrb2 = XHCI_TRB_2_ERROR_SET(err) | 1601 XHCI_TRB_2_REM_SET(xfer->data[i].blen); 1602 1603 evtrb.dwTrb3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_TRANSFER) | 1604 XHCI_TRB_3_SLOT_SET(slot) | XHCI_TRB_3_EP_SET(epid); 1605 1606 if (XHCI_TRB_3_TYPE_GET(trbflags) == XHCI_TRB_TYPE_EVENT_DATA) { 1607 DPRINTF(("pci_xhci EVENT_DATA edtla %u", edtla)); 1608 evtrb.qwTrb0 = trb->qwTrb0; 1609 evtrb.dwTrb2 = (edtla & 0xFFFFF) | 1610 XHCI_TRB_2_ERROR_SET(err); 1611 evtrb.dwTrb3 |= XHCI_TRB_3_ED_BIT; 1612 edtla = 0; 1613 } 1614 1615 *do_intr = 1; 1616 1617 err = pci_xhci_insert_event(sc, &evtrb, 0); 1618 if (err != XHCI_TRB_ERROR_SUCCESS) { 1619 break; 1620 } 1621 1622 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1623 } 1624 1625 return (err); 1626 } 1627 1628 static void 1629 pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, struct pci_xhci_dev_emu *dev, 1630 struct pci_xhci_dev_ep *devep, struct xhci_endp_ctx *ep_ctx, 1631 uint32_t streamid, uint64_t ringaddr, int ccs) 1632 { 1633 1634 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) { 1635 devep->ep_sctx[streamid].qwSctx0 = (ringaddr & ~0xFUL) | 1636 (ccs & 0x1); 1637 1638 devep->ep_sctx_trbs[streamid].ringaddr = ringaddr & ~0xFUL; 1639 devep->ep_sctx_trbs[streamid].ccs = ccs & 0x1; 1640 ep_ctx->qwEpCtx2 = (ep_ctx->qwEpCtx2 & ~0x1) | (ccs & 0x1); 1641 1642 DPRINTF(("xhci update ep-ring stream %d, addr %lx", 1643 streamid, devep->ep_sctx[streamid].qwSctx0)); 1644 } else { 1645 devep->ep_ringaddr = ringaddr & ~0xFUL; 1646 devep->ep_ccs = ccs & 0x1; 1647 devep->ep_tr = XHCI_GADDR(sc, ringaddr & ~0xFUL); 1648 ep_ctx->qwEpCtx2 = (ringaddr & ~0xFUL) | (ccs & 0x1); 1649 1650 DPRINTF(("xhci update ep-ring, addr %lx", 1651 (devep->ep_ringaddr | devep->ep_ccs))); 1652 } 1653 } 1654 1655 /* 1656 * Outstanding transfer still in progress (device NAK'd earlier) so retry 1657 * the transfer again to see if it succeeds. 1658 */ 1659 static int 1660 pci_xhci_try_usb_xfer(struct pci_xhci_softc *sc, 1661 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1662 struct xhci_endp_ctx *ep_ctx, uint32_t slot, uint32_t epid) 1663 { 1664 struct usb_data_xfer *xfer; 1665 int err; 1666 int do_intr; 1667 1668 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1669 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1670 1671 err = 0; 1672 do_intr = 0; 1673 1674 xfer = devep->ep_xfer; 1675 USB_DATA_XFER_LOCK(xfer); 1676 1677 /* outstanding requests queued up */ 1678 if (dev->dev_ue->ue_data != NULL) { 1679 err = dev->dev_ue->ue_data(dev->dev_sc, xfer, 1680 epid & 0x1 ? USB_XFER_IN : USB_XFER_OUT, epid/2); 1681 if (err == USB_ERR_CANCELLED) { 1682 if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) == 1683 USB_NAK) 1684 err = XHCI_TRB_ERROR_SUCCESS; 1685 } else { 1686 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, 1687 &do_intr); 1688 if (err == XHCI_TRB_ERROR_SUCCESS && do_intr) { 1689 pci_xhci_assert_interrupt(sc); 1690 } 1691 1692 1693 /* XXX should not do it if error? */ 1694 USB_DATA_XFER_RESET(xfer); 1695 } 1696 } 1697 1698 USB_DATA_XFER_UNLOCK(xfer); 1699 1700 1701 return (err); 1702 } 1703 1704 1705 static int 1706 pci_xhci_handle_transfer(struct pci_xhci_softc *sc, 1707 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1708 struct xhci_endp_ctx *ep_ctx, struct xhci_trb *trb, uint32_t slot, 1709 uint32_t epid, uint64_t addr, uint32_t ccs, uint32_t streamid) 1710 { 1711 struct xhci_trb *setup_trb; 1712 struct usb_data_xfer *xfer; 1713 struct usb_data_xfer_block *xfer_block; 1714 uint64_t val; 1715 uint32_t trbflags; 1716 int do_intr, err; 1717 int do_retry; 1718 1719 ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0, 1720 XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1721 1722 xfer = devep->ep_xfer; 1723 USB_DATA_XFER_LOCK(xfer); 1724 1725 DPRINTF(("pci_xhci handle_transfer slot %u", slot)); 1726 1727 retry: 1728 err = 0; 1729 do_retry = 0; 1730 do_intr = 0; 1731 setup_trb = NULL; 1732 1733 while (1) { 1734 pci_xhci_dump_trb(trb); 1735 1736 trbflags = trb->dwTrb3; 1737 1738 if (XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK && 1739 (trbflags & XHCI_TRB_3_CYCLE_BIT) != 1740 (ccs & XHCI_TRB_3_CYCLE_BIT)) { 1741 DPRINTF(("Cycle-bit changed trbflags %x, ccs %x", 1742 trbflags & XHCI_TRB_3_CYCLE_BIT, ccs)); 1743 break; 1744 } 1745 1746 xfer_block = NULL; 1747 1748 switch (XHCI_TRB_3_TYPE_GET(trbflags)) { 1749 case XHCI_TRB_TYPE_LINK: 1750 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1751 ccs ^= 0x1; 1752 1753 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1754 (void *)addr, ccs); 1755 xfer_block->processed = 1; 1756 break; 1757 1758 case XHCI_TRB_TYPE_SETUP_STAGE: 1759 if ((trbflags & XHCI_TRB_3_IDT_BIT) == 0 || 1760 XHCI_TRB_2_BYTES_GET(trb->dwTrb2) != 8) { 1761 DPRINTF(("pci_xhci: invalid setup trb")); 1762 err = XHCI_TRB_ERROR_TRB; 1763 goto errout; 1764 } 1765 setup_trb = trb; 1766 1767 val = trb->qwTrb0; 1768 if (!xfer->ureq) 1769 xfer->ureq = malloc( 1770 sizeof(struct usb_device_request)); 1771 memcpy(xfer->ureq, &val, 1772 sizeof(struct usb_device_request)); 1773 1774 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1775 (void *)addr, ccs); 1776 xfer_block->processed = 1; 1777 break; 1778 1779 case XHCI_TRB_TYPE_NORMAL: 1780 case XHCI_TRB_TYPE_ISOCH: 1781 if (setup_trb != NULL) { 1782 DPRINTF(("pci_xhci: trb not supposed to be in " 1783 "ctl scope")); 1784 err = XHCI_TRB_ERROR_TRB; 1785 goto errout; 1786 } 1787 /* fall through */ 1788 1789 case XHCI_TRB_TYPE_DATA_STAGE: 1790 xfer_block = usb_data_xfer_append(xfer, 1791 (void *)(trbflags & XHCI_TRB_3_IDT_BIT ? 1792 &trb->qwTrb0 : XHCI_GADDR(sc, trb->qwTrb0)), 1793 trb->dwTrb2 & 0x1FFFF, (void *)addr, ccs); 1794 break; 1795 1796 case XHCI_TRB_TYPE_STATUS_STAGE: 1797 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1798 (void *)addr, ccs); 1799 break; 1800 1801 case XHCI_TRB_TYPE_NOOP: 1802 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1803 (void *)addr, ccs); 1804 xfer_block->processed = 1; 1805 break; 1806 1807 case XHCI_TRB_TYPE_EVENT_DATA: 1808 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1809 (void *)addr, ccs); 1810 if ((epid > 1) && (trbflags & XHCI_TRB_3_IOC_BIT)) { 1811 xfer_block->processed = 1; 1812 } 1813 break; 1814 1815 default: 1816 DPRINTF(("pci_xhci: handle xfer unexpected trb type " 1817 "0x%x", 1818 XHCI_TRB_3_TYPE_GET(trbflags))); 1819 err = XHCI_TRB_ERROR_TRB; 1820 goto errout; 1821 } 1822 1823 trb = pci_xhci_trb_next(sc, trb, &addr); 1824 1825 DPRINTF(("pci_xhci: next trb: 0x%lx", (uint64_t)trb)); 1826 1827 if (xfer_block) { 1828 xfer_block->trbnext = addr; 1829 xfer_block->streamid = streamid; 1830 } 1831 1832 if (!setup_trb && !(trbflags & XHCI_TRB_3_CHAIN_BIT) && 1833 XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK) { 1834 break; 1835 } 1836 1837 /* handle current batch that requires interrupt on complete */ 1838 if (trbflags & XHCI_TRB_3_IOC_BIT) { 1839 DPRINTF(("pci_xhci: trb IOC bit set")); 1840 if (epid == 1) 1841 do_retry = 1; 1842 break; 1843 } 1844 } 1845 1846 DPRINTF(("pci_xhci[%d]: xfer->ndata %u", __LINE__, xfer->ndata)); 1847 1848 if (xfer->ndata <= 0) 1849 goto errout; 1850 1851 if (epid == 1) { 1852 err = USB_ERR_NOT_STARTED; 1853 if (dev->dev_ue->ue_request != NULL) 1854 err = dev->dev_ue->ue_request(dev->dev_sc, xfer); 1855 setup_trb = NULL; 1856 } else { 1857 /* handle data transfer */ 1858 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1859 err = XHCI_TRB_ERROR_SUCCESS; 1860 goto errout; 1861 } 1862 1863 err = USB_TO_XHCI_ERR(err); 1864 if ((err == XHCI_TRB_ERROR_SUCCESS) || 1865 (err == XHCI_TRB_ERROR_STALL) || 1866 (err == XHCI_TRB_ERROR_SHORT_PKT)) { 1867 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, &do_intr); 1868 if (err != XHCI_TRB_ERROR_SUCCESS) 1869 do_retry = 0; 1870 } 1871 1872 errout: 1873 if (err == XHCI_TRB_ERROR_EV_RING_FULL) 1874 DPRINTF(("pci_xhci[%d]: event ring full", __LINE__)); 1875 1876 if (!do_retry) 1877 USB_DATA_XFER_UNLOCK(xfer); 1878 1879 if (do_intr) 1880 pci_xhci_assert_interrupt(sc); 1881 1882 if (do_retry) { 1883 USB_DATA_XFER_RESET(xfer); 1884 DPRINTF(("pci_xhci[%d]: retry:continuing with next TRBs", 1885 __LINE__)); 1886 goto retry; 1887 } 1888 1889 if (epid == 1) 1890 USB_DATA_XFER_RESET(xfer); 1891 1892 return (err); 1893 } 1894 1895 static void 1896 pci_xhci_device_doorbell(struct pci_xhci_softc *sc, uint32_t slot, 1897 uint32_t epid, uint32_t streamid) 1898 { 1899 struct pci_xhci_dev_emu *dev; 1900 struct pci_xhci_dev_ep *devep; 1901 struct xhci_dev_ctx *dev_ctx; 1902 struct xhci_endp_ctx *ep_ctx; 1903 struct pci_xhci_trb_ring *sctx_tr; 1904 struct xhci_trb *trb; 1905 uint64_t ringaddr; 1906 uint32_t ccs; 1907 1908 DPRINTF(("pci_xhci doorbell slot %u epid %u stream %u", 1909 slot, epid, streamid)); 1910 1911 if (slot == 0 || slot > XHCI_MAX_SLOTS) { 1912 DPRINTF(("pci_xhci: invalid doorbell slot %u", slot)); 1913 return; 1914 } 1915 1916 if (epid == 0 || epid >= XHCI_MAX_ENDPOINTS) { 1917 DPRINTF(("pci_xhci: invalid endpoint %u", epid)); 1918 return; 1919 } 1920 1921 dev = XHCI_SLOTDEV_PTR(sc, slot); 1922 devep = &dev->eps[epid]; 1923 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1924 if (!dev_ctx) { 1925 return; 1926 } 1927 ep_ctx = &dev_ctx->ctx_ep[epid]; 1928 1929 sctx_tr = NULL; 1930 1931 DPRINTF(("pci_xhci: device doorbell ep[%u] %08x %08x %016lx %08x", 1932 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1933 ep_ctx->dwEpCtx4)); 1934 1935 if (ep_ctx->qwEpCtx2 == 0) 1936 return; 1937 1938 /* handle pending transfers */ 1939 if (devep->ep_xfer->ndata > 0) { 1940 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1941 return; 1942 } 1943 1944 /* get next trb work item */ 1945 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) { 1946 struct xhci_stream_ctx *sctx; 1947 1948 /* 1949 * Stream IDs of 0, 65535 (any stream), and 65534 1950 * (prime) are invalid. 1951 */ 1952 if (streamid == 0 || streamid == 65534 || streamid == 65535) { 1953 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1954 return; 1955 } 1956 1957 sctx = NULL; 1958 pci_xhci_find_stream(sc, ep_ctx, streamid, &sctx); 1959 if (sctx == NULL) { 1960 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1961 return; 1962 } 1963 sctx_tr = &devep->ep_sctx_trbs[streamid]; 1964 ringaddr = sctx_tr->ringaddr; 1965 ccs = sctx_tr->ccs; 1966 trb = XHCI_GADDR(sc, sctx_tr->ringaddr & ~0xFUL); 1967 DPRINTF(("doorbell, stream %u, ccs %lx, trb ccs %x", 1968 streamid, ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1969 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1970 } else { 1971 if (streamid != 0) { 1972 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1973 return; 1974 } 1975 ringaddr = devep->ep_ringaddr; 1976 ccs = devep->ep_ccs; 1977 trb = devep->ep_tr; 1978 DPRINTF(("doorbell, ccs %lx, trb ccs %x", 1979 ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1980 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1981 } 1982 1983 if (XHCI_TRB_3_TYPE_GET(trb->dwTrb3) == 0) { 1984 DPRINTF(("pci_xhci: ring %lx trb[%lx] EP %u is RESERVED?", 1985 ep_ctx->qwEpCtx2, devep->ep_ringaddr, epid)); 1986 return; 1987 } 1988 1989 pci_xhci_handle_transfer(sc, dev, devep, ep_ctx, trb, slot, epid, 1990 ringaddr, ccs, streamid); 1991 } 1992 1993 static void 1994 pci_xhci_dbregs_write(struct pci_xhci_softc *sc, uint64_t offset, 1995 uint64_t value) 1996 { 1997 1998 offset = (offset - sc->dboff) / sizeof(uint32_t); 1999 2000 DPRINTF(("pci_xhci: doorbell write offset 0x%lx: 0x%lx", 2001 offset, value)); 2002 2003 if (XHCI_HALTED(sc)) { 2004 DPRINTF(("pci_xhci: controller halted")); 2005 return; 2006 } 2007 2008 if (offset == 0) 2009 pci_xhci_complete_commands(sc); 2010 else if (sc->portregs != NULL) 2011 pci_xhci_device_doorbell(sc, offset, 2012 XHCI_DB_TARGET_GET(value), XHCI_DB_SID_GET(value)); 2013 } 2014 2015 static void 2016 pci_xhci_rtsregs_write(struct pci_xhci_softc *sc, uint64_t offset, 2017 uint64_t value) 2018 { 2019 struct pci_xhci_rtsregs *rts; 2020 2021 offset -= sc->rtsoff; 2022 2023 if (offset == 0) { 2024 DPRINTF(("pci_xhci attempted write to MFINDEX")); 2025 return; 2026 } 2027 2028 DPRINTF(("pci_xhci: runtime regs write offset 0x%lx: 0x%lx", 2029 offset, value)); 2030 2031 offset -= 0x20; /* start of intrreg */ 2032 2033 rts = &sc->rtsregs; 2034 2035 switch (offset) { 2036 case 0x00: 2037 if (value & XHCI_IMAN_INTR_PEND) 2038 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2039 rts->intrreg.iman = (value & XHCI_IMAN_INTR_ENA) | 2040 (rts->intrreg.iman & XHCI_IMAN_INTR_PEND); 2041 2042 if (!(value & XHCI_IMAN_INTR_ENA)) 2043 pci_xhci_deassert_interrupt(sc); 2044 2045 break; 2046 2047 case 0x04: 2048 rts->intrreg.imod = value; 2049 break; 2050 2051 case 0x08: 2052 rts->intrreg.erstsz = value & 0xFFFF; 2053 break; 2054 2055 case 0x10: 2056 /* ERSTBA low bits */ 2057 rts->intrreg.erstba = MASK_64_HI(sc->rtsregs.intrreg.erstba) | 2058 (value & ~0x3F); 2059 break; 2060 2061 case 0x14: 2062 /* ERSTBA high bits */ 2063 rts->intrreg.erstba = (value << 32) | 2064 MASK_64_LO(sc->rtsregs.intrreg.erstba); 2065 2066 rts->erstba_p = XHCI_GADDR(sc, 2067 sc->rtsregs.intrreg.erstba & ~0x3FUL); 2068 2069 rts->erst_p = XHCI_GADDR(sc, 2070 sc->rtsregs.erstba_p->qwEvrsTablePtr & ~0x3FUL); 2071 2072 rts->er_enq_idx = 0; 2073 rts->er_events_cnt = 0; 2074 2075 DPRINTF(("pci_xhci: wr erstba erst (%p) ptr 0x%lx, sz %u", 2076 rts->erstba_p, 2077 rts->erstba_p->qwEvrsTablePtr, 2078 rts->erstba_p->dwEvrsTableSize)); 2079 break; 2080 2081 case 0x18: 2082 /* ERDP low bits */ 2083 rts->intrreg.erdp = 2084 MASK_64_HI(sc->rtsregs.intrreg.erdp) | 2085 (rts->intrreg.erdp & XHCI_ERDP_LO_BUSY) | 2086 (value & ~0xF); 2087 if (value & XHCI_ERDP_LO_BUSY) { 2088 rts->intrreg.erdp &= ~XHCI_ERDP_LO_BUSY; 2089 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2090 } 2091 2092 rts->er_deq_seg = XHCI_ERDP_LO_SINDEX(value); 2093 2094 break; 2095 2096 case 0x1C: 2097 /* ERDP high bits */ 2098 rts->intrreg.erdp = (value << 32) | 2099 MASK_64_LO(sc->rtsregs.intrreg.erdp); 2100 2101 if (rts->er_events_cnt > 0) { 2102 uint64_t erdp; 2103 uint32_t erdp_i; 2104 2105 erdp = rts->intrreg.erdp & ~0xF; 2106 erdp_i = (erdp - rts->erstba_p->qwEvrsTablePtr) / 2107 sizeof(struct xhci_trb); 2108 2109 if (erdp_i <= rts->er_enq_idx) 2110 rts->er_events_cnt = rts->er_enq_idx - erdp_i; 2111 else 2112 rts->er_events_cnt = 2113 rts->erstba_p->dwEvrsTableSize - 2114 (erdp_i - rts->er_enq_idx); 2115 2116 DPRINTF(("pci_xhci: erdp 0x%lx, events cnt %u", 2117 erdp, rts->er_events_cnt)); 2118 } 2119 2120 break; 2121 2122 default: 2123 DPRINTF(("pci_xhci attempted write to RTS offset 0x%lx", 2124 offset)); 2125 break; 2126 } 2127 } 2128 2129 static uint64_t 2130 pci_xhci_portregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2131 { 2132 int port; 2133 uint32_t *p; 2134 2135 if (sc->portregs == NULL) 2136 return (0); 2137 2138 port = (offset - 0x3F0) / 0x10; 2139 2140 if (port > XHCI_MAX_DEVS) { 2141 DPRINTF(("pci_xhci: portregs_read port %d >= XHCI_MAX_DEVS", 2142 port)); 2143 2144 /* return default value for unused port */ 2145 return (XHCI_PS_SPEED_SET(3)); 2146 } 2147 2148 offset = (offset - 0x3F0) % 0x10; 2149 2150 p = &sc->portregs[port].portsc; 2151 p += offset / sizeof(uint32_t); 2152 2153 DPRINTF(("pci_xhci: portregs read offset 0x%lx port %u -> 0x%x", 2154 offset, port, *p)); 2155 2156 return (*p); 2157 } 2158 2159 static void 2160 pci_xhci_hostop_write(struct pci_xhci_softc *sc, uint64_t offset, 2161 uint64_t value) 2162 { 2163 offset -= XHCI_CAPLEN; 2164 2165 if (offset < 0x400) 2166 DPRINTF(("pci_xhci: hostop write offset 0x%lx: 0x%lx", 2167 offset, value)); 2168 2169 switch (offset) { 2170 case XHCI_USBCMD: 2171 sc->opregs.usbcmd = pci_xhci_usbcmd_write(sc, value & 0x3F0F); 2172 break; 2173 2174 case XHCI_USBSTS: 2175 /* clear bits on write */ 2176 sc->opregs.usbsts &= ~(value & 2177 (XHCI_STS_HSE|XHCI_STS_EINT|XHCI_STS_PCD|XHCI_STS_SSS| 2178 XHCI_STS_RSS|XHCI_STS_SRE|XHCI_STS_CNR)); 2179 break; 2180 2181 case XHCI_PAGESIZE: 2182 /* read only */ 2183 break; 2184 2185 case XHCI_DNCTRL: 2186 sc->opregs.dnctrl = value & 0xFFFF; 2187 break; 2188 2189 case XHCI_CRCR_LO: 2190 if (sc->opregs.crcr & XHCI_CRCR_LO_CRR) { 2191 sc->opregs.crcr &= ~(XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2192 sc->opregs.crcr |= value & 2193 (XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2194 } else { 2195 sc->opregs.crcr = MASK_64_HI(sc->opregs.crcr) | 2196 (value & (0xFFFFFFC0 | XHCI_CRCR_LO_RCS)); 2197 } 2198 break; 2199 2200 case XHCI_CRCR_HI: 2201 if (!(sc->opregs.crcr & XHCI_CRCR_LO_CRR)) { 2202 sc->opregs.crcr = MASK_64_LO(sc->opregs.crcr) | 2203 (value << 32); 2204 2205 sc->opregs.cr_p = XHCI_GADDR(sc, 2206 sc->opregs.crcr & ~0xF); 2207 } 2208 2209 if (sc->opregs.crcr & XHCI_CRCR_LO_CS) { 2210 /* Stop operation of Command Ring */ 2211 } 2212 2213 if (sc->opregs.crcr & XHCI_CRCR_LO_CA) { 2214 /* Abort command */ 2215 } 2216 2217 break; 2218 2219 case XHCI_DCBAAP_LO: 2220 sc->opregs.dcbaap = MASK_64_HI(sc->opregs.dcbaap) | 2221 (value & 0xFFFFFFC0); 2222 break; 2223 2224 case XHCI_DCBAAP_HI: 2225 sc->opregs.dcbaap = MASK_64_LO(sc->opregs.dcbaap) | 2226 (value << 32); 2227 sc->opregs.dcbaa_p = XHCI_GADDR(sc, sc->opregs.dcbaap & ~0x3FUL); 2228 2229 DPRINTF(("pci_xhci: opregs dcbaap = 0x%lx (vaddr 0x%lx)", 2230 sc->opregs.dcbaap, (uint64_t)sc->opregs.dcbaa_p)); 2231 break; 2232 2233 case XHCI_CONFIG: 2234 sc->opregs.config = value & 0x03FF; 2235 break; 2236 2237 default: 2238 if (offset >= 0x400) 2239 pci_xhci_portregs_write(sc, offset, value); 2240 2241 break; 2242 } 2243 } 2244 2245 2246 static void 2247 pci_xhci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, 2248 int baridx, uint64_t offset, int size, uint64_t value) 2249 { 2250 struct pci_xhci_softc *sc; 2251 2252 sc = pi->pi_arg; 2253 2254 assert(baridx == 0); 2255 2256 pthread_mutex_lock(&sc->mtx); 2257 if (offset < XHCI_CAPLEN) /* read only registers */ 2258 WPRINTF(("pci_xhci: write RO-CAPs offset %ld", offset)); 2259 else if (offset < sc->dboff) 2260 pci_xhci_hostop_write(sc, offset, value); 2261 else if (offset < sc->rtsoff) 2262 pci_xhci_dbregs_write(sc, offset, value); 2263 else if (offset < sc->regsend) 2264 pci_xhci_rtsregs_write(sc, offset, value); 2265 else 2266 WPRINTF(("pci_xhci: write invalid offset %ld", offset)); 2267 2268 pthread_mutex_unlock(&sc->mtx); 2269 } 2270 2271 static uint64_t 2272 pci_xhci_hostcap_read(struct pci_xhci_softc *sc, uint64_t offset) 2273 { 2274 uint64_t value; 2275 2276 switch (offset) { 2277 case XHCI_CAPLENGTH: /* 0x00 */ 2278 value = sc->caplength; 2279 break; 2280 2281 case XHCI_HCSPARAMS1: /* 0x04 */ 2282 value = sc->hcsparams1; 2283 break; 2284 2285 case XHCI_HCSPARAMS2: /* 0x08 */ 2286 value = sc->hcsparams2; 2287 break; 2288 2289 case XHCI_HCSPARAMS3: /* 0x0C */ 2290 value = sc->hcsparams3; 2291 break; 2292 2293 case XHCI_HCSPARAMS0: /* 0x10 */ 2294 value = sc->hccparams1; 2295 break; 2296 2297 case XHCI_DBOFF: /* 0x14 */ 2298 value = sc->dboff; 2299 break; 2300 2301 case XHCI_RTSOFF: /* 0x18 */ 2302 value = sc->rtsoff; 2303 break; 2304 2305 case XHCI_HCCPRAMS2: /* 0x1C */ 2306 value = sc->hccparams2; 2307 break; 2308 2309 default: 2310 value = 0; 2311 break; 2312 } 2313 2314 DPRINTF(("pci_xhci: hostcap read offset 0x%lx -> 0x%lx", 2315 offset, value)); 2316 2317 return (value); 2318 } 2319 2320 static uint64_t 2321 pci_xhci_hostop_read(struct pci_xhci_softc *sc, uint64_t offset) 2322 { 2323 uint64_t value; 2324 2325 offset = (offset - XHCI_CAPLEN); 2326 2327 switch (offset) { 2328 case XHCI_USBCMD: /* 0x00 */ 2329 value = sc->opregs.usbcmd; 2330 break; 2331 2332 case XHCI_USBSTS: /* 0x04 */ 2333 value = sc->opregs.usbsts; 2334 break; 2335 2336 case XHCI_PAGESIZE: /* 0x08 */ 2337 value = sc->opregs.pgsz; 2338 break; 2339 2340 case XHCI_DNCTRL: /* 0x14 */ 2341 value = sc->opregs.dnctrl; 2342 break; 2343 2344 case XHCI_CRCR_LO: /* 0x18 */ 2345 value = sc->opregs.crcr & XHCI_CRCR_LO_CRR; 2346 break; 2347 2348 case XHCI_CRCR_HI: /* 0x1C */ 2349 value = 0; 2350 break; 2351 2352 case XHCI_DCBAAP_LO: /* 0x30 */ 2353 value = sc->opregs.dcbaap & 0xFFFFFFFF; 2354 break; 2355 2356 case XHCI_DCBAAP_HI: /* 0x34 */ 2357 value = (sc->opregs.dcbaap >> 32) & 0xFFFFFFFF; 2358 break; 2359 2360 case XHCI_CONFIG: /* 0x38 */ 2361 value = sc->opregs.config; 2362 break; 2363 2364 default: 2365 if (offset >= 0x400) 2366 value = pci_xhci_portregs_read(sc, offset); 2367 else 2368 value = 0; 2369 2370 break; 2371 } 2372 2373 if (offset < 0x400) 2374 DPRINTF(("pci_xhci: hostop read offset 0x%lx -> 0x%lx", 2375 offset, value)); 2376 2377 return (value); 2378 } 2379 2380 static uint64_t 2381 pci_xhci_dbregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2382 { 2383 2384 /* read doorbell always returns 0 */ 2385 return (0); 2386 } 2387 2388 static uint64_t 2389 pci_xhci_rtsregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2390 { 2391 uint32_t value; 2392 2393 offset -= sc->rtsoff; 2394 value = 0; 2395 2396 if (offset == XHCI_MFINDEX) { 2397 value = sc->rtsregs.mfindex; 2398 } else if (offset >= 0x20) { 2399 int item; 2400 uint32_t *p; 2401 2402 offset -= 0x20; 2403 item = offset % 32; 2404 2405 assert(offset < sizeof(sc->rtsregs.intrreg)); 2406 2407 p = &sc->rtsregs.intrreg.iman; 2408 p += item / sizeof(uint32_t); 2409 value = *p; 2410 } 2411 2412 DPRINTF(("pci_xhci: rtsregs read offset 0x%lx -> 0x%x", 2413 offset, value)); 2414 2415 return (value); 2416 } 2417 2418 static uint64_t 2419 pci_xhci_xecp_read(struct pci_xhci_softc *sc, uint64_t offset) 2420 { 2421 uint32_t value; 2422 2423 offset -= sc->regsend; 2424 value = 0; 2425 2426 switch (offset) { 2427 case 0: 2428 /* rev major | rev minor | next-cap | cap-id */ 2429 value = (0x02 << 24) | (4 << 8) | XHCI_ID_PROTOCOLS; 2430 break; 2431 case 4: 2432 /* name string = "USB" */ 2433 value = 0x20425355; 2434 break; 2435 case 8: 2436 /* psic | proto-defined | compat # | compat offset */ 2437 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb2_port_start; 2438 break; 2439 case 12: 2440 break; 2441 case 16: 2442 /* rev major | rev minor | next-cap | cap-id */ 2443 value = (0x03 << 24) | XHCI_ID_PROTOCOLS; 2444 break; 2445 case 20: 2446 /* name string = "USB" */ 2447 value = 0x20425355; 2448 break; 2449 case 24: 2450 /* psic | proto-defined | compat # | compat offset */ 2451 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb3_port_start; 2452 break; 2453 case 28: 2454 break; 2455 default: 2456 DPRINTF(("pci_xhci: xecp invalid offset 0x%lx", offset)); 2457 break; 2458 } 2459 2460 DPRINTF(("pci_xhci: xecp read offset 0x%lx -> 0x%x", 2461 offset, value)); 2462 2463 return (value); 2464 } 2465 2466 2467 static uint64_t 2468 pci_xhci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 2469 uint64_t offset, int size) 2470 { 2471 struct pci_xhci_softc *sc; 2472 uint32_t value; 2473 2474 sc = pi->pi_arg; 2475 2476 assert(baridx == 0); 2477 2478 pthread_mutex_lock(&sc->mtx); 2479 if (offset < XHCI_CAPLEN) 2480 value = pci_xhci_hostcap_read(sc, offset); 2481 else if (offset < sc->dboff) 2482 value = pci_xhci_hostop_read(sc, offset); 2483 else if (offset < sc->rtsoff) 2484 value = pci_xhci_dbregs_read(sc, offset); 2485 else if (offset < sc->regsend) 2486 value = pci_xhci_rtsregs_read(sc, offset); 2487 else if (offset < (sc->regsend + 4*32)) 2488 value = pci_xhci_xecp_read(sc, offset); 2489 else { 2490 value = 0; 2491 WPRINTF(("pci_xhci: read invalid offset %ld", offset)); 2492 } 2493 2494 pthread_mutex_unlock(&sc->mtx); 2495 2496 switch (size) { 2497 case 1: 2498 value &= 0xFF; 2499 break; 2500 case 2: 2501 value &= 0xFFFF; 2502 break; 2503 case 4: 2504 value &= 0xFFFFFFFF; 2505 break; 2506 } 2507 2508 return (value); 2509 } 2510 2511 static void 2512 pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm) 2513 { 2514 struct pci_xhci_portregs *port; 2515 struct pci_xhci_dev_emu *dev; 2516 struct xhci_trb evtrb; 2517 int error; 2518 2519 assert(portn <= XHCI_MAX_DEVS); 2520 2521 DPRINTF(("xhci reset port %d", portn)); 2522 2523 port = XHCI_PORTREG_PTR(sc, portn); 2524 dev = XHCI_DEVINST_PTR(sc, portn); 2525 if (dev) { 2526 port->portsc &= ~(XHCI_PS_PLS_MASK | XHCI_PS_PR | XHCI_PS_PRC); 2527 port->portsc |= XHCI_PS_PED | 2528 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2529 2530 if (warm && dev->dev_ue->ue_usbver == 3) { 2531 port->portsc |= XHCI_PS_WRC; 2532 } 2533 2534 if ((port->portsc & XHCI_PS_PRC) == 0) { 2535 port->portsc |= XHCI_PS_PRC; 2536 2537 pci_xhci_set_evtrb(&evtrb, portn, 2538 XHCI_TRB_ERROR_SUCCESS, 2539 XHCI_TRB_EVENT_PORT_STS_CHANGE); 2540 error = pci_xhci_insert_event(sc, &evtrb, 1); 2541 if (error != XHCI_TRB_ERROR_SUCCESS) 2542 DPRINTF(("xhci reset port insert event " 2543 "failed")); 2544 } 2545 } 2546 } 2547 2548 static void 2549 pci_xhci_init_port(struct pci_xhci_softc *sc, int portn) 2550 { 2551 struct pci_xhci_portregs *port; 2552 struct pci_xhci_dev_emu *dev; 2553 2554 port = XHCI_PORTREG_PTR(sc, portn); 2555 dev = XHCI_DEVINST_PTR(sc, portn); 2556 if (dev) { 2557 port->portsc = XHCI_PS_CCS | /* connected */ 2558 XHCI_PS_PP; /* port power */ 2559 2560 if (dev->dev_ue->ue_usbver == 2) { 2561 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL) | 2562 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2563 } else { 2564 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0) | 2565 XHCI_PS_PED | /* enabled */ 2566 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2567 } 2568 2569 DPRINTF(("Init port %d 0x%x", portn, port->portsc)); 2570 } else { 2571 port->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP; 2572 DPRINTF(("Init empty port %d 0x%x", portn, port->portsc)); 2573 } 2574 } 2575 2576 static int 2577 pci_xhci_dev_intr(struct usb_hci *hci, int epctx) 2578 { 2579 struct pci_xhci_dev_emu *dev; 2580 struct xhci_dev_ctx *dev_ctx; 2581 struct xhci_trb evtrb; 2582 struct pci_xhci_softc *sc; 2583 struct pci_xhci_portregs *p; 2584 struct xhci_endp_ctx *ep_ctx; 2585 int error = 0; 2586 int dir_in; 2587 int epid; 2588 2589 dir_in = epctx & 0x80; 2590 epid = epctx & ~0x80; 2591 2592 /* HW endpoint contexts are 0-15; convert to epid based on dir */ 2593 epid = (epid * 2) + (dir_in ? 1 : 0); 2594 2595 assert(epid >= 1 && epid <= 31); 2596 2597 dev = hci->hci_sc; 2598 sc = dev->xsc; 2599 2600 /* check if device is ready; OS has to initialise it */ 2601 if (sc->rtsregs.erstba_p == NULL || 2602 (sc->opregs.usbcmd & XHCI_CMD_RS) == 0 || 2603 dev->dev_ctx == NULL) 2604 return (0); 2605 2606 p = XHCI_PORTREG_PTR(sc, hci->hci_port); 2607 2608 /* raise event if link U3 (suspended) state */ 2609 if (XHCI_PS_PLS_GET(p->portsc) == 3) { 2610 p->portsc &= ~XHCI_PS_PLS_MASK; 2611 p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME); 2612 if ((p->portsc & XHCI_PS_PLC) != 0) 2613 return (0); 2614 2615 p->portsc |= XHCI_PS_PLC; 2616 2617 pci_xhci_set_evtrb(&evtrb, hci->hci_port, 2618 XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE); 2619 error = pci_xhci_insert_event(sc, &evtrb, 0); 2620 if (error != XHCI_TRB_ERROR_SUCCESS) 2621 goto done; 2622 } 2623 2624 dev_ctx = dev->dev_ctx; 2625 ep_ctx = &dev_ctx->ctx_ep[epid]; 2626 if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) { 2627 DPRINTF(("xhci device interrupt on disabled endpoint %d", 2628 epid)); 2629 return (0); 2630 } 2631 2632 DPRINTF(("xhci device interrupt on endpoint %d", epid)); 2633 2634 pci_xhci_device_doorbell(sc, hci->hci_port, epid, 0); 2635 2636 done: 2637 return (error); 2638 } 2639 2640 static int 2641 pci_xhci_dev_event(struct usb_hci *hci, enum hci_usbev evid, void *param) 2642 { 2643 2644 DPRINTF(("xhci device event port %d", hci->hci_port)); 2645 return (0); 2646 } 2647 2648 /* 2649 * Each controller contains a "slot" node which contains a list of 2650 * child nodes each of which is a device. Each slot node's name 2651 * corresponds to a specific controller slot. These nodes 2652 * contain a "device" variable identifying the device model of the 2653 * USB device. For example: 2654 * 2655 * pci.0.1.0 2656 * .device="xhci" 2657 * .slot 2658 * .1 2659 * .device="tablet" 2660 */ 2661 static int 2662 pci_xhci_legacy_config(nvlist_t *nvl, const char *opts) 2663 { 2664 char node_name[16]; 2665 nvlist_t *slots_nvl, *slot_nvl; 2666 char *cp, *opt, *str, *tofree; 2667 int slot; 2668 2669 if (opts == NULL) 2670 return (0); 2671 2672 slots_nvl = create_relative_config_node(nvl, "slot"); 2673 slot = 1; 2674 tofree = str = strdup(opts); 2675 while ((opt = strsep(&str, ",")) != NULL) { 2676 /* device[=<config>] */ 2677 cp = strchr(opt, '='); 2678 if (cp != NULL) { 2679 *cp = '\0'; 2680 cp++; 2681 } 2682 2683 snprintf(node_name, sizeof(node_name), "%d", slot); 2684 slot++; 2685 slot_nvl = create_relative_config_node(slots_nvl, node_name); 2686 set_config_value_node(slot_nvl, "device", opt); 2687 2688 /* 2689 * NB: Given that we split on commas above, the legacy 2690 * format only supports a single option. 2691 */ 2692 if (cp != NULL && *cp != '\0') 2693 pci_parse_legacy_config(slot_nvl, cp); 2694 } 2695 free(tofree); 2696 return (0); 2697 } 2698 2699 static int 2700 pci_xhci_parse_devices(struct pci_xhci_softc *sc, nvlist_t *nvl) 2701 { 2702 struct pci_xhci_dev_emu *dev; 2703 struct usb_devemu *ue; 2704 const nvlist_t *slots_nvl, *slot_nvl; 2705 const char *name, *device; 2706 char *cp; 2707 void *devsc, *cookie; 2708 long slot; 2709 int type, usb3_port, usb2_port, i, ndevices; 2710 2711 usb3_port = sc->usb3_port_start; 2712 usb2_port = sc->usb2_port_start; 2713 2714 sc->devices = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_dev_emu *)); 2715 sc->slots = calloc(XHCI_MAX_SLOTS, sizeof(struct pci_xhci_dev_emu *)); 2716 2717 /* port and slot numbering start from 1 */ 2718 sc->devices--; 2719 sc->slots--; 2720 2721 ndevices = 0; 2722 2723 slots_nvl = find_relative_config_node(nvl, "slot"); 2724 if (slots_nvl == NULL) 2725 goto portsfinal; 2726 2727 cookie = NULL; 2728 while ((name = nvlist_next(slots_nvl, &type, &cookie)) != NULL) { 2729 if (usb2_port == ((sc->usb2_port_start) + XHCI_MAX_DEVS/2) || 2730 usb3_port == ((sc->usb3_port_start) + XHCI_MAX_DEVS/2)) { 2731 WPRINTF(("pci_xhci max number of USB 2 or 3 " 2732 "devices reached, max %d", XHCI_MAX_DEVS/2)); 2733 goto bad; 2734 } 2735 2736 if (type != NV_TYPE_NVLIST) { 2737 EPRINTLN( 2738 "pci_xhci: config variable '%s' under slot node", 2739 name); 2740 goto bad; 2741 } 2742 2743 slot = strtol(name, &cp, 0); 2744 if (*cp != '\0' || slot <= 0 || slot > XHCI_MAX_SLOTS) { 2745 EPRINTLN("pci_xhci: invalid slot '%s'", name); 2746 goto bad; 2747 } 2748 2749 if (XHCI_SLOTDEV_PTR(sc, slot) != NULL) { 2750 EPRINTLN("pci_xhci: duplicate slot '%s'", name); 2751 goto bad; 2752 } 2753 2754 slot_nvl = nvlist_get_nvlist(slots_nvl, name); 2755 device = get_config_value_node(slot_nvl, "device"); 2756 if (device == NULL) { 2757 EPRINTLN( 2758 "pci_xhci: missing \"device\" value for slot '%s'", 2759 name); 2760 goto bad; 2761 } 2762 2763 ue = usb_emu_finddev(device); 2764 if (ue == NULL) { 2765 EPRINTLN("pci_xhci: unknown device model \"%s\"", 2766 device); 2767 goto bad; 2768 } 2769 2770 DPRINTF(("pci_xhci adding device %s", device)); 2771 2772 dev = calloc(1, sizeof(struct pci_xhci_dev_emu)); 2773 dev->xsc = sc; 2774 dev->hci.hci_sc = dev; 2775 dev->hci.hci_intr = pci_xhci_dev_intr; 2776 dev->hci.hci_event = pci_xhci_dev_event; 2777 2778 if (ue->ue_usbver == 2) { 2779 if (usb2_port == sc->usb2_port_start + 2780 XHCI_MAX_DEVS / 2) { 2781 WPRINTF(("pci_xhci max number of USB 2 devices " 2782 "reached, max %d", XHCI_MAX_DEVS / 2)); 2783 goto bad; 2784 } 2785 dev->hci.hci_port = usb2_port; 2786 usb2_port++; 2787 } else { 2788 if (usb3_port == sc->usb3_port_start + 2789 XHCI_MAX_DEVS / 2) { 2790 WPRINTF(("pci_xhci max number of USB 3 devices " 2791 "reached, max %d", XHCI_MAX_DEVS / 2)); 2792 goto bad; 2793 } 2794 dev->hci.hci_port = usb3_port; 2795 usb3_port++; 2796 } 2797 XHCI_DEVINST_PTR(sc, dev->hci.hci_port) = dev; 2798 2799 dev->hci.hci_address = 0; 2800 devsc = ue->ue_init(&dev->hci, nvl); 2801 if (devsc == NULL) { 2802 goto bad; 2803 } 2804 2805 dev->dev_ue = ue; 2806 dev->dev_sc = devsc; 2807 2808 XHCI_SLOTDEV_PTR(sc, slot) = dev; 2809 ndevices++; 2810 } 2811 2812 portsfinal: 2813 sc->portregs = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_portregs)); 2814 sc->portregs--; 2815 2816 if (ndevices > 0) { 2817 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 2818 pci_xhci_init_port(sc, i); 2819 } 2820 } else { 2821 WPRINTF(("pci_xhci no USB devices configured")); 2822 } 2823 return (0); 2824 2825 bad: 2826 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 2827 free(XHCI_DEVINST_PTR(sc, i)); 2828 } 2829 2830 free(sc->devices + 1); 2831 free(sc->slots + 1); 2832 2833 return (-1); 2834 } 2835 2836 static int 2837 pci_xhci_init(struct vmctx *ctx, struct pci_devinst *pi, nvlist_t *nvl) 2838 { 2839 struct pci_xhci_softc *sc; 2840 int error; 2841 2842 if (xhci_in_use) { 2843 WPRINTF(("pci_xhci controller already defined")); 2844 return (-1); 2845 } 2846 xhci_in_use = 1; 2847 2848 sc = calloc(1, sizeof(struct pci_xhci_softc)); 2849 pi->pi_arg = sc; 2850 sc->xsc_pi = pi; 2851 2852 sc->usb2_port_start = (XHCI_MAX_DEVS/2) + 1; 2853 sc->usb3_port_start = 1; 2854 2855 /* discover devices */ 2856 error = pci_xhci_parse_devices(sc, nvl); 2857 if (error < 0) 2858 goto done; 2859 else 2860 error = 0; 2861 2862 sc->caplength = XHCI_SET_CAPLEN(XHCI_CAPLEN) | 2863 XHCI_SET_HCIVERSION(0x0100); 2864 sc->hcsparams1 = XHCI_SET_HCSP1_MAXPORTS(XHCI_MAX_DEVS) | 2865 XHCI_SET_HCSP1_MAXINTR(1) | /* interrupters */ 2866 XHCI_SET_HCSP1_MAXSLOTS(XHCI_MAX_SLOTS); 2867 sc->hcsparams2 = XHCI_SET_HCSP2_ERSTMAX(XHCI_ERST_MAX) | 2868 XHCI_SET_HCSP2_IST(0x04); 2869 sc->hcsparams3 = 0; /* no latency */ 2870 sc->hccparams1 = XHCI_SET_HCCP1_AC64(1) | /* 64-bit addrs */ 2871 XHCI_SET_HCCP1_NSS(1) | /* no 2nd-streams */ 2872 XHCI_SET_HCCP1_SPC(1) | /* short packet */ 2873 XHCI_SET_HCCP1_MAXPSA(XHCI_STREAMS_MAX); 2874 sc->hccparams2 = XHCI_SET_HCCP2_LEC(1) | 2875 XHCI_SET_HCCP2_U3C(1); 2876 sc->dboff = XHCI_SET_DOORBELL(XHCI_CAPLEN + XHCI_PORTREGS_START + 2877 XHCI_MAX_DEVS * sizeof(struct pci_xhci_portregs)); 2878 2879 /* dboff must be 32-bit aligned */ 2880 if (sc->dboff & 0x3) 2881 sc->dboff = (sc->dboff + 0x3) & ~0x3; 2882 2883 /* rtsoff must be 32-bytes aligned */ 2884 sc->rtsoff = XHCI_SET_RTSOFFSET(sc->dboff + (XHCI_MAX_SLOTS+1) * 32); 2885 if (sc->rtsoff & 0x1F) 2886 sc->rtsoff = (sc->rtsoff + 0x1F) & ~0x1F; 2887 2888 DPRINTF(("pci_xhci dboff: 0x%x, rtsoff: 0x%x", sc->dboff, 2889 sc->rtsoff)); 2890 2891 sc->opregs.usbsts = XHCI_STS_HCH; 2892 sc->opregs.pgsz = XHCI_PAGESIZE_4K; 2893 2894 pci_xhci_reset(sc); 2895 2896 sc->regsend = sc->rtsoff + 0x20 + 32; /* only 1 intrpter */ 2897 2898 /* 2899 * Set extended capabilities pointer to be after regsend; 2900 * value of xecp field is 32-bit offset. 2901 */ 2902 sc->hccparams1 |= XHCI_SET_HCCP1_XECP(sc->regsend/4); 2903 2904 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1E31); 2905 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086); 2906 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SERIALBUS); 2907 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_SERIALBUS_USB); 2908 pci_set_cfgdata8(pi, PCIR_PROGIF,PCIP_SERIALBUS_USB_XHCI); 2909 pci_set_cfgdata8(pi, PCI_USBREV, PCI_USB_REV_3_0); 2910 2911 pci_emul_add_msicap(pi, 1); 2912 2913 /* regsend + xecp registers */ 2914 pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, sc->regsend + 4*32); 2915 DPRINTF(("pci_xhci pci_emu_alloc: %d", sc->regsend + 4*32)); 2916 2917 2918 pci_lintr_request(pi); 2919 2920 pthread_mutex_init(&sc->mtx, NULL); 2921 2922 done: 2923 if (error) { 2924 free(sc); 2925 } 2926 2927 return (error); 2928 } 2929 2930 #ifdef BHYVE_SNAPSHOT 2931 static void 2932 pci_xhci_map_devs_slots(struct pci_xhci_softc *sc, int maps[]) 2933 { 2934 int i, j; 2935 struct pci_xhci_dev_emu *dev, *slot; 2936 2937 memset(maps, 0, sizeof(maps[0]) * XHCI_MAX_SLOTS); 2938 2939 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 2940 for (j = 1; j <= XHCI_MAX_DEVS; j++) { 2941 slot = XHCI_SLOTDEV_PTR(sc, i); 2942 dev = XHCI_DEVINST_PTR(sc, j); 2943 2944 if (slot == dev) 2945 maps[i] = j; 2946 } 2947 } 2948 } 2949 2950 static int 2951 pci_xhci_snapshot_ep(struct pci_xhci_softc *sc, struct pci_xhci_dev_emu *dev, 2952 int idx, struct vm_snapshot_meta *meta) 2953 { 2954 int k; 2955 int ret; 2956 struct usb_data_xfer *xfer; 2957 struct usb_data_xfer_block *xfer_block; 2958 2959 /* some sanity checks */ 2960 if (meta->op == VM_SNAPSHOT_SAVE) 2961 xfer = dev->eps[idx].ep_xfer; 2962 2963 SNAPSHOT_VAR_OR_LEAVE(xfer, meta, ret, done); 2964 if (xfer == NULL) { 2965 ret = 0; 2966 goto done; 2967 } 2968 2969 if (meta->op == VM_SNAPSHOT_RESTORE) { 2970 pci_xhci_init_ep(dev, idx); 2971 xfer = dev->eps[idx].ep_xfer; 2972 } 2973 2974 /* save / restore proper */ 2975 for (k = 0; k < USB_MAX_XFER_BLOCKS; k++) { 2976 xfer_block = &xfer->data[k]; 2977 2978 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(xfer_block->buf, 2979 XHCI_GADDR_SIZE(xfer_block->buf), true, meta, ret, 2980 done); 2981 SNAPSHOT_VAR_OR_LEAVE(xfer_block->blen, meta, ret, done); 2982 SNAPSHOT_VAR_OR_LEAVE(xfer_block->bdone, meta, ret, done); 2983 SNAPSHOT_VAR_OR_LEAVE(xfer_block->processed, meta, ret, done); 2984 SNAPSHOT_VAR_OR_LEAVE(xfer_block->hci_data, meta, ret, done); 2985 SNAPSHOT_VAR_OR_LEAVE(xfer_block->ccs, meta, ret, done); 2986 SNAPSHOT_VAR_OR_LEAVE(xfer_block->streamid, meta, ret, done); 2987 SNAPSHOT_VAR_OR_LEAVE(xfer_block->trbnext, meta, ret, done); 2988 } 2989 2990 SNAPSHOT_VAR_OR_LEAVE(xfer->ureq, meta, ret, done); 2991 if (xfer->ureq) { 2992 /* xfer->ureq is not allocated at restore time */ 2993 if (meta->op == VM_SNAPSHOT_RESTORE) 2994 xfer->ureq = malloc(sizeof(struct usb_device_request)); 2995 2996 SNAPSHOT_BUF_OR_LEAVE(xfer->ureq, 2997 sizeof(struct usb_device_request), 2998 meta, ret, done); 2999 } 3000 3001 SNAPSHOT_VAR_OR_LEAVE(xfer->ndata, meta, ret, done); 3002 SNAPSHOT_VAR_OR_LEAVE(xfer->head, meta, ret, done); 3003 SNAPSHOT_VAR_OR_LEAVE(xfer->tail, meta, ret, done); 3004 3005 done: 3006 return (ret); 3007 } 3008 3009 static int 3010 pci_xhci_snapshot(struct vm_snapshot_meta *meta) 3011 { 3012 int i, j; 3013 int ret; 3014 int restore_idx; 3015 struct pci_devinst *pi; 3016 struct pci_xhci_softc *sc; 3017 struct pci_xhci_portregs *port; 3018 struct pci_xhci_dev_emu *dev; 3019 char dname[SNAP_DEV_NAME_LEN]; 3020 int maps[XHCI_MAX_SLOTS + 1]; 3021 3022 pi = meta->dev_data; 3023 sc = pi->pi_arg; 3024 3025 SNAPSHOT_VAR_OR_LEAVE(sc->caplength, meta, ret, done); 3026 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams1, meta, ret, done); 3027 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams2, meta, ret, done); 3028 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams3, meta, ret, done); 3029 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams1, meta, ret, done); 3030 SNAPSHOT_VAR_OR_LEAVE(sc->dboff, meta, ret, done); 3031 SNAPSHOT_VAR_OR_LEAVE(sc->rtsoff, meta, ret, done); 3032 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams2, meta, ret, done); 3033 SNAPSHOT_VAR_OR_LEAVE(sc->regsend, meta, ret, done); 3034 3035 /* opregs */ 3036 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbcmd, meta, ret, done); 3037 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbsts, meta, ret, done); 3038 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.pgsz, meta, ret, done); 3039 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dnctrl, meta, ret, done); 3040 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.crcr, meta, ret, done); 3041 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dcbaap, meta, ret, done); 3042 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.config, meta, ret, done); 3043 3044 /* opregs.cr_p */ 3045 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.cr_p, 3046 XHCI_GADDR_SIZE(sc->opregs.cr_p), true, meta, ret, done); 3047 3048 /* opregs.dcbaa_p */ 3049 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.dcbaa_p, 3050 XHCI_GADDR_SIZE(sc->opregs.dcbaa_p), true, meta, ret, done); 3051 3052 /* rtsregs */ 3053 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.mfindex, meta, ret, done); 3054 3055 /* rtsregs.intrreg */ 3056 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.iman, meta, ret, done); 3057 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.imod, meta, ret, done); 3058 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstsz, meta, ret, done); 3059 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.rsvd, meta, ret, done); 3060 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstba, meta, ret, done); 3061 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erdp, meta, ret, done); 3062 3063 /* rtsregs.erstba_p */ 3064 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erstba_p, 3065 XHCI_GADDR_SIZE(sc->rtsregs.erstba_p), true, meta, ret, done); 3066 3067 /* rtsregs.erst_p */ 3068 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erst_p, 3069 XHCI_GADDR_SIZE(sc->rtsregs.erst_p), true, meta, ret, done); 3070 3071 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_deq_seg, meta, ret, done); 3072 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_idx, meta, ret, done); 3073 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_seg, meta, ret, done); 3074 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_events_cnt, meta, ret, done); 3075 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.event_pcs, meta, ret, done); 3076 3077 /* sanity checking */ 3078 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3079 dev = XHCI_DEVINST_PTR(sc, i); 3080 if (dev == NULL) 3081 continue; 3082 3083 if (meta->op == VM_SNAPSHOT_SAVE) 3084 restore_idx = i; 3085 SNAPSHOT_VAR_OR_LEAVE(restore_idx, meta, ret, done); 3086 3087 /* check if the restored device (when restoring) is sane */ 3088 if (restore_idx != i) { 3089 fprintf(stderr, "%s: idx not matching: actual: %d, " 3090 "expected: %d\r\n", __func__, restore_idx, i); 3091 ret = EINVAL; 3092 goto done; 3093 } 3094 3095 if (meta->op == VM_SNAPSHOT_SAVE) { 3096 memset(dname, 0, sizeof(dname)); 3097 strncpy(dname, dev->dev_ue->ue_emu, sizeof(dname) - 1); 3098 } 3099 3100 SNAPSHOT_BUF_OR_LEAVE(dname, sizeof(dname), meta, ret, done); 3101 3102 if (meta->op == VM_SNAPSHOT_RESTORE) { 3103 dname[sizeof(dname) - 1] = '\0'; 3104 if (strcmp(dev->dev_ue->ue_emu, dname)) { 3105 fprintf(stderr, "%s: device names mismatch: " 3106 "actual: %s, expected: %s\r\n", 3107 __func__, dname, dev->dev_ue->ue_emu); 3108 3109 ret = EINVAL; 3110 goto done; 3111 } 3112 } 3113 } 3114 3115 /* portregs */ 3116 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3117 port = XHCI_PORTREG_PTR(sc, i); 3118 dev = XHCI_DEVINST_PTR(sc, i); 3119 3120 if (dev == NULL) 3121 continue; 3122 3123 SNAPSHOT_VAR_OR_LEAVE(port->portsc, meta, ret, done); 3124 SNAPSHOT_VAR_OR_LEAVE(port->portpmsc, meta, ret, done); 3125 SNAPSHOT_VAR_OR_LEAVE(port->portli, meta, ret, done); 3126 SNAPSHOT_VAR_OR_LEAVE(port->porthlpmc, meta, ret, done); 3127 } 3128 3129 /* slots */ 3130 if (meta->op == VM_SNAPSHOT_SAVE) 3131 pci_xhci_map_devs_slots(sc, maps); 3132 3133 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 3134 SNAPSHOT_VAR_OR_LEAVE(maps[i], meta, ret, done); 3135 3136 if (meta->op == VM_SNAPSHOT_SAVE) { 3137 dev = XHCI_SLOTDEV_PTR(sc, i); 3138 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 3139 if (maps[i] != 0) 3140 dev = XHCI_DEVINST_PTR(sc, maps[i]); 3141 else 3142 dev = NULL; 3143 3144 XHCI_SLOTDEV_PTR(sc, i) = dev; 3145 } else { 3146 /* error */ 3147 ret = EINVAL; 3148 goto done; 3149 } 3150 3151 if (dev == NULL) 3152 continue; 3153 3154 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(dev->dev_ctx, 3155 XHCI_GADDR_SIZE(dev->dev_ctx), true, meta, ret, done); 3156 3157 if (dev->dev_ctx != NULL) { 3158 for (j = 1; j < XHCI_MAX_ENDPOINTS; j++) { 3159 ret = pci_xhci_snapshot_ep(sc, dev, j, meta); 3160 if (ret != 0) 3161 goto done; 3162 } 3163 } 3164 3165 SNAPSHOT_VAR_OR_LEAVE(dev->dev_slotstate, meta, ret, done); 3166 3167 /* devices[i]->dev_sc */ 3168 dev->dev_ue->ue_snapshot(dev->dev_sc, meta); 3169 3170 /* devices[i]->hci */ 3171 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_address, meta, ret, done); 3172 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_port, meta, ret, done); 3173 } 3174 3175 SNAPSHOT_VAR_OR_LEAVE(sc->usb2_port_start, meta, ret, done); 3176 SNAPSHOT_VAR_OR_LEAVE(sc->usb3_port_start, meta, ret, done); 3177 3178 done: 3179 return (ret); 3180 } 3181 #endif 3182 3183 struct pci_devemu pci_de_xhci = { 3184 .pe_emu = "xhci", 3185 .pe_init = pci_xhci_init, 3186 .pe_legacy_config = pci_xhci_legacy_config, 3187 .pe_barwrite = pci_xhci_write, 3188 .pe_barread = pci_xhci_read, 3189 #ifdef BHYVE_SNAPSHOT 3190 .pe_snapshot = pci_xhci_snapshot, 3191 #endif 3192 }; 3193 PCI_EMUL_SET(pci_de_xhci); 3194