1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2014 Leon Dang <ldang@nahannisys.com> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /* 29 XHCI options: 30 -s <n>,xhci,{devices} 31 32 devices: 33 tablet USB tablet mouse 34 */ 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include <sys/param.h> 39 #include <sys/uio.h> 40 #include <sys/types.h> 41 #include <sys/queue.h> 42 43 #include <stdio.h> 44 #include <stdlib.h> 45 #include <stdint.h> 46 #include <string.h> 47 #include <errno.h> 48 #include <pthread.h> 49 #include <unistd.h> 50 51 #include <machine/vmm_snapshot.h> 52 53 #include <dev/usb/usbdi.h> 54 #include <dev/usb/usb.h> 55 #include <dev/usb/usb_freebsd.h> 56 #include <xhcireg.h> 57 58 #include "bhyverun.h" 59 #include "config.h" 60 #include "debug.h" 61 #include "pci_emul.h" 62 #include "pci_xhci.h" 63 #include "usb_emul.h" 64 65 66 static int xhci_debug = 0; 67 #define DPRINTF(params) if (xhci_debug) PRINTLN params 68 #define WPRINTF(params) PRINTLN params 69 70 71 #define XHCI_NAME "xhci" 72 #define XHCI_MAX_DEVS 8 /* 4 USB3 + 4 USB2 devs */ 73 74 #define XHCI_MAX_SLOTS 64 /* min allowed by Windows drivers */ 75 76 /* 77 * XHCI data structures can be up to 64k, but limit paddr_guest2host mapping 78 * to 4k to avoid going over the guest physical memory barrier. 79 */ 80 #define XHCI_PADDR_SZ 4096 /* paddr_guest2host max size */ 81 82 #define XHCI_ERST_MAX 0 /* max 2^entries event ring seg tbl */ 83 84 #define XHCI_CAPLEN (4*8) /* offset of op register space */ 85 #define XHCI_HCCPRAMS2 0x1C /* offset of HCCPARAMS2 register */ 86 #define XHCI_PORTREGS_START 0x400 87 #define XHCI_DOORBELL_MAX 256 88 89 #define XHCI_STREAMS_MAX 1 /* 4-15 in XHCI spec */ 90 91 /* caplength and hci-version registers */ 92 #define XHCI_SET_CAPLEN(x) ((x) & 0xFF) 93 #define XHCI_SET_HCIVERSION(x) (((x) & 0xFFFF) << 16) 94 #define XHCI_GET_HCIVERSION(x) (((x) >> 16) & 0xFFFF) 95 96 /* hcsparams1 register */ 97 #define XHCI_SET_HCSP1_MAXSLOTS(x) ((x) & 0xFF) 98 #define XHCI_SET_HCSP1_MAXINTR(x) (((x) & 0x7FF) << 8) 99 #define XHCI_SET_HCSP1_MAXPORTS(x) (((x) & 0xFF) << 24) 100 101 /* hcsparams2 register */ 102 #define XHCI_SET_HCSP2_IST(x) ((x) & 0x0F) 103 #define XHCI_SET_HCSP2_ERSTMAX(x) (((x) & 0x0F) << 4) 104 #define XHCI_SET_HCSP2_MAXSCRATCH_HI(x) (((x) & 0x1F) << 21) 105 #define XHCI_SET_HCSP2_MAXSCRATCH_LO(x) (((x) & 0x1F) << 27) 106 107 /* hcsparams3 register */ 108 #define XHCI_SET_HCSP3_U1EXITLATENCY(x) ((x) & 0xFF) 109 #define XHCI_SET_HCSP3_U2EXITLATENCY(x) (((x) & 0xFFFF) << 16) 110 111 /* hccparams1 register */ 112 #define XHCI_SET_HCCP1_AC64(x) ((x) & 0x01) 113 #define XHCI_SET_HCCP1_BNC(x) (((x) & 0x01) << 1) 114 #define XHCI_SET_HCCP1_CSZ(x) (((x) & 0x01) << 2) 115 #define XHCI_SET_HCCP1_PPC(x) (((x) & 0x01) << 3) 116 #define XHCI_SET_HCCP1_PIND(x) (((x) & 0x01) << 4) 117 #define XHCI_SET_HCCP1_LHRC(x) (((x) & 0x01) << 5) 118 #define XHCI_SET_HCCP1_LTC(x) (((x) & 0x01) << 6) 119 #define XHCI_SET_HCCP1_NSS(x) (((x) & 0x01) << 7) 120 #define XHCI_SET_HCCP1_PAE(x) (((x) & 0x01) << 8) 121 #define XHCI_SET_HCCP1_SPC(x) (((x) & 0x01) << 9) 122 #define XHCI_SET_HCCP1_SEC(x) (((x) & 0x01) << 10) 123 #define XHCI_SET_HCCP1_CFC(x) (((x) & 0x01) << 11) 124 #define XHCI_SET_HCCP1_MAXPSA(x) (((x) & 0x0F) << 12) 125 #define XHCI_SET_HCCP1_XECP(x) (((x) & 0xFFFF) << 16) 126 127 /* hccparams2 register */ 128 #define XHCI_SET_HCCP2_U3C(x) ((x) & 0x01) 129 #define XHCI_SET_HCCP2_CMC(x) (((x) & 0x01) << 1) 130 #define XHCI_SET_HCCP2_FSC(x) (((x) & 0x01) << 2) 131 #define XHCI_SET_HCCP2_CTC(x) (((x) & 0x01) << 3) 132 #define XHCI_SET_HCCP2_LEC(x) (((x) & 0x01) << 4) 133 #define XHCI_SET_HCCP2_CIC(x) (((x) & 0x01) << 5) 134 135 /* other registers */ 136 #define XHCI_SET_DOORBELL(x) ((x) & ~0x03) 137 #define XHCI_SET_RTSOFFSET(x) ((x) & ~0x0F) 138 139 /* register masks */ 140 #define XHCI_PS_PLS_MASK (0xF << 5) /* port link state */ 141 #define XHCI_PS_SPEED_MASK (0xF << 10) /* port speed */ 142 #define XHCI_PS_PIC_MASK (0x3 << 14) /* port indicator */ 143 144 /* port register set */ 145 #define XHCI_PORTREGS_BASE 0x400 /* base offset */ 146 #define XHCI_PORTREGS_PORT0 0x3F0 147 #define XHCI_PORTREGS_SETSZ 0x10 /* size of a set */ 148 149 #define MASK_64_HI(x) ((x) & ~0xFFFFFFFFULL) 150 #define MASK_64_LO(x) ((x) & 0xFFFFFFFFULL) 151 152 #define FIELD_REPLACE(a,b,m,s) (((a) & ~((m) << (s))) | \ 153 (((b) & (m)) << (s))) 154 #define FIELD_COPY(a,b,m,s) (((a) & ~((m) << (s))) | \ 155 (((b) & ((m) << (s))))) 156 157 #define SNAP_DEV_NAME_LEN 128 158 159 struct pci_xhci_trb_ring { 160 uint64_t ringaddr; /* current dequeue guest address */ 161 uint32_t ccs; /* consumer cycle state */ 162 }; 163 164 /* device endpoint transfer/stream rings */ 165 struct pci_xhci_dev_ep { 166 union { 167 struct xhci_trb *_epu_tr; 168 struct xhci_stream_ctx *_epu_sctx; 169 } _ep_trbsctx; 170 #define ep_tr _ep_trbsctx._epu_tr 171 #define ep_sctx _ep_trbsctx._epu_sctx 172 173 /* 174 * Caches the value of MaxPStreams from the endpoint context 175 * when an endpoint is initialized and is used to validate the 176 * use of ep_ringaddr vs ep_sctx_trbs[] as well as the length 177 * of ep_sctx_trbs[]. 178 */ 179 uint32_t ep_MaxPStreams; 180 union { 181 struct pci_xhci_trb_ring _epu_trb; 182 struct pci_xhci_trb_ring *_epu_sctx_trbs; 183 } _ep_trb_rings; 184 #define ep_ringaddr _ep_trb_rings._epu_trb.ringaddr 185 #define ep_ccs _ep_trb_rings._epu_trb.ccs 186 #define ep_sctx_trbs _ep_trb_rings._epu_sctx_trbs 187 188 struct usb_data_xfer *ep_xfer; /* transfer chain */ 189 }; 190 191 /* device context base address array: maps slot->device context */ 192 struct xhci_dcbaa { 193 uint64_t dcba[USB_MAX_DEVICES+1]; /* xhci_dev_ctx ptrs */ 194 }; 195 196 /* port status registers */ 197 struct pci_xhci_portregs { 198 uint32_t portsc; /* port status and control */ 199 uint32_t portpmsc; /* port pwr mgmt status & control */ 200 uint32_t portli; /* port link info */ 201 uint32_t porthlpmc; /* port hardware LPM control */ 202 } __packed; 203 #define XHCI_PS_SPEED_SET(x) (((x) & 0xF) << 10) 204 205 /* xHC operational registers */ 206 struct pci_xhci_opregs { 207 uint32_t usbcmd; /* usb command */ 208 uint32_t usbsts; /* usb status */ 209 uint32_t pgsz; /* page size */ 210 uint32_t dnctrl; /* device notification control */ 211 uint64_t crcr; /* command ring control */ 212 uint64_t dcbaap; /* device ctx base addr array ptr */ 213 uint32_t config; /* configure */ 214 215 /* guest mapped addresses: */ 216 struct xhci_trb *cr_p; /* crcr dequeue */ 217 struct xhci_dcbaa *dcbaa_p; /* dev ctx array ptr */ 218 }; 219 220 /* xHC runtime registers */ 221 struct pci_xhci_rtsregs { 222 uint32_t mfindex; /* microframe index */ 223 struct { /* interrupter register set */ 224 uint32_t iman; /* interrupter management */ 225 uint32_t imod; /* interrupter moderation */ 226 uint32_t erstsz; /* event ring segment table size */ 227 uint32_t rsvd; 228 uint64_t erstba; /* event ring seg-tbl base addr */ 229 uint64_t erdp; /* event ring dequeue ptr */ 230 } intrreg __packed; 231 232 /* guest mapped addresses */ 233 struct xhci_event_ring_seg *erstba_p; 234 struct xhci_trb *erst_p; /* event ring segment tbl */ 235 int er_deq_seg; /* event ring dequeue segment */ 236 int er_enq_idx; /* event ring enqueue index - xHCI */ 237 int er_enq_seg; /* event ring enqueue segment */ 238 uint32_t er_events_cnt; /* number of events in ER */ 239 uint32_t event_pcs; /* producer cycle state flag */ 240 }; 241 242 243 struct pci_xhci_softc; 244 245 246 /* 247 * USB device emulation container. 248 * This is referenced from usb_hci->hci_sc; 1 pci_xhci_dev_emu for each 249 * emulated device instance. 250 */ 251 struct pci_xhci_dev_emu { 252 struct pci_xhci_softc *xsc; 253 254 /* XHCI contexts */ 255 struct xhci_dev_ctx *dev_ctx; 256 struct pci_xhci_dev_ep eps[XHCI_MAX_ENDPOINTS]; 257 int dev_slotstate; 258 259 struct usb_devemu *dev_ue; /* USB emulated dev */ 260 void *dev_sc; /* device's softc */ 261 262 struct usb_hci hci; 263 }; 264 265 struct pci_xhci_softc { 266 struct pci_devinst *xsc_pi; 267 268 pthread_mutex_t mtx; 269 270 uint32_t caplength; /* caplen & hciversion */ 271 uint32_t hcsparams1; /* structural parameters 1 */ 272 uint32_t hcsparams2; /* structural parameters 2 */ 273 uint32_t hcsparams3; /* structural parameters 3 */ 274 uint32_t hccparams1; /* capability parameters 1 */ 275 uint32_t dboff; /* doorbell offset */ 276 uint32_t rtsoff; /* runtime register space offset */ 277 uint32_t hccparams2; /* capability parameters 2 */ 278 279 uint32_t regsend; /* end of configuration registers */ 280 281 struct pci_xhci_opregs opregs; 282 struct pci_xhci_rtsregs rtsregs; 283 284 struct pci_xhci_portregs *portregs; 285 struct pci_xhci_dev_emu **devices; /* XHCI[port] = device */ 286 struct pci_xhci_dev_emu **slots; /* slots assigned from 1 */ 287 288 int usb2_port_start; 289 int usb3_port_start; 290 }; 291 292 293 /* portregs and devices arrays are set up to start from idx=1 */ 294 #define XHCI_PORTREG_PTR(x,n) &(x)->portregs[(n)] 295 #define XHCI_DEVINST_PTR(x,n) (x)->devices[(n)] 296 #define XHCI_SLOTDEV_PTR(x,n) (x)->slots[(n)] 297 298 #define XHCI_HALTED(sc) ((sc)->opregs.usbsts & XHCI_STS_HCH) 299 300 #define XHCI_GADDR_SIZE(a) (XHCI_PADDR_SZ - \ 301 (((uint64_t) (a)) & (XHCI_PADDR_SZ - 1))) 302 #define XHCI_GADDR(sc,a) paddr_guest2host((sc)->xsc_pi->pi_vmctx, \ 303 (a), XHCI_GADDR_SIZE(a)) 304 305 static int xhci_in_use; 306 307 /* map USB errors to XHCI */ 308 static const int xhci_usb_errors[USB_ERR_MAX] = { 309 [USB_ERR_NORMAL_COMPLETION] = XHCI_TRB_ERROR_SUCCESS, 310 [USB_ERR_PENDING_REQUESTS] = XHCI_TRB_ERROR_RESOURCE, 311 [USB_ERR_NOT_STARTED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 312 [USB_ERR_INVAL] = XHCI_TRB_ERROR_INVALID, 313 [USB_ERR_NOMEM] = XHCI_TRB_ERROR_RESOURCE, 314 [USB_ERR_CANCELLED] = XHCI_TRB_ERROR_STOPPED, 315 [USB_ERR_BAD_ADDRESS] = XHCI_TRB_ERROR_PARAMETER, 316 [USB_ERR_BAD_BUFSIZE] = XHCI_TRB_ERROR_PARAMETER, 317 [USB_ERR_BAD_FLAG] = XHCI_TRB_ERROR_PARAMETER, 318 [USB_ERR_NO_CALLBACK] = XHCI_TRB_ERROR_STALL, 319 [USB_ERR_IN_USE] = XHCI_TRB_ERROR_RESOURCE, 320 [USB_ERR_NO_ADDR] = XHCI_TRB_ERROR_RESOURCE, 321 [USB_ERR_NO_PIPE] = XHCI_TRB_ERROR_RESOURCE, 322 [USB_ERR_ZERO_NFRAMES] = XHCI_TRB_ERROR_UNDEFINED, 323 [USB_ERR_ZERO_MAXP] = XHCI_TRB_ERROR_UNDEFINED, 324 [USB_ERR_SET_ADDR_FAILED] = XHCI_TRB_ERROR_RESOURCE, 325 [USB_ERR_NO_POWER] = XHCI_TRB_ERROR_ENDP_NOT_ON, 326 [USB_ERR_TOO_DEEP] = XHCI_TRB_ERROR_RESOURCE, 327 [USB_ERR_IOERROR] = XHCI_TRB_ERROR_TRB, 328 [USB_ERR_NOT_CONFIGURED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 329 [USB_ERR_TIMEOUT] = XHCI_TRB_ERROR_CMD_ABORTED, 330 [USB_ERR_SHORT_XFER] = XHCI_TRB_ERROR_SHORT_PKT, 331 [USB_ERR_STALLED] = XHCI_TRB_ERROR_STALL, 332 [USB_ERR_INTERRUPTED] = XHCI_TRB_ERROR_CMD_ABORTED, 333 [USB_ERR_DMA_LOAD_FAILED] = XHCI_TRB_ERROR_DATA_BUF, 334 [USB_ERR_BAD_CONTEXT] = XHCI_TRB_ERROR_TRB, 335 [USB_ERR_NO_ROOT_HUB] = XHCI_TRB_ERROR_UNDEFINED, 336 [USB_ERR_NO_INTR_THREAD] = XHCI_TRB_ERROR_UNDEFINED, 337 [USB_ERR_NOT_LOCKED] = XHCI_TRB_ERROR_UNDEFINED, 338 }; 339 #define USB_TO_XHCI_ERR(e) ((e) < USB_ERR_MAX ? xhci_usb_errors[(e)] : \ 340 XHCI_TRB_ERROR_INVALID) 341 342 static int pci_xhci_insert_event(struct pci_xhci_softc *sc, 343 struct xhci_trb *evtrb, int do_intr); 344 static void pci_xhci_dump_trb(struct xhci_trb *trb); 345 static void pci_xhci_assert_interrupt(struct pci_xhci_softc *sc); 346 static void pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot); 347 static void pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm); 348 static void pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, 349 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 350 struct xhci_endp_ctx *ep_ctx, uint32_t streamid, 351 uint64_t ringaddr, int ccs); 352 353 static void 354 pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode, 355 uint32_t evtype) 356 { 357 evtrb->qwTrb0 = port << 24; 358 evtrb->dwTrb2 = XHCI_TRB_2_ERROR_SET(errcode); 359 evtrb->dwTrb3 = XHCI_TRB_3_TYPE_SET(evtype); 360 } 361 362 363 /* controller reset */ 364 static void 365 pci_xhci_reset(struct pci_xhci_softc *sc) 366 { 367 int i; 368 369 sc->rtsregs.er_enq_idx = 0; 370 sc->rtsregs.er_events_cnt = 0; 371 sc->rtsregs.event_pcs = 1; 372 373 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 374 pci_xhci_reset_slot(sc, i); 375 } 376 } 377 378 static uint32_t 379 pci_xhci_usbcmd_write(struct pci_xhci_softc *sc, uint32_t cmd) 380 { 381 int do_intr = 0; 382 int i; 383 384 if (cmd & XHCI_CMD_RS) { 385 do_intr = (sc->opregs.usbcmd & XHCI_CMD_RS) == 0; 386 387 sc->opregs.usbcmd |= XHCI_CMD_RS; 388 sc->opregs.usbsts &= ~XHCI_STS_HCH; 389 sc->opregs.usbsts |= XHCI_STS_PCD; 390 391 /* Queue port change event on controller run from stop */ 392 if (do_intr) 393 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 394 struct pci_xhci_dev_emu *dev; 395 struct pci_xhci_portregs *port; 396 struct xhci_trb evtrb; 397 398 if ((dev = XHCI_DEVINST_PTR(sc, i)) == NULL) 399 continue; 400 401 port = XHCI_PORTREG_PTR(sc, i); 402 port->portsc |= XHCI_PS_CSC | XHCI_PS_CCS; 403 port->portsc &= ~XHCI_PS_PLS_MASK; 404 405 /* 406 * XHCI 4.19.3 USB2 RxDetect->Polling, 407 * USB3 Polling->U0 408 */ 409 if (dev->dev_ue->ue_usbver == 2) 410 port->portsc |= 411 XHCI_PS_PLS_SET(UPS_PORT_LS_POLL); 412 else 413 port->portsc |= 414 XHCI_PS_PLS_SET(UPS_PORT_LS_U0); 415 416 pci_xhci_set_evtrb(&evtrb, i, 417 XHCI_TRB_ERROR_SUCCESS, 418 XHCI_TRB_EVENT_PORT_STS_CHANGE); 419 420 if (pci_xhci_insert_event(sc, &evtrb, 0) != 421 XHCI_TRB_ERROR_SUCCESS) 422 break; 423 } 424 } else { 425 sc->opregs.usbcmd &= ~XHCI_CMD_RS; 426 sc->opregs.usbsts |= XHCI_STS_HCH; 427 sc->opregs.usbsts &= ~XHCI_STS_PCD; 428 } 429 430 /* start execution of schedule; stop when set to 0 */ 431 cmd |= sc->opregs.usbcmd & XHCI_CMD_RS; 432 433 if (cmd & XHCI_CMD_HCRST) { 434 /* reset controller */ 435 pci_xhci_reset(sc); 436 cmd &= ~XHCI_CMD_HCRST; 437 } 438 439 cmd &= ~(XHCI_CMD_CSS | XHCI_CMD_CRS); 440 441 if (do_intr) 442 pci_xhci_assert_interrupt(sc); 443 444 return (cmd); 445 } 446 447 static void 448 pci_xhci_portregs_write(struct pci_xhci_softc *sc, uint64_t offset, 449 uint64_t value) 450 { 451 struct xhci_trb evtrb; 452 struct pci_xhci_portregs *p; 453 int port; 454 uint32_t oldpls, newpls; 455 456 if (sc->portregs == NULL) 457 return; 458 459 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ; 460 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ; 461 462 DPRINTF(("pci_xhci: portregs wr offset 0x%lx, port %u: 0x%lx", 463 offset, port, value)); 464 465 assert(port >= 0); 466 467 if (port > XHCI_MAX_DEVS) { 468 DPRINTF(("pci_xhci: portregs_write port %d > ndevices", 469 port)); 470 return; 471 } 472 473 if (XHCI_DEVINST_PTR(sc, port) == NULL) { 474 DPRINTF(("pci_xhci: portregs_write to unattached port %d", 475 port)); 476 } 477 478 p = XHCI_PORTREG_PTR(sc, port); 479 switch (offset) { 480 case 0: 481 /* port reset or warm reset */ 482 if (value & (XHCI_PS_PR | XHCI_PS_WPR)) { 483 pci_xhci_reset_port(sc, port, value & XHCI_PS_WPR); 484 break; 485 } 486 487 if ((p->portsc & XHCI_PS_PP) == 0) { 488 WPRINTF(("pci_xhci: portregs_write to unpowered " 489 "port %d", port)); 490 break; 491 } 492 493 /* Port status and control register */ 494 oldpls = XHCI_PS_PLS_GET(p->portsc); 495 newpls = XHCI_PS_PLS_GET(value); 496 497 p->portsc &= XHCI_PS_PED | XHCI_PS_PLS_MASK | 498 XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK; 499 500 if (XHCI_DEVINST_PTR(sc, port)) 501 p->portsc |= XHCI_PS_CCS; 502 503 p->portsc |= (value & 504 ~(XHCI_PS_OCA | 505 XHCI_PS_PR | 506 XHCI_PS_PED | 507 XHCI_PS_PLS_MASK | /* link state */ 508 XHCI_PS_SPEED_MASK | 509 XHCI_PS_PIC_MASK | /* port indicator */ 510 XHCI_PS_LWS | XHCI_PS_DR | XHCI_PS_WPR)); 511 512 /* clear control bits */ 513 p->portsc &= ~(value & 514 (XHCI_PS_CSC | 515 XHCI_PS_PEC | 516 XHCI_PS_WRC | 517 XHCI_PS_OCC | 518 XHCI_PS_PRC | 519 XHCI_PS_PLC | 520 XHCI_PS_CEC | 521 XHCI_PS_CAS)); 522 523 /* port disable request; for USB3, don't care */ 524 if (value & XHCI_PS_PED) 525 DPRINTF(("Disable port %d request", port)); 526 527 if (!(value & XHCI_PS_LWS)) 528 break; 529 530 DPRINTF(("Port new PLS: %d", newpls)); 531 switch (newpls) { 532 case 0: /* U0 */ 533 case 3: /* U3 */ 534 if (oldpls != newpls) { 535 p->portsc &= ~XHCI_PS_PLS_MASK; 536 p->portsc |= XHCI_PS_PLS_SET(newpls) | 537 XHCI_PS_PLC; 538 539 if (oldpls != 0 && newpls == 0) { 540 pci_xhci_set_evtrb(&evtrb, port, 541 XHCI_TRB_ERROR_SUCCESS, 542 XHCI_TRB_EVENT_PORT_STS_CHANGE); 543 544 pci_xhci_insert_event(sc, &evtrb, 1); 545 } 546 } 547 break; 548 549 default: 550 DPRINTF(("Unhandled change port %d PLS %u", 551 port, newpls)); 552 break; 553 } 554 break; 555 case 4: 556 /* Port power management status and control register */ 557 p->portpmsc = value; 558 break; 559 case 8: 560 /* Port link information register */ 561 DPRINTF(("pci_xhci attempted write to PORTLI, port %d", 562 port)); 563 break; 564 case 12: 565 /* 566 * Port hardware LPM control register. 567 * For USB3, this register is reserved. 568 */ 569 p->porthlpmc = value; 570 break; 571 default: 572 DPRINTF(("pci_xhci: unaligned portreg write offset %#lx", 573 offset)); 574 break; 575 } 576 } 577 578 static struct xhci_dev_ctx * 579 pci_xhci_get_dev_ctx(struct pci_xhci_softc *sc, uint32_t slot) 580 { 581 uint64_t devctx_addr; 582 struct xhci_dev_ctx *devctx; 583 584 assert(slot > 0 && slot <= XHCI_MAX_DEVS); 585 assert(XHCI_SLOTDEV_PTR(sc, slot) != NULL); 586 assert(sc->opregs.dcbaa_p != NULL); 587 588 devctx_addr = sc->opregs.dcbaa_p->dcba[slot]; 589 590 if (devctx_addr == 0) { 591 DPRINTF(("get_dev_ctx devctx_addr == 0")); 592 return (NULL); 593 } 594 595 DPRINTF(("pci_xhci: get dev ctx, slot %u devctx addr %016lx", 596 slot, devctx_addr)); 597 devctx = XHCI_GADDR(sc, devctx_addr & ~0x3FUL); 598 599 return (devctx); 600 } 601 602 static struct xhci_trb * 603 pci_xhci_trb_next(struct pci_xhci_softc *sc, struct xhci_trb *curtrb, 604 uint64_t *guestaddr) 605 { 606 struct xhci_trb *next; 607 608 assert(curtrb != NULL); 609 610 if (XHCI_TRB_3_TYPE_GET(curtrb->dwTrb3) == XHCI_TRB_TYPE_LINK) { 611 if (guestaddr) 612 *guestaddr = curtrb->qwTrb0 & ~0xFUL; 613 614 next = XHCI_GADDR(sc, curtrb->qwTrb0 & ~0xFUL); 615 } else { 616 if (guestaddr) 617 *guestaddr += sizeof(struct xhci_trb) & ~0xFUL; 618 619 next = curtrb + 1; 620 } 621 622 return (next); 623 } 624 625 static void 626 pci_xhci_assert_interrupt(struct pci_xhci_softc *sc) 627 { 628 629 sc->rtsregs.intrreg.erdp |= XHCI_ERDP_LO_BUSY; 630 sc->rtsregs.intrreg.iman |= XHCI_IMAN_INTR_PEND; 631 sc->opregs.usbsts |= XHCI_STS_EINT; 632 633 /* only trigger interrupt if permitted */ 634 if ((sc->opregs.usbcmd & XHCI_CMD_INTE) && 635 (sc->rtsregs.intrreg.iman & XHCI_IMAN_INTR_ENA)) { 636 if (pci_msi_enabled(sc->xsc_pi)) 637 pci_generate_msi(sc->xsc_pi, 0); 638 else 639 pci_lintr_assert(sc->xsc_pi); 640 } 641 } 642 643 static void 644 pci_xhci_deassert_interrupt(struct pci_xhci_softc *sc) 645 { 646 647 if (!pci_msi_enabled(sc->xsc_pi)) 648 pci_lintr_assert(sc->xsc_pi); 649 } 650 651 static void 652 pci_xhci_init_ep(struct pci_xhci_dev_emu *dev, int epid) 653 { 654 struct xhci_dev_ctx *dev_ctx; 655 struct pci_xhci_dev_ep *devep; 656 struct xhci_endp_ctx *ep_ctx; 657 uint32_t i, pstreams; 658 659 dev_ctx = dev->dev_ctx; 660 ep_ctx = &dev_ctx->ctx_ep[epid]; 661 devep = &dev->eps[epid]; 662 pstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0); 663 if (pstreams > 0) { 664 DPRINTF(("init_ep %d with pstreams %d", epid, pstreams)); 665 assert(devep->ep_sctx_trbs == NULL); 666 667 devep->ep_sctx = XHCI_GADDR(dev->xsc, ep_ctx->qwEpCtx2 & 668 XHCI_EPCTX_2_TR_DQ_PTR_MASK); 669 devep->ep_sctx_trbs = calloc(pstreams, 670 sizeof(struct pci_xhci_trb_ring)); 671 for (i = 0; i < pstreams; i++) { 672 devep->ep_sctx_trbs[i].ringaddr = 673 devep->ep_sctx[i].qwSctx0 & 674 XHCI_SCTX_0_TR_DQ_PTR_MASK; 675 devep->ep_sctx_trbs[i].ccs = 676 XHCI_SCTX_0_DCS_GET(devep->ep_sctx[i].qwSctx0); 677 } 678 } else { 679 DPRINTF(("init_ep %d with no pstreams", epid)); 680 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & 681 XHCI_EPCTX_2_TR_DQ_PTR_MASK; 682 devep->ep_ccs = XHCI_EPCTX_2_DCS_GET(ep_ctx->qwEpCtx2); 683 devep->ep_tr = XHCI_GADDR(dev->xsc, devep->ep_ringaddr); 684 DPRINTF(("init_ep tr DCS %x", devep->ep_ccs)); 685 } 686 devep->ep_MaxPStreams = pstreams; 687 688 if (devep->ep_xfer == NULL) { 689 devep->ep_xfer = malloc(sizeof(struct usb_data_xfer)); 690 USB_DATA_XFER_INIT(devep->ep_xfer); 691 } 692 } 693 694 static void 695 pci_xhci_disable_ep(struct pci_xhci_dev_emu *dev, int epid) 696 { 697 struct xhci_dev_ctx *dev_ctx; 698 struct pci_xhci_dev_ep *devep; 699 struct xhci_endp_ctx *ep_ctx; 700 701 DPRINTF(("pci_xhci disable_ep %d", epid)); 702 703 dev_ctx = dev->dev_ctx; 704 ep_ctx = &dev_ctx->ctx_ep[epid]; 705 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_DISABLED; 706 707 devep = &dev->eps[epid]; 708 if (devep->ep_MaxPStreams > 0) 709 free(devep->ep_sctx_trbs); 710 711 if (devep->ep_xfer != NULL) { 712 free(devep->ep_xfer); 713 devep->ep_xfer = NULL; 714 } 715 716 memset(devep, 0, sizeof(struct pci_xhci_dev_ep)); 717 } 718 719 720 /* reset device at slot and data structures related to it */ 721 static void 722 pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot) 723 { 724 struct pci_xhci_dev_emu *dev; 725 726 dev = XHCI_SLOTDEV_PTR(sc, slot); 727 728 if (!dev) { 729 DPRINTF(("xhci reset unassigned slot (%d)?", slot)); 730 } else { 731 dev->dev_slotstate = XHCI_ST_DISABLED; 732 } 733 734 /* TODO: reset ring buffer pointers */ 735 } 736 737 static int 738 pci_xhci_insert_event(struct pci_xhci_softc *sc, struct xhci_trb *evtrb, 739 int do_intr) 740 { 741 struct pci_xhci_rtsregs *rts; 742 uint64_t erdp; 743 int erdp_idx; 744 int err; 745 struct xhci_trb *evtrbptr; 746 747 err = XHCI_TRB_ERROR_SUCCESS; 748 749 rts = &sc->rtsregs; 750 751 erdp = rts->intrreg.erdp & ~0xF; 752 erdp_idx = (erdp - rts->erstba_p[rts->er_deq_seg].qwEvrsTablePtr) / 753 sizeof(struct xhci_trb); 754 755 DPRINTF(("pci_xhci: insert event 0[%lx] 2[%x] 3[%x]", 756 evtrb->qwTrb0, evtrb->dwTrb2, evtrb->dwTrb3)); 757 DPRINTF(("\terdp idx %d/seg %d, enq idx %d/seg %d, pcs %u", 758 erdp_idx, rts->er_deq_seg, rts->er_enq_idx, 759 rts->er_enq_seg, rts->event_pcs)); 760 DPRINTF(("\t(erdp=0x%lx, erst=0x%lx, tblsz=%u, do_intr %d)", 761 erdp, rts->erstba_p->qwEvrsTablePtr, 762 rts->erstba_p->dwEvrsTableSize, do_intr)); 763 764 evtrbptr = &rts->erst_p[rts->er_enq_idx]; 765 766 /* TODO: multi-segment table */ 767 if (rts->er_events_cnt >= rts->erstba_p->dwEvrsTableSize) { 768 DPRINTF(("pci_xhci[%d] cannot insert event; ring full", 769 __LINE__)); 770 err = XHCI_TRB_ERROR_EV_RING_FULL; 771 goto done; 772 } 773 774 if (rts->er_events_cnt == rts->erstba_p->dwEvrsTableSize - 1) { 775 struct xhci_trb errev; 776 777 if ((evtrbptr->dwTrb3 & 0x1) == (rts->event_pcs & 0x1)) { 778 779 DPRINTF(("pci_xhci[%d] insert evt err: ring full", 780 __LINE__)); 781 782 errev.qwTrb0 = 0; 783 errev.dwTrb2 = XHCI_TRB_2_ERROR_SET( 784 XHCI_TRB_ERROR_EV_RING_FULL); 785 errev.dwTrb3 = XHCI_TRB_3_TYPE_SET( 786 XHCI_TRB_EVENT_HOST_CTRL) | 787 rts->event_pcs; 788 rts->er_events_cnt++; 789 memcpy(&rts->erst_p[rts->er_enq_idx], &errev, 790 sizeof(struct xhci_trb)); 791 rts->er_enq_idx = (rts->er_enq_idx + 1) % 792 rts->erstba_p->dwEvrsTableSize; 793 err = XHCI_TRB_ERROR_EV_RING_FULL; 794 do_intr = 1; 795 796 goto done; 797 } 798 } else { 799 rts->er_events_cnt++; 800 } 801 802 evtrb->dwTrb3 &= ~XHCI_TRB_3_CYCLE_BIT; 803 evtrb->dwTrb3 |= rts->event_pcs; 804 805 memcpy(&rts->erst_p[rts->er_enq_idx], evtrb, sizeof(struct xhci_trb)); 806 rts->er_enq_idx = (rts->er_enq_idx + 1) % 807 rts->erstba_p->dwEvrsTableSize; 808 809 if (rts->er_enq_idx == 0) 810 rts->event_pcs ^= 1; 811 812 done: 813 if (do_intr) 814 pci_xhci_assert_interrupt(sc); 815 816 return (err); 817 } 818 819 static uint32_t 820 pci_xhci_cmd_enable_slot(struct pci_xhci_softc *sc, uint32_t *slot) 821 { 822 struct pci_xhci_dev_emu *dev; 823 uint32_t cmderr; 824 int i; 825 826 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 827 if (sc->portregs != NULL) 828 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 829 dev = XHCI_SLOTDEV_PTR(sc, i); 830 if (dev && dev->dev_slotstate == XHCI_ST_DISABLED) { 831 *slot = i; 832 dev->dev_slotstate = XHCI_ST_ENABLED; 833 cmderr = XHCI_TRB_ERROR_SUCCESS; 834 dev->hci.hci_address = i; 835 break; 836 } 837 } 838 839 DPRINTF(("pci_xhci enable slot (error=%d) slot %u", 840 cmderr != XHCI_TRB_ERROR_SUCCESS, *slot)); 841 842 return (cmderr); 843 } 844 845 static uint32_t 846 pci_xhci_cmd_disable_slot(struct pci_xhci_softc *sc, uint32_t slot) 847 { 848 struct pci_xhci_dev_emu *dev; 849 uint32_t cmderr; 850 851 DPRINTF(("pci_xhci disable slot %u", slot)); 852 853 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 854 if (sc->portregs == NULL) 855 goto done; 856 857 if (slot > XHCI_MAX_SLOTS) { 858 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 859 goto done; 860 } 861 862 dev = XHCI_SLOTDEV_PTR(sc, slot); 863 if (dev) { 864 if (dev->dev_slotstate == XHCI_ST_DISABLED) { 865 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 866 } else { 867 dev->dev_slotstate = XHCI_ST_DISABLED; 868 cmderr = XHCI_TRB_ERROR_SUCCESS; 869 /* TODO: reset events and endpoints */ 870 } 871 } else 872 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 873 874 done: 875 return (cmderr); 876 } 877 878 static uint32_t 879 pci_xhci_cmd_reset_device(struct pci_xhci_softc *sc, uint32_t slot) 880 { 881 struct pci_xhci_dev_emu *dev; 882 struct xhci_dev_ctx *dev_ctx; 883 struct xhci_endp_ctx *ep_ctx; 884 uint32_t cmderr; 885 int i; 886 887 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 888 if (sc->portregs == NULL) 889 goto done; 890 891 DPRINTF(("pci_xhci reset device slot %u", slot)); 892 893 dev = XHCI_SLOTDEV_PTR(sc, slot); 894 if (!dev || dev->dev_slotstate == XHCI_ST_DISABLED) 895 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 896 else { 897 dev->dev_slotstate = XHCI_ST_DEFAULT; 898 899 dev->hci.hci_address = 0; 900 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 901 902 /* slot state */ 903 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 904 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_DEFAULT, 905 0x1F, 27); 906 907 /* number of contexts */ 908 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE( 909 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); 910 911 /* reset all eps other than ep-0 */ 912 for (i = 2; i <= 31; i++) { 913 ep_ctx = &dev_ctx->ctx_ep[i]; 914 ep_ctx->dwEpCtx0 = FIELD_REPLACE( ep_ctx->dwEpCtx0, 915 XHCI_ST_EPCTX_DISABLED, 0x7, 0); 916 } 917 918 cmderr = XHCI_TRB_ERROR_SUCCESS; 919 } 920 921 pci_xhci_reset_slot(sc, slot); 922 923 done: 924 return (cmderr); 925 } 926 927 static uint32_t 928 pci_xhci_cmd_address_device(struct pci_xhci_softc *sc, uint32_t slot, 929 struct xhci_trb *trb) 930 { 931 struct pci_xhci_dev_emu *dev; 932 struct xhci_input_dev_ctx *input_ctx; 933 struct xhci_slot_ctx *islot_ctx; 934 struct xhci_dev_ctx *dev_ctx; 935 struct xhci_endp_ctx *ep0_ctx; 936 uint32_t cmderr; 937 938 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 939 islot_ctx = &input_ctx->ctx_slot; 940 ep0_ctx = &input_ctx->ctx_ep[1]; 941 942 cmderr = XHCI_TRB_ERROR_SUCCESS; 943 944 DPRINTF(("pci_xhci: address device, input ctl: D 0x%08x A 0x%08x,", 945 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1)); 946 DPRINTF((" slot %08x %08x %08x %08x", 947 islot_ctx->dwSctx0, islot_ctx->dwSctx1, 948 islot_ctx->dwSctx2, islot_ctx->dwSctx3)); 949 DPRINTF((" ep0 %08x %08x %016lx %08x", 950 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 951 ep0_ctx->dwEpCtx4)); 952 953 /* when setting address: drop-ctx=0, add-ctx=slot+ep0 */ 954 if ((input_ctx->ctx_input.dwInCtx0 != 0) || 955 (input_ctx->ctx_input.dwInCtx1 & 0x03) != 0x03) { 956 DPRINTF(("pci_xhci: address device, input ctl invalid")); 957 cmderr = XHCI_TRB_ERROR_TRB; 958 goto done; 959 } 960 961 /* assign address to slot */ 962 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 963 964 DPRINTF(("pci_xhci: address device, dev ctx")); 965 DPRINTF((" slot %08x %08x %08x %08x", 966 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 967 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 968 969 dev = XHCI_SLOTDEV_PTR(sc, slot); 970 assert(dev != NULL); 971 972 dev->hci.hci_address = slot; 973 dev->dev_ctx = dev_ctx; 974 975 if (dev->dev_ue->ue_reset == NULL || 976 dev->dev_ue->ue_reset(dev->dev_sc) < 0) { 977 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 978 goto done; 979 } 980 981 memcpy(&dev_ctx->ctx_slot, islot_ctx, sizeof(struct xhci_slot_ctx)); 982 983 dev_ctx->ctx_slot.dwSctx3 = 984 XHCI_SCTX_3_SLOT_STATE_SET(XHCI_ST_SLCTX_ADDRESSED) | 985 XHCI_SCTX_3_DEV_ADDR_SET(slot); 986 987 memcpy(&dev_ctx->ctx_ep[1], ep0_ctx, sizeof(struct xhci_endp_ctx)); 988 ep0_ctx = &dev_ctx->ctx_ep[1]; 989 ep0_ctx->dwEpCtx0 = (ep0_ctx->dwEpCtx0 & ~0x7) | 990 XHCI_EPCTX_0_EPSTATE_SET(XHCI_ST_EPCTX_RUNNING); 991 992 pci_xhci_init_ep(dev, 1); 993 994 dev->dev_slotstate = XHCI_ST_ADDRESSED; 995 996 DPRINTF(("pci_xhci: address device, output ctx")); 997 DPRINTF((" slot %08x %08x %08x %08x", 998 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 999 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1000 DPRINTF((" ep0 %08x %08x %016lx %08x", 1001 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1002 ep0_ctx->dwEpCtx4)); 1003 1004 done: 1005 return (cmderr); 1006 } 1007 1008 static uint32_t 1009 pci_xhci_cmd_config_ep(struct pci_xhci_softc *sc, uint32_t slot, 1010 struct xhci_trb *trb) 1011 { 1012 struct xhci_input_dev_ctx *input_ctx; 1013 struct pci_xhci_dev_emu *dev; 1014 struct xhci_dev_ctx *dev_ctx; 1015 struct xhci_endp_ctx *ep_ctx, *iep_ctx; 1016 uint32_t cmderr; 1017 int i; 1018 1019 cmderr = XHCI_TRB_ERROR_SUCCESS; 1020 1021 DPRINTF(("pci_xhci config_ep slot %u", slot)); 1022 1023 dev = XHCI_SLOTDEV_PTR(sc, slot); 1024 assert(dev != NULL); 1025 1026 if ((trb->dwTrb3 & XHCI_TRB_3_DCEP_BIT) != 0) { 1027 DPRINTF(("pci_xhci config_ep - deconfigure ep slot %u", 1028 slot)); 1029 if (dev->dev_ue->ue_stop != NULL) 1030 dev->dev_ue->ue_stop(dev->dev_sc); 1031 1032 dev->dev_slotstate = XHCI_ST_ADDRESSED; 1033 1034 dev->hci.hci_address = 0; 1035 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1036 1037 /* number of contexts */ 1038 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE( 1039 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); 1040 1041 /* slot state */ 1042 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1043 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_ADDRESSED, 1044 0x1F, 27); 1045 1046 /* disable endpoints */ 1047 for (i = 2; i < 32; i++) 1048 pci_xhci_disable_ep(dev, i); 1049 1050 cmderr = XHCI_TRB_ERROR_SUCCESS; 1051 1052 goto done; 1053 } 1054 1055 if (dev->dev_slotstate < XHCI_ST_ADDRESSED) { 1056 DPRINTF(("pci_xhci: config_ep slotstate x%x != addressed", 1057 dev->dev_slotstate)); 1058 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 1059 goto done; 1060 } 1061 1062 /* In addressed/configured state; 1063 * for each drop endpoint ctx flag: 1064 * ep->state = DISABLED 1065 * for each add endpoint ctx flag: 1066 * cp(ep-in, ep-out) 1067 * ep->state = RUNNING 1068 * for each drop+add endpoint flag: 1069 * reset ep resources 1070 * cp(ep-in, ep-out) 1071 * ep->state = RUNNING 1072 * if input->DisabledCtx[2-31] < 30: (at least 1 ep not disabled) 1073 * slot->state = configured 1074 */ 1075 1076 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1077 dev_ctx = dev->dev_ctx; 1078 DPRINTF(("pci_xhci: config_ep inputctx: D:x%08x A:x%08x 7:x%08x", 1079 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1, 1080 input_ctx->ctx_input.dwInCtx7)); 1081 1082 for (i = 2; i <= 31; i++) { 1083 ep_ctx = &dev_ctx->ctx_ep[i]; 1084 1085 if (input_ctx->ctx_input.dwInCtx0 & 1086 XHCI_INCTX_0_DROP_MASK(i)) { 1087 DPRINTF((" config ep - dropping ep %d", i)); 1088 pci_xhci_disable_ep(dev, i); 1089 } 1090 1091 if (input_ctx->ctx_input.dwInCtx1 & 1092 XHCI_INCTX_1_ADD_MASK(i)) { 1093 iep_ctx = &input_ctx->ctx_ep[i]; 1094 1095 DPRINTF((" enable ep[%d] %08x %08x %016lx %08x", 1096 i, iep_ctx->dwEpCtx0, iep_ctx->dwEpCtx1, 1097 iep_ctx->qwEpCtx2, iep_ctx->dwEpCtx4)); 1098 1099 memcpy(ep_ctx, iep_ctx, sizeof(struct xhci_endp_ctx)); 1100 1101 pci_xhci_init_ep(dev, i); 1102 1103 /* ep state */ 1104 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1105 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1106 } 1107 } 1108 1109 /* slot state to configured */ 1110 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1111 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_CONFIGURED, 0x1F, 27); 1112 dev_ctx->ctx_slot.dwSctx0 = FIELD_COPY( 1113 dev_ctx->ctx_slot.dwSctx0, input_ctx->ctx_slot.dwSctx0, 0x1F, 27); 1114 dev->dev_slotstate = XHCI_ST_CONFIGURED; 1115 1116 DPRINTF(("EP configured; slot %u [0]=0x%08x [1]=0x%08x [2]=0x%08x " 1117 "[3]=0x%08x", 1118 slot, dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1119 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1120 1121 done: 1122 return (cmderr); 1123 } 1124 1125 static uint32_t 1126 pci_xhci_cmd_reset_ep(struct pci_xhci_softc *sc, uint32_t slot, 1127 struct xhci_trb *trb) 1128 { 1129 struct pci_xhci_dev_emu *dev; 1130 struct pci_xhci_dev_ep *devep; 1131 struct xhci_dev_ctx *dev_ctx; 1132 struct xhci_endp_ctx *ep_ctx; 1133 uint32_t cmderr, epid; 1134 uint32_t type; 1135 1136 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1137 1138 DPRINTF(("pci_xhci: reset ep %u: slot %u", epid, slot)); 1139 1140 cmderr = XHCI_TRB_ERROR_SUCCESS; 1141 1142 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1143 1144 dev = XHCI_SLOTDEV_PTR(sc, slot); 1145 assert(dev != NULL); 1146 1147 if (type == XHCI_TRB_TYPE_STOP_EP && 1148 (trb->dwTrb3 & XHCI_TRB_3_SUSP_EP_BIT) != 0) { 1149 /* XXX suspend endpoint for 10ms */ 1150 } 1151 1152 if (epid < 1 || epid > 31) { 1153 DPRINTF(("pci_xhci: reset ep: invalid epid %u", epid)); 1154 cmderr = XHCI_TRB_ERROR_TRB; 1155 goto done; 1156 } 1157 1158 devep = &dev->eps[epid]; 1159 if (devep->ep_xfer != NULL) 1160 USB_DATA_XFER_RESET(devep->ep_xfer); 1161 1162 dev_ctx = dev->dev_ctx; 1163 assert(dev_ctx != NULL); 1164 1165 ep_ctx = &dev_ctx->ctx_ep[epid]; 1166 1167 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1168 1169 if (devep->ep_MaxPStreams == 0) 1170 ep_ctx->qwEpCtx2 = devep->ep_ringaddr | devep->ep_ccs; 1171 1172 DPRINTF(("pci_xhci: reset ep[%u] %08x %08x %016lx %08x", 1173 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1174 ep_ctx->dwEpCtx4)); 1175 1176 if (type == XHCI_TRB_TYPE_RESET_EP && 1177 (dev->dev_ue->ue_reset == NULL || 1178 dev->dev_ue->ue_reset(dev->dev_sc) < 0)) { 1179 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 1180 goto done; 1181 } 1182 1183 done: 1184 return (cmderr); 1185 } 1186 1187 1188 static uint32_t 1189 pci_xhci_find_stream(struct pci_xhci_softc *sc, struct xhci_endp_ctx *ep, 1190 struct pci_xhci_dev_ep *devep, uint32_t streamid) 1191 { 1192 struct xhci_stream_ctx *sctx; 1193 1194 if (devep->ep_MaxPStreams == 0) 1195 return (XHCI_TRB_ERROR_TRB); 1196 1197 if (devep->ep_MaxPStreams > XHCI_STREAMS_MAX) 1198 return (XHCI_TRB_ERROR_INVALID_SID); 1199 1200 if (XHCI_EPCTX_0_LSA_GET(ep->dwEpCtx0) == 0) { 1201 DPRINTF(("pci_xhci: find_stream; LSA bit not set")); 1202 return (XHCI_TRB_ERROR_INVALID_SID); 1203 } 1204 1205 /* only support primary stream */ 1206 if (streamid > devep->ep_MaxPStreams) 1207 return (XHCI_TRB_ERROR_STREAM_TYPE); 1208 1209 sctx = (struct xhci_stream_ctx *)XHCI_GADDR(sc, ep->qwEpCtx2 & ~0xFUL) + 1210 streamid; 1211 if (!XHCI_SCTX_0_SCT_GET(sctx->qwSctx0)) 1212 return (XHCI_TRB_ERROR_STREAM_TYPE); 1213 1214 return (XHCI_TRB_ERROR_SUCCESS); 1215 } 1216 1217 1218 static uint32_t 1219 pci_xhci_cmd_set_tr(struct pci_xhci_softc *sc, uint32_t slot, 1220 struct xhci_trb *trb) 1221 { 1222 struct pci_xhci_dev_emu *dev; 1223 struct pci_xhci_dev_ep *devep; 1224 struct xhci_dev_ctx *dev_ctx; 1225 struct xhci_endp_ctx *ep_ctx; 1226 uint32_t cmderr, epid; 1227 uint32_t streamid; 1228 1229 cmderr = XHCI_TRB_ERROR_SUCCESS; 1230 1231 dev = XHCI_SLOTDEV_PTR(sc, slot); 1232 assert(dev != NULL); 1233 1234 DPRINTF(("pci_xhci set_tr: new-tr x%016lx, SCT %u DCS %u", 1235 (trb->qwTrb0 & ~0xF), (uint32_t)((trb->qwTrb0 >> 1) & 0x7), 1236 (uint32_t)(trb->qwTrb0 & 0x1))); 1237 DPRINTF((" stream-id %u, slot %u, epid %u, C %u", 1238 (trb->dwTrb2 >> 16) & 0xFFFF, 1239 XHCI_TRB_3_SLOT_GET(trb->dwTrb3), 1240 XHCI_TRB_3_EP_GET(trb->dwTrb3), trb->dwTrb3 & 0x1)); 1241 1242 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1243 if (epid < 1 || epid > 31) { 1244 DPRINTF(("pci_xhci: set_tr_deq: invalid epid %u", epid)); 1245 cmderr = XHCI_TRB_ERROR_TRB; 1246 goto done; 1247 } 1248 1249 dev_ctx = dev->dev_ctx; 1250 assert(dev_ctx != NULL); 1251 1252 ep_ctx = &dev_ctx->ctx_ep[epid]; 1253 devep = &dev->eps[epid]; 1254 1255 switch (XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)) { 1256 case XHCI_ST_EPCTX_STOPPED: 1257 case XHCI_ST_EPCTX_ERROR: 1258 break; 1259 default: 1260 DPRINTF(("pci_xhci cmd set_tr invalid state %x", 1261 XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0))); 1262 cmderr = XHCI_TRB_ERROR_CONTEXT_STATE; 1263 goto done; 1264 } 1265 1266 streamid = XHCI_TRB_2_STREAM_GET(trb->dwTrb2); 1267 if (devep->ep_MaxPStreams > 0) { 1268 cmderr = pci_xhci_find_stream(sc, ep_ctx, devep, streamid); 1269 if (cmderr == XHCI_TRB_ERROR_SUCCESS) { 1270 assert(devep->ep_sctx != NULL); 1271 1272 devep->ep_sctx[streamid].qwSctx0 = trb->qwTrb0; 1273 devep->ep_sctx_trbs[streamid].ringaddr = 1274 trb->qwTrb0 & ~0xF; 1275 devep->ep_sctx_trbs[streamid].ccs = 1276 XHCI_EPCTX_2_DCS_GET(trb->qwTrb0); 1277 } 1278 } else { 1279 if (streamid != 0) { 1280 DPRINTF(("pci_xhci cmd set_tr streamid %x != 0", 1281 streamid)); 1282 } 1283 ep_ctx->qwEpCtx2 = trb->qwTrb0 & ~0xFUL; 1284 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & ~0xFUL; 1285 devep->ep_ccs = trb->qwTrb0 & 0x1; 1286 devep->ep_tr = XHCI_GADDR(sc, devep->ep_ringaddr); 1287 1288 DPRINTF(("pci_xhci set_tr first TRB:")); 1289 pci_xhci_dump_trb(devep->ep_tr); 1290 } 1291 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1292 1293 done: 1294 return (cmderr); 1295 } 1296 1297 static uint32_t 1298 pci_xhci_cmd_eval_ctx(struct pci_xhci_softc *sc, uint32_t slot, 1299 struct xhci_trb *trb) 1300 { 1301 struct xhci_input_dev_ctx *input_ctx; 1302 struct xhci_slot_ctx *islot_ctx; 1303 struct xhci_dev_ctx *dev_ctx; 1304 struct xhci_endp_ctx *ep0_ctx; 1305 uint32_t cmderr; 1306 1307 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1308 islot_ctx = &input_ctx->ctx_slot; 1309 ep0_ctx = &input_ctx->ctx_ep[1]; 1310 1311 cmderr = XHCI_TRB_ERROR_SUCCESS; 1312 DPRINTF(("pci_xhci: eval ctx, input ctl: D 0x%08x A 0x%08x,", 1313 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1)); 1314 DPRINTF((" slot %08x %08x %08x %08x", 1315 islot_ctx->dwSctx0, islot_ctx->dwSctx1, 1316 islot_ctx->dwSctx2, islot_ctx->dwSctx3)); 1317 DPRINTF((" ep0 %08x %08x %016lx %08x", 1318 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1319 ep0_ctx->dwEpCtx4)); 1320 1321 /* this command expects drop-ctx=0 & add-ctx=slot+ep0 */ 1322 if ((input_ctx->ctx_input.dwInCtx0 != 0) || 1323 (input_ctx->ctx_input.dwInCtx1 & 0x03) == 0) { 1324 DPRINTF(("pci_xhci: eval ctx, input ctl invalid")); 1325 cmderr = XHCI_TRB_ERROR_TRB; 1326 goto done; 1327 } 1328 1329 /* assign address to slot; in this emulation, slot_id = address */ 1330 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1331 1332 DPRINTF(("pci_xhci: eval ctx, dev ctx")); 1333 DPRINTF((" slot %08x %08x %08x %08x", 1334 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1335 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1336 1337 if (input_ctx->ctx_input.dwInCtx1 & 0x01) { /* slot ctx */ 1338 /* set max exit latency */ 1339 dev_ctx->ctx_slot.dwSctx1 = FIELD_COPY( 1340 dev_ctx->ctx_slot.dwSctx1, input_ctx->ctx_slot.dwSctx1, 1341 0xFFFF, 0); 1342 1343 /* set interrupter target */ 1344 dev_ctx->ctx_slot.dwSctx2 = FIELD_COPY( 1345 dev_ctx->ctx_slot.dwSctx2, input_ctx->ctx_slot.dwSctx2, 1346 0x3FF, 22); 1347 } 1348 if (input_ctx->ctx_input.dwInCtx1 & 0x02) { /* control ctx */ 1349 /* set max packet size */ 1350 dev_ctx->ctx_ep[1].dwEpCtx1 = FIELD_COPY( 1351 dev_ctx->ctx_ep[1].dwEpCtx1, ep0_ctx->dwEpCtx1, 1352 0xFFFF, 16); 1353 1354 ep0_ctx = &dev_ctx->ctx_ep[1]; 1355 } 1356 1357 DPRINTF(("pci_xhci: eval ctx, output ctx")); 1358 DPRINTF((" slot %08x %08x %08x %08x", 1359 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1360 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1361 DPRINTF((" ep0 %08x %08x %016lx %08x", 1362 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1363 ep0_ctx->dwEpCtx4)); 1364 1365 done: 1366 return (cmderr); 1367 } 1368 1369 static int 1370 pci_xhci_complete_commands(struct pci_xhci_softc *sc) 1371 { 1372 struct xhci_trb evtrb; 1373 struct xhci_trb *trb; 1374 uint64_t crcr; 1375 uint32_t ccs; /* cycle state (XHCI 4.9.2) */ 1376 uint32_t type; 1377 uint32_t slot; 1378 uint32_t cmderr; 1379 int error; 1380 1381 error = 0; 1382 sc->opregs.crcr |= XHCI_CRCR_LO_CRR; 1383 1384 trb = sc->opregs.cr_p; 1385 ccs = sc->opregs.crcr & XHCI_CRCR_LO_RCS; 1386 crcr = sc->opregs.crcr & ~0xF; 1387 1388 while (1) { 1389 sc->opregs.cr_p = trb; 1390 1391 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1392 1393 if ((trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT) != 1394 (ccs & XHCI_TRB_3_CYCLE_BIT)) 1395 break; 1396 1397 DPRINTF(("pci_xhci: cmd type 0x%x, Trb0 x%016lx dwTrb2 x%08x" 1398 " dwTrb3 x%08x, TRB_CYCLE %u/ccs %u", 1399 type, trb->qwTrb0, trb->dwTrb2, trb->dwTrb3, 1400 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT, ccs)); 1401 1402 cmderr = XHCI_TRB_ERROR_SUCCESS; 1403 evtrb.dwTrb2 = 0; 1404 evtrb.dwTrb3 = (ccs & XHCI_TRB_3_CYCLE_BIT) | 1405 XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_CMD_COMPLETE); 1406 slot = 0; 1407 1408 switch (type) { 1409 case XHCI_TRB_TYPE_LINK: /* 0x06 */ 1410 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1411 ccs ^= XHCI_CRCR_LO_RCS; 1412 break; 1413 1414 case XHCI_TRB_TYPE_ENABLE_SLOT: /* 0x09 */ 1415 cmderr = pci_xhci_cmd_enable_slot(sc, &slot); 1416 break; 1417 1418 case XHCI_TRB_TYPE_DISABLE_SLOT: /* 0x0A */ 1419 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1420 cmderr = pci_xhci_cmd_disable_slot(sc, slot); 1421 break; 1422 1423 case XHCI_TRB_TYPE_ADDRESS_DEVICE: /* 0x0B */ 1424 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1425 cmderr = pci_xhci_cmd_address_device(sc, slot, trb); 1426 break; 1427 1428 case XHCI_TRB_TYPE_CONFIGURE_EP: /* 0x0C */ 1429 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1430 cmderr = pci_xhci_cmd_config_ep(sc, slot, trb); 1431 break; 1432 1433 case XHCI_TRB_TYPE_EVALUATE_CTX: /* 0x0D */ 1434 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1435 cmderr = pci_xhci_cmd_eval_ctx(sc, slot, trb); 1436 break; 1437 1438 case XHCI_TRB_TYPE_RESET_EP: /* 0x0E */ 1439 DPRINTF(("Reset Endpoint on slot %d", slot)); 1440 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1441 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1442 break; 1443 1444 case XHCI_TRB_TYPE_STOP_EP: /* 0x0F */ 1445 DPRINTF(("Stop Endpoint on slot %d", slot)); 1446 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1447 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1448 break; 1449 1450 case XHCI_TRB_TYPE_SET_TR_DEQUEUE: /* 0x10 */ 1451 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1452 cmderr = pci_xhci_cmd_set_tr(sc, slot, trb); 1453 break; 1454 1455 case XHCI_TRB_TYPE_RESET_DEVICE: /* 0x11 */ 1456 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1457 cmderr = pci_xhci_cmd_reset_device(sc, slot); 1458 break; 1459 1460 case XHCI_TRB_TYPE_FORCE_EVENT: /* 0x12 */ 1461 /* TODO: */ 1462 break; 1463 1464 case XHCI_TRB_TYPE_NEGOTIATE_BW: /* 0x13 */ 1465 break; 1466 1467 case XHCI_TRB_TYPE_SET_LATENCY_TOL: /* 0x14 */ 1468 break; 1469 1470 case XHCI_TRB_TYPE_GET_PORT_BW: /* 0x15 */ 1471 break; 1472 1473 case XHCI_TRB_TYPE_FORCE_HEADER: /* 0x16 */ 1474 break; 1475 1476 case XHCI_TRB_TYPE_NOOP_CMD: /* 0x17 */ 1477 break; 1478 1479 default: 1480 DPRINTF(("pci_xhci: unsupported cmd %x", type)); 1481 break; 1482 } 1483 1484 if (type != XHCI_TRB_TYPE_LINK) { 1485 /* 1486 * insert command completion event and assert intr 1487 */ 1488 evtrb.qwTrb0 = crcr; 1489 evtrb.dwTrb2 |= XHCI_TRB_2_ERROR_SET(cmderr); 1490 evtrb.dwTrb3 |= XHCI_TRB_3_SLOT_SET(slot); 1491 DPRINTF(("pci_xhci: command 0x%x result: 0x%x", 1492 type, cmderr)); 1493 pci_xhci_insert_event(sc, &evtrb, 1); 1494 } 1495 1496 trb = pci_xhci_trb_next(sc, trb, &crcr); 1497 } 1498 1499 sc->opregs.crcr = crcr | (sc->opregs.crcr & XHCI_CRCR_LO_CA) | ccs; 1500 sc->opregs.crcr &= ~XHCI_CRCR_LO_CRR; 1501 return (error); 1502 } 1503 1504 static void 1505 pci_xhci_dump_trb(struct xhci_trb *trb) 1506 { 1507 static const char *trbtypes[] = { 1508 "RESERVED", 1509 "NORMAL", 1510 "SETUP_STAGE", 1511 "DATA_STAGE", 1512 "STATUS_STAGE", 1513 "ISOCH", 1514 "LINK", 1515 "EVENT_DATA", 1516 "NOOP", 1517 "ENABLE_SLOT", 1518 "DISABLE_SLOT", 1519 "ADDRESS_DEVICE", 1520 "CONFIGURE_EP", 1521 "EVALUATE_CTX", 1522 "RESET_EP", 1523 "STOP_EP", 1524 "SET_TR_DEQUEUE", 1525 "RESET_DEVICE", 1526 "FORCE_EVENT", 1527 "NEGOTIATE_BW", 1528 "SET_LATENCY_TOL", 1529 "GET_PORT_BW", 1530 "FORCE_HEADER", 1531 "NOOP_CMD" 1532 }; 1533 uint32_t type; 1534 1535 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1536 DPRINTF(("pci_xhci: trb[@%p] type x%02x %s 0:x%016lx 2:x%08x 3:x%08x", 1537 trb, type, 1538 type <= XHCI_TRB_TYPE_NOOP_CMD ? trbtypes[type] : "INVALID", 1539 trb->qwTrb0, trb->dwTrb2, trb->dwTrb3)); 1540 } 1541 1542 static int 1543 pci_xhci_xfer_complete(struct pci_xhci_softc *sc, struct usb_data_xfer *xfer, 1544 uint32_t slot, uint32_t epid, int *do_intr) 1545 { 1546 struct pci_xhci_dev_emu *dev; 1547 struct pci_xhci_dev_ep *devep; 1548 struct xhci_dev_ctx *dev_ctx; 1549 struct xhci_endp_ctx *ep_ctx; 1550 struct xhci_trb *trb; 1551 struct xhci_trb evtrb; 1552 uint32_t trbflags; 1553 uint32_t edtla; 1554 int i, err; 1555 1556 dev = XHCI_SLOTDEV_PTR(sc, slot); 1557 devep = &dev->eps[epid]; 1558 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1559 1560 assert(dev_ctx != NULL); 1561 1562 ep_ctx = &dev_ctx->ctx_ep[epid]; 1563 1564 err = XHCI_TRB_ERROR_SUCCESS; 1565 *do_intr = 0; 1566 edtla = 0; 1567 1568 /* go through list of TRBs and insert event(s) */ 1569 for (i = xfer->head; xfer->ndata > 0; ) { 1570 evtrb.qwTrb0 = (uint64_t)xfer->data[i].hci_data; 1571 trb = XHCI_GADDR(sc, evtrb.qwTrb0); 1572 trbflags = trb->dwTrb3; 1573 1574 DPRINTF(("pci_xhci: xfer[%d] done?%u:%d trb %x %016lx %x " 1575 "(err %d) IOC?%d", 1576 i, xfer->data[i].processed, xfer->data[i].blen, 1577 XHCI_TRB_3_TYPE_GET(trbflags), evtrb.qwTrb0, 1578 trbflags, err, 1579 trb->dwTrb3 & XHCI_TRB_3_IOC_BIT ? 1 : 0)); 1580 1581 if (!xfer->data[i].processed) { 1582 xfer->head = i; 1583 break; 1584 } 1585 1586 xfer->ndata--; 1587 edtla += xfer->data[i].bdone; 1588 1589 trb->dwTrb3 = (trb->dwTrb3 & ~0x1) | (xfer->data[i].ccs); 1590 1591 pci_xhci_update_ep_ring(sc, dev, devep, ep_ctx, 1592 xfer->data[i].streamid, xfer->data[i].trbnext, 1593 xfer->data[i].ccs); 1594 1595 /* Only interrupt if IOC or short packet */ 1596 if (!(trb->dwTrb3 & XHCI_TRB_3_IOC_BIT) && 1597 !((err == XHCI_TRB_ERROR_SHORT_PKT) && 1598 (trb->dwTrb3 & XHCI_TRB_3_ISP_BIT))) { 1599 1600 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1601 continue; 1602 } 1603 1604 evtrb.dwTrb2 = XHCI_TRB_2_ERROR_SET(err) | 1605 XHCI_TRB_2_REM_SET(xfer->data[i].blen); 1606 1607 evtrb.dwTrb3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_TRANSFER) | 1608 XHCI_TRB_3_SLOT_SET(slot) | XHCI_TRB_3_EP_SET(epid); 1609 1610 if (XHCI_TRB_3_TYPE_GET(trbflags) == XHCI_TRB_TYPE_EVENT_DATA) { 1611 DPRINTF(("pci_xhci EVENT_DATA edtla %u", edtla)); 1612 evtrb.qwTrb0 = trb->qwTrb0; 1613 evtrb.dwTrb2 = (edtla & 0xFFFFF) | 1614 XHCI_TRB_2_ERROR_SET(err); 1615 evtrb.dwTrb3 |= XHCI_TRB_3_ED_BIT; 1616 edtla = 0; 1617 } 1618 1619 *do_intr = 1; 1620 1621 err = pci_xhci_insert_event(sc, &evtrb, 0); 1622 if (err != XHCI_TRB_ERROR_SUCCESS) { 1623 break; 1624 } 1625 1626 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1627 } 1628 1629 return (err); 1630 } 1631 1632 static void 1633 pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, 1634 struct pci_xhci_dev_emu *dev __unused, struct pci_xhci_dev_ep *devep, 1635 struct xhci_endp_ctx *ep_ctx, uint32_t streamid, uint64_t ringaddr, int ccs) 1636 { 1637 1638 if (devep->ep_MaxPStreams != 0) { 1639 devep->ep_sctx[streamid].qwSctx0 = (ringaddr & ~0xFUL) | 1640 (ccs & 0x1); 1641 1642 devep->ep_sctx_trbs[streamid].ringaddr = ringaddr & ~0xFUL; 1643 devep->ep_sctx_trbs[streamid].ccs = ccs & 0x1; 1644 ep_ctx->qwEpCtx2 = (ep_ctx->qwEpCtx2 & ~0x1) | (ccs & 0x1); 1645 1646 DPRINTF(("xhci update ep-ring stream %d, addr %lx", 1647 streamid, devep->ep_sctx[streamid].qwSctx0)); 1648 } else { 1649 devep->ep_ringaddr = ringaddr & ~0xFUL; 1650 devep->ep_ccs = ccs & 0x1; 1651 devep->ep_tr = XHCI_GADDR(sc, ringaddr & ~0xFUL); 1652 ep_ctx->qwEpCtx2 = (ringaddr & ~0xFUL) | (ccs & 0x1); 1653 1654 DPRINTF(("xhci update ep-ring, addr %lx", 1655 (devep->ep_ringaddr | devep->ep_ccs))); 1656 } 1657 } 1658 1659 /* 1660 * Outstanding transfer still in progress (device NAK'd earlier) so retry 1661 * the transfer again to see if it succeeds. 1662 */ 1663 static int 1664 pci_xhci_try_usb_xfer(struct pci_xhci_softc *sc, 1665 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1666 struct xhci_endp_ctx *ep_ctx, uint32_t slot, uint32_t epid) 1667 { 1668 struct usb_data_xfer *xfer; 1669 int err; 1670 int do_intr; 1671 1672 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1673 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1674 1675 err = 0; 1676 do_intr = 0; 1677 1678 xfer = devep->ep_xfer; 1679 USB_DATA_XFER_LOCK(xfer); 1680 1681 /* outstanding requests queued up */ 1682 if (dev->dev_ue->ue_data != NULL) { 1683 err = dev->dev_ue->ue_data(dev->dev_sc, xfer, 1684 epid & 0x1 ? USB_XFER_IN : USB_XFER_OUT, epid/2); 1685 if (err == USB_ERR_CANCELLED) { 1686 if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) == 1687 USB_NAK) 1688 err = XHCI_TRB_ERROR_SUCCESS; 1689 } else { 1690 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, 1691 &do_intr); 1692 if (err == XHCI_TRB_ERROR_SUCCESS && do_intr) { 1693 pci_xhci_assert_interrupt(sc); 1694 } 1695 1696 1697 /* XXX should not do it if error? */ 1698 USB_DATA_XFER_RESET(xfer); 1699 } 1700 } 1701 1702 USB_DATA_XFER_UNLOCK(xfer); 1703 1704 1705 return (err); 1706 } 1707 1708 1709 static int 1710 pci_xhci_handle_transfer(struct pci_xhci_softc *sc, 1711 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1712 struct xhci_endp_ctx *ep_ctx, struct xhci_trb *trb, uint32_t slot, 1713 uint32_t epid, uint64_t addr, uint32_t ccs, uint32_t streamid) 1714 { 1715 struct xhci_trb *setup_trb; 1716 struct usb_data_xfer *xfer; 1717 struct usb_data_xfer_block *xfer_block; 1718 uint64_t val; 1719 uint32_t trbflags; 1720 int do_intr, err; 1721 int do_retry; 1722 1723 ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0, 1724 XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1725 1726 xfer = devep->ep_xfer; 1727 USB_DATA_XFER_LOCK(xfer); 1728 1729 DPRINTF(("pci_xhci handle_transfer slot %u", slot)); 1730 1731 retry: 1732 err = XHCI_TRB_ERROR_INVALID; 1733 do_retry = 0; 1734 do_intr = 0; 1735 setup_trb = NULL; 1736 1737 while (1) { 1738 pci_xhci_dump_trb(trb); 1739 1740 trbflags = trb->dwTrb3; 1741 1742 if (XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK && 1743 (trbflags & XHCI_TRB_3_CYCLE_BIT) != 1744 (ccs & XHCI_TRB_3_CYCLE_BIT)) { 1745 DPRINTF(("Cycle-bit changed trbflags %x, ccs %x", 1746 trbflags & XHCI_TRB_3_CYCLE_BIT, ccs)); 1747 break; 1748 } 1749 1750 xfer_block = NULL; 1751 1752 switch (XHCI_TRB_3_TYPE_GET(trbflags)) { 1753 case XHCI_TRB_TYPE_LINK: 1754 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1755 ccs ^= 0x1; 1756 1757 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1758 (void *)addr, ccs); 1759 xfer_block->processed = 1; 1760 break; 1761 1762 case XHCI_TRB_TYPE_SETUP_STAGE: 1763 if ((trbflags & XHCI_TRB_3_IDT_BIT) == 0 || 1764 XHCI_TRB_2_BYTES_GET(trb->dwTrb2) != 8) { 1765 DPRINTF(("pci_xhci: invalid setup trb")); 1766 err = XHCI_TRB_ERROR_TRB; 1767 goto errout; 1768 } 1769 setup_trb = trb; 1770 1771 val = trb->qwTrb0; 1772 if (!xfer->ureq) 1773 xfer->ureq = malloc( 1774 sizeof(struct usb_device_request)); 1775 memcpy(xfer->ureq, &val, 1776 sizeof(struct usb_device_request)); 1777 1778 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1779 (void *)addr, ccs); 1780 xfer_block->processed = 1; 1781 break; 1782 1783 case XHCI_TRB_TYPE_NORMAL: 1784 case XHCI_TRB_TYPE_ISOCH: 1785 if (setup_trb != NULL) { 1786 DPRINTF(("pci_xhci: trb not supposed to be in " 1787 "ctl scope")); 1788 err = XHCI_TRB_ERROR_TRB; 1789 goto errout; 1790 } 1791 /* fall through */ 1792 1793 case XHCI_TRB_TYPE_DATA_STAGE: 1794 xfer_block = usb_data_xfer_append(xfer, 1795 (void *)(trbflags & XHCI_TRB_3_IDT_BIT ? 1796 &trb->qwTrb0 : XHCI_GADDR(sc, trb->qwTrb0)), 1797 trb->dwTrb2 & 0x1FFFF, (void *)addr, ccs); 1798 break; 1799 1800 case XHCI_TRB_TYPE_STATUS_STAGE: 1801 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1802 (void *)addr, ccs); 1803 break; 1804 1805 case XHCI_TRB_TYPE_NOOP: 1806 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1807 (void *)addr, ccs); 1808 xfer_block->processed = 1; 1809 break; 1810 1811 case XHCI_TRB_TYPE_EVENT_DATA: 1812 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1813 (void *)addr, ccs); 1814 if ((epid > 1) && (trbflags & XHCI_TRB_3_IOC_BIT)) { 1815 xfer_block->processed = 1; 1816 } 1817 break; 1818 1819 default: 1820 DPRINTF(("pci_xhci: handle xfer unexpected trb type " 1821 "0x%x", 1822 XHCI_TRB_3_TYPE_GET(trbflags))); 1823 err = XHCI_TRB_ERROR_TRB; 1824 goto errout; 1825 } 1826 1827 trb = pci_xhci_trb_next(sc, trb, &addr); 1828 1829 DPRINTF(("pci_xhci: next trb: 0x%lx", (uint64_t)trb)); 1830 1831 if (xfer_block) { 1832 xfer_block->trbnext = addr; 1833 xfer_block->streamid = streamid; 1834 } 1835 1836 if (!setup_trb && !(trbflags & XHCI_TRB_3_CHAIN_BIT) && 1837 XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK) { 1838 break; 1839 } 1840 1841 /* handle current batch that requires interrupt on complete */ 1842 if (trbflags & XHCI_TRB_3_IOC_BIT) { 1843 DPRINTF(("pci_xhci: trb IOC bit set")); 1844 if (epid == 1) 1845 do_retry = 1; 1846 break; 1847 } 1848 } 1849 1850 DPRINTF(("pci_xhci[%d]: xfer->ndata %u", __LINE__, xfer->ndata)); 1851 1852 if (xfer->ndata <= 0) 1853 goto errout; 1854 1855 if (epid == 1) { 1856 int usberr; 1857 1858 if (dev->dev_ue->ue_request != NULL) 1859 usberr = dev->dev_ue->ue_request(dev->dev_sc, xfer); 1860 else 1861 usberr = USB_ERR_NOT_STARTED; 1862 err = USB_TO_XHCI_ERR(usberr); 1863 if (err == XHCI_TRB_ERROR_SUCCESS || 1864 err == XHCI_TRB_ERROR_STALL || 1865 err == XHCI_TRB_ERROR_SHORT_PKT) { 1866 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, 1867 &do_intr); 1868 if (err != XHCI_TRB_ERROR_SUCCESS) 1869 do_retry = 0; 1870 } 1871 1872 } else { 1873 /* handle data transfer */ 1874 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1875 err = XHCI_TRB_ERROR_SUCCESS; 1876 } 1877 1878 errout: 1879 if (err == XHCI_TRB_ERROR_EV_RING_FULL) 1880 DPRINTF(("pci_xhci[%d]: event ring full", __LINE__)); 1881 1882 if (!do_retry) 1883 USB_DATA_XFER_UNLOCK(xfer); 1884 1885 if (do_intr) 1886 pci_xhci_assert_interrupt(sc); 1887 1888 if (do_retry) { 1889 USB_DATA_XFER_RESET(xfer); 1890 DPRINTF(("pci_xhci[%d]: retry:continuing with next TRBs", 1891 __LINE__)); 1892 goto retry; 1893 } 1894 1895 if (epid == 1) 1896 USB_DATA_XFER_RESET(xfer); 1897 1898 return (err); 1899 } 1900 1901 static void 1902 pci_xhci_device_doorbell(struct pci_xhci_softc *sc, uint32_t slot, 1903 uint32_t epid, uint32_t streamid) 1904 { 1905 struct pci_xhci_dev_emu *dev; 1906 struct pci_xhci_dev_ep *devep; 1907 struct xhci_dev_ctx *dev_ctx; 1908 struct xhci_endp_ctx *ep_ctx; 1909 struct pci_xhci_trb_ring *sctx_tr; 1910 struct xhci_trb *trb; 1911 uint64_t ringaddr; 1912 uint32_t ccs; 1913 int error; 1914 1915 DPRINTF(("pci_xhci doorbell slot %u epid %u stream %u", 1916 slot, epid, streamid)); 1917 1918 if (slot == 0 || slot > XHCI_MAX_SLOTS) { 1919 DPRINTF(("pci_xhci: invalid doorbell slot %u", slot)); 1920 return; 1921 } 1922 1923 if (epid == 0 || epid >= XHCI_MAX_ENDPOINTS) { 1924 DPRINTF(("pci_xhci: invalid endpoint %u", epid)); 1925 return; 1926 } 1927 1928 dev = XHCI_SLOTDEV_PTR(sc, slot); 1929 devep = &dev->eps[epid]; 1930 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1931 if (!dev_ctx) { 1932 return; 1933 } 1934 ep_ctx = &dev_ctx->ctx_ep[epid]; 1935 1936 sctx_tr = NULL; 1937 1938 DPRINTF(("pci_xhci: device doorbell ep[%u] %08x %08x %016lx %08x", 1939 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1940 ep_ctx->dwEpCtx4)); 1941 1942 if (ep_ctx->qwEpCtx2 == 0) 1943 return; 1944 1945 /* handle pending transfers */ 1946 if (devep->ep_xfer->ndata > 0) { 1947 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1948 return; 1949 } 1950 1951 /* get next trb work item */ 1952 if (devep->ep_MaxPStreams != 0) { 1953 /* 1954 * Stream IDs of 0, 65535 (any stream), and 65534 1955 * (prime) are invalid. 1956 */ 1957 if (streamid == 0 || streamid == 65534 || streamid == 65535) { 1958 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1959 return; 1960 } 1961 1962 error = pci_xhci_find_stream(sc, ep_ctx, devep, streamid); 1963 if (error != XHCI_TRB_ERROR_SUCCESS) { 1964 DPRINTF(("pci_xhci: invalid stream %u: %d", 1965 streamid, error)); 1966 return; 1967 } 1968 sctx_tr = &devep->ep_sctx_trbs[streamid]; 1969 ringaddr = sctx_tr->ringaddr; 1970 ccs = sctx_tr->ccs; 1971 trb = XHCI_GADDR(sc, sctx_tr->ringaddr & ~0xFUL); 1972 DPRINTF(("doorbell, stream %u, ccs %lx, trb ccs %x", 1973 streamid, ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1974 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1975 } else { 1976 if (streamid != 0) { 1977 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1978 return; 1979 } 1980 ringaddr = devep->ep_ringaddr; 1981 ccs = devep->ep_ccs; 1982 trb = devep->ep_tr; 1983 DPRINTF(("doorbell, ccs %lx, trb ccs %x", 1984 ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1985 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1986 } 1987 1988 if (XHCI_TRB_3_TYPE_GET(trb->dwTrb3) == 0) { 1989 DPRINTF(("pci_xhci: ring %lx trb[%lx] EP %u is RESERVED?", 1990 ep_ctx->qwEpCtx2, devep->ep_ringaddr, epid)); 1991 return; 1992 } 1993 1994 pci_xhci_handle_transfer(sc, dev, devep, ep_ctx, trb, slot, epid, 1995 ringaddr, ccs, streamid); 1996 } 1997 1998 static void 1999 pci_xhci_dbregs_write(struct pci_xhci_softc *sc, uint64_t offset, 2000 uint64_t value) 2001 { 2002 2003 offset = (offset - sc->dboff) / sizeof(uint32_t); 2004 2005 DPRINTF(("pci_xhci: doorbell write offset 0x%lx: 0x%lx", 2006 offset, value)); 2007 2008 if (XHCI_HALTED(sc)) { 2009 DPRINTF(("pci_xhci: controller halted")); 2010 return; 2011 } 2012 2013 if (offset == 0) 2014 pci_xhci_complete_commands(sc); 2015 else if (sc->portregs != NULL) 2016 pci_xhci_device_doorbell(sc, offset, 2017 XHCI_DB_TARGET_GET(value), XHCI_DB_SID_GET(value)); 2018 } 2019 2020 static void 2021 pci_xhci_rtsregs_write(struct pci_xhci_softc *sc, uint64_t offset, 2022 uint64_t value) 2023 { 2024 struct pci_xhci_rtsregs *rts; 2025 2026 offset -= sc->rtsoff; 2027 2028 if (offset == 0) { 2029 DPRINTF(("pci_xhci attempted write to MFINDEX")); 2030 return; 2031 } 2032 2033 DPRINTF(("pci_xhci: runtime regs write offset 0x%lx: 0x%lx", 2034 offset, value)); 2035 2036 offset -= 0x20; /* start of intrreg */ 2037 2038 rts = &sc->rtsregs; 2039 2040 switch (offset) { 2041 case 0x00: 2042 if (value & XHCI_IMAN_INTR_PEND) 2043 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2044 rts->intrreg.iman = (value & XHCI_IMAN_INTR_ENA) | 2045 (rts->intrreg.iman & XHCI_IMAN_INTR_PEND); 2046 2047 if (!(value & XHCI_IMAN_INTR_ENA)) 2048 pci_xhci_deassert_interrupt(sc); 2049 2050 break; 2051 2052 case 0x04: 2053 rts->intrreg.imod = value; 2054 break; 2055 2056 case 0x08: 2057 rts->intrreg.erstsz = value & 0xFFFF; 2058 break; 2059 2060 case 0x10: 2061 /* ERSTBA low bits */ 2062 rts->intrreg.erstba = MASK_64_HI(sc->rtsregs.intrreg.erstba) | 2063 (value & ~0x3F); 2064 break; 2065 2066 case 0x14: 2067 /* ERSTBA high bits */ 2068 rts->intrreg.erstba = (value << 32) | 2069 MASK_64_LO(sc->rtsregs.intrreg.erstba); 2070 2071 rts->erstba_p = XHCI_GADDR(sc, 2072 sc->rtsregs.intrreg.erstba & ~0x3FUL); 2073 2074 rts->erst_p = XHCI_GADDR(sc, 2075 sc->rtsregs.erstba_p->qwEvrsTablePtr & ~0x3FUL); 2076 2077 rts->er_enq_idx = 0; 2078 rts->er_events_cnt = 0; 2079 2080 DPRINTF(("pci_xhci: wr erstba erst (%p) ptr 0x%lx, sz %u", 2081 rts->erstba_p, 2082 rts->erstba_p->qwEvrsTablePtr, 2083 rts->erstba_p->dwEvrsTableSize)); 2084 break; 2085 2086 case 0x18: 2087 /* ERDP low bits */ 2088 rts->intrreg.erdp = 2089 MASK_64_HI(sc->rtsregs.intrreg.erdp) | 2090 (rts->intrreg.erdp & XHCI_ERDP_LO_BUSY) | 2091 (value & ~0xF); 2092 if (value & XHCI_ERDP_LO_BUSY) { 2093 rts->intrreg.erdp &= ~XHCI_ERDP_LO_BUSY; 2094 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2095 } 2096 2097 rts->er_deq_seg = XHCI_ERDP_LO_SINDEX(value); 2098 2099 break; 2100 2101 case 0x1C: 2102 /* ERDP high bits */ 2103 rts->intrreg.erdp = (value << 32) | 2104 MASK_64_LO(sc->rtsregs.intrreg.erdp); 2105 2106 if (rts->er_events_cnt > 0) { 2107 uint64_t erdp; 2108 int erdp_i; 2109 2110 erdp = rts->intrreg.erdp & ~0xF; 2111 erdp_i = (erdp - rts->erstba_p->qwEvrsTablePtr) / 2112 sizeof(struct xhci_trb); 2113 2114 if (erdp_i <= rts->er_enq_idx) 2115 rts->er_events_cnt = rts->er_enq_idx - erdp_i; 2116 else 2117 rts->er_events_cnt = 2118 rts->erstba_p->dwEvrsTableSize - 2119 (erdp_i - rts->er_enq_idx); 2120 2121 DPRINTF(("pci_xhci: erdp 0x%lx, events cnt %u", 2122 erdp, rts->er_events_cnt)); 2123 } 2124 2125 break; 2126 2127 default: 2128 DPRINTF(("pci_xhci attempted write to RTS offset 0x%lx", 2129 offset)); 2130 break; 2131 } 2132 } 2133 2134 static uint64_t 2135 pci_xhci_portregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2136 { 2137 struct pci_xhci_portregs *portregs; 2138 int port; 2139 uint32_t reg; 2140 2141 if (sc->portregs == NULL) 2142 return (0); 2143 2144 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ; 2145 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ; 2146 2147 if (port > XHCI_MAX_DEVS) { 2148 DPRINTF(("pci_xhci: portregs_read port %d >= XHCI_MAX_DEVS", 2149 port)); 2150 2151 /* return default value for unused port */ 2152 return (XHCI_PS_SPEED_SET(3)); 2153 } 2154 2155 portregs = XHCI_PORTREG_PTR(sc, port); 2156 switch (offset) { 2157 case 0: 2158 reg = portregs->portsc; 2159 break; 2160 case 4: 2161 reg = portregs->portpmsc; 2162 break; 2163 case 8: 2164 reg = portregs->portli; 2165 break; 2166 case 12: 2167 reg = portregs->porthlpmc; 2168 break; 2169 default: 2170 DPRINTF(("pci_xhci: unaligned portregs read offset %#lx", 2171 offset)); 2172 reg = 0xffffffff; 2173 break; 2174 } 2175 2176 DPRINTF(("pci_xhci: portregs read offset 0x%lx port %u -> 0x%x", 2177 offset, port, reg)); 2178 2179 return (reg); 2180 } 2181 2182 static void 2183 pci_xhci_hostop_write(struct pci_xhci_softc *sc, uint64_t offset, 2184 uint64_t value) 2185 { 2186 offset -= XHCI_CAPLEN; 2187 2188 if (offset < 0x400) 2189 DPRINTF(("pci_xhci: hostop write offset 0x%lx: 0x%lx", 2190 offset, value)); 2191 2192 switch (offset) { 2193 case XHCI_USBCMD: 2194 sc->opregs.usbcmd = pci_xhci_usbcmd_write(sc, value & 0x3F0F); 2195 break; 2196 2197 case XHCI_USBSTS: 2198 /* clear bits on write */ 2199 sc->opregs.usbsts &= ~(value & 2200 (XHCI_STS_HSE|XHCI_STS_EINT|XHCI_STS_PCD|XHCI_STS_SSS| 2201 XHCI_STS_RSS|XHCI_STS_SRE|XHCI_STS_CNR)); 2202 break; 2203 2204 case XHCI_PAGESIZE: 2205 /* read only */ 2206 break; 2207 2208 case XHCI_DNCTRL: 2209 sc->opregs.dnctrl = value & 0xFFFF; 2210 break; 2211 2212 case XHCI_CRCR_LO: 2213 if (sc->opregs.crcr & XHCI_CRCR_LO_CRR) { 2214 sc->opregs.crcr &= ~(XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2215 sc->opregs.crcr |= value & 2216 (XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2217 } else { 2218 sc->opregs.crcr = MASK_64_HI(sc->opregs.crcr) | 2219 (value & (0xFFFFFFC0 | XHCI_CRCR_LO_RCS)); 2220 } 2221 break; 2222 2223 case XHCI_CRCR_HI: 2224 if (!(sc->opregs.crcr & XHCI_CRCR_LO_CRR)) { 2225 sc->opregs.crcr = MASK_64_LO(sc->opregs.crcr) | 2226 (value << 32); 2227 2228 sc->opregs.cr_p = XHCI_GADDR(sc, 2229 sc->opregs.crcr & ~0xF); 2230 } 2231 2232 if (sc->opregs.crcr & XHCI_CRCR_LO_CS) { 2233 /* Stop operation of Command Ring */ 2234 } 2235 2236 if (sc->opregs.crcr & XHCI_CRCR_LO_CA) { 2237 /* Abort command */ 2238 } 2239 2240 break; 2241 2242 case XHCI_DCBAAP_LO: 2243 sc->opregs.dcbaap = MASK_64_HI(sc->opregs.dcbaap) | 2244 (value & 0xFFFFFFC0); 2245 break; 2246 2247 case XHCI_DCBAAP_HI: 2248 sc->opregs.dcbaap = MASK_64_LO(sc->opregs.dcbaap) | 2249 (value << 32); 2250 sc->opregs.dcbaa_p = XHCI_GADDR(sc, sc->opregs.dcbaap & ~0x3FUL); 2251 2252 DPRINTF(("pci_xhci: opregs dcbaap = 0x%lx (vaddr 0x%lx)", 2253 sc->opregs.dcbaap, (uint64_t)sc->opregs.dcbaa_p)); 2254 break; 2255 2256 case XHCI_CONFIG: 2257 sc->opregs.config = value & 0x03FF; 2258 break; 2259 2260 default: 2261 if (offset >= 0x400) 2262 pci_xhci_portregs_write(sc, offset, value); 2263 2264 break; 2265 } 2266 } 2267 2268 2269 static void 2270 pci_xhci_write(struct vmctx *ctx __unused, int vcpu __unused, 2271 struct pci_devinst *pi, int baridx, uint64_t offset, int size __unused, 2272 uint64_t value) 2273 { 2274 struct pci_xhci_softc *sc; 2275 2276 sc = pi->pi_arg; 2277 2278 assert(baridx == 0); 2279 2280 pthread_mutex_lock(&sc->mtx); 2281 if (offset < XHCI_CAPLEN) /* read only registers */ 2282 WPRINTF(("pci_xhci: write RO-CAPs offset %ld", offset)); 2283 else if (offset < sc->dboff) 2284 pci_xhci_hostop_write(sc, offset, value); 2285 else if (offset < sc->rtsoff) 2286 pci_xhci_dbregs_write(sc, offset, value); 2287 else if (offset < sc->regsend) 2288 pci_xhci_rtsregs_write(sc, offset, value); 2289 else 2290 WPRINTF(("pci_xhci: write invalid offset %ld", offset)); 2291 2292 pthread_mutex_unlock(&sc->mtx); 2293 } 2294 2295 static uint64_t 2296 pci_xhci_hostcap_read(struct pci_xhci_softc *sc, uint64_t offset) 2297 { 2298 uint64_t value; 2299 2300 switch (offset) { 2301 case XHCI_CAPLENGTH: /* 0x00 */ 2302 value = sc->caplength; 2303 break; 2304 2305 case XHCI_HCSPARAMS1: /* 0x04 */ 2306 value = sc->hcsparams1; 2307 break; 2308 2309 case XHCI_HCSPARAMS2: /* 0x08 */ 2310 value = sc->hcsparams2; 2311 break; 2312 2313 case XHCI_HCSPARAMS3: /* 0x0C */ 2314 value = sc->hcsparams3; 2315 break; 2316 2317 case XHCI_HCSPARAMS0: /* 0x10 */ 2318 value = sc->hccparams1; 2319 break; 2320 2321 case XHCI_DBOFF: /* 0x14 */ 2322 value = sc->dboff; 2323 break; 2324 2325 case XHCI_RTSOFF: /* 0x18 */ 2326 value = sc->rtsoff; 2327 break; 2328 2329 case XHCI_HCCPRAMS2: /* 0x1C */ 2330 value = sc->hccparams2; 2331 break; 2332 2333 default: 2334 value = 0; 2335 break; 2336 } 2337 2338 DPRINTF(("pci_xhci: hostcap read offset 0x%lx -> 0x%lx", 2339 offset, value)); 2340 2341 return (value); 2342 } 2343 2344 static uint64_t 2345 pci_xhci_hostop_read(struct pci_xhci_softc *sc, uint64_t offset) 2346 { 2347 uint64_t value; 2348 2349 offset = (offset - XHCI_CAPLEN); 2350 2351 switch (offset) { 2352 case XHCI_USBCMD: /* 0x00 */ 2353 value = sc->opregs.usbcmd; 2354 break; 2355 2356 case XHCI_USBSTS: /* 0x04 */ 2357 value = sc->opregs.usbsts; 2358 break; 2359 2360 case XHCI_PAGESIZE: /* 0x08 */ 2361 value = sc->opregs.pgsz; 2362 break; 2363 2364 case XHCI_DNCTRL: /* 0x14 */ 2365 value = sc->opregs.dnctrl; 2366 break; 2367 2368 case XHCI_CRCR_LO: /* 0x18 */ 2369 value = sc->opregs.crcr & XHCI_CRCR_LO_CRR; 2370 break; 2371 2372 case XHCI_CRCR_HI: /* 0x1C */ 2373 value = 0; 2374 break; 2375 2376 case XHCI_DCBAAP_LO: /* 0x30 */ 2377 value = sc->opregs.dcbaap & 0xFFFFFFFF; 2378 break; 2379 2380 case XHCI_DCBAAP_HI: /* 0x34 */ 2381 value = (sc->opregs.dcbaap >> 32) & 0xFFFFFFFF; 2382 break; 2383 2384 case XHCI_CONFIG: /* 0x38 */ 2385 value = sc->opregs.config; 2386 break; 2387 2388 default: 2389 if (offset >= 0x400) 2390 value = pci_xhci_portregs_read(sc, offset); 2391 else 2392 value = 0; 2393 2394 break; 2395 } 2396 2397 if (offset < 0x400) 2398 DPRINTF(("pci_xhci: hostop read offset 0x%lx -> 0x%lx", 2399 offset, value)); 2400 2401 return (value); 2402 } 2403 2404 static uint64_t 2405 pci_xhci_dbregs_read(struct pci_xhci_softc *sc __unused, 2406 uint64_t offset __unused) 2407 { 2408 /* read doorbell always returns 0 */ 2409 return (0); 2410 } 2411 2412 static uint64_t 2413 pci_xhci_rtsregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2414 { 2415 uint32_t value; 2416 2417 offset -= sc->rtsoff; 2418 value = 0; 2419 2420 if (offset == XHCI_MFINDEX) { 2421 value = sc->rtsregs.mfindex; 2422 } else if (offset >= 0x20) { 2423 int item; 2424 uint32_t *p; 2425 2426 offset -= 0x20; 2427 item = offset % 32; 2428 2429 assert(offset < sizeof(sc->rtsregs.intrreg)); 2430 2431 p = &sc->rtsregs.intrreg.iman; 2432 p += item / sizeof(uint32_t); 2433 value = *p; 2434 } 2435 2436 DPRINTF(("pci_xhci: rtsregs read offset 0x%lx -> 0x%x", 2437 offset, value)); 2438 2439 return (value); 2440 } 2441 2442 static uint64_t 2443 pci_xhci_xecp_read(struct pci_xhci_softc *sc, uint64_t offset) 2444 { 2445 uint32_t value; 2446 2447 offset -= sc->regsend; 2448 value = 0; 2449 2450 switch (offset) { 2451 case 0: 2452 /* rev major | rev minor | next-cap | cap-id */ 2453 value = (0x02 << 24) | (4 << 8) | XHCI_ID_PROTOCOLS; 2454 break; 2455 case 4: 2456 /* name string = "USB" */ 2457 value = 0x20425355; 2458 break; 2459 case 8: 2460 /* psic | proto-defined | compat # | compat offset */ 2461 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb2_port_start; 2462 break; 2463 case 12: 2464 break; 2465 case 16: 2466 /* rev major | rev minor | next-cap | cap-id */ 2467 value = (0x03 << 24) | XHCI_ID_PROTOCOLS; 2468 break; 2469 case 20: 2470 /* name string = "USB" */ 2471 value = 0x20425355; 2472 break; 2473 case 24: 2474 /* psic | proto-defined | compat # | compat offset */ 2475 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb3_port_start; 2476 break; 2477 case 28: 2478 break; 2479 default: 2480 DPRINTF(("pci_xhci: xecp invalid offset 0x%lx", offset)); 2481 break; 2482 } 2483 2484 DPRINTF(("pci_xhci: xecp read offset 0x%lx -> 0x%x", 2485 offset, value)); 2486 2487 return (value); 2488 } 2489 2490 2491 static uint64_t 2492 pci_xhci_read(struct vmctx *ctx __unused, int vcpu __unused, 2493 struct pci_devinst *pi, int baridx, uint64_t offset, int size) 2494 { 2495 struct pci_xhci_softc *sc; 2496 uint32_t value; 2497 2498 sc = pi->pi_arg; 2499 2500 assert(baridx == 0); 2501 2502 pthread_mutex_lock(&sc->mtx); 2503 if (offset < XHCI_CAPLEN) 2504 value = pci_xhci_hostcap_read(sc, offset); 2505 else if (offset < sc->dboff) 2506 value = pci_xhci_hostop_read(sc, offset); 2507 else if (offset < sc->rtsoff) 2508 value = pci_xhci_dbregs_read(sc, offset); 2509 else if (offset < sc->regsend) 2510 value = pci_xhci_rtsregs_read(sc, offset); 2511 else if (offset < (sc->regsend + 4*32)) 2512 value = pci_xhci_xecp_read(sc, offset); 2513 else { 2514 value = 0; 2515 WPRINTF(("pci_xhci: read invalid offset %ld", offset)); 2516 } 2517 2518 pthread_mutex_unlock(&sc->mtx); 2519 2520 switch (size) { 2521 case 1: 2522 value &= 0xFF; 2523 break; 2524 case 2: 2525 value &= 0xFFFF; 2526 break; 2527 case 4: 2528 value &= 0xFFFFFFFF; 2529 break; 2530 } 2531 2532 return (value); 2533 } 2534 2535 static void 2536 pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm) 2537 { 2538 struct pci_xhci_portregs *port; 2539 struct pci_xhci_dev_emu *dev; 2540 struct xhci_trb evtrb; 2541 int error; 2542 2543 assert(portn <= XHCI_MAX_DEVS); 2544 2545 DPRINTF(("xhci reset port %d", portn)); 2546 2547 port = XHCI_PORTREG_PTR(sc, portn); 2548 dev = XHCI_DEVINST_PTR(sc, portn); 2549 if (dev) { 2550 port->portsc &= ~(XHCI_PS_PLS_MASK | XHCI_PS_PR | XHCI_PS_PRC); 2551 port->portsc |= XHCI_PS_PED | 2552 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2553 2554 if (warm && dev->dev_ue->ue_usbver == 3) { 2555 port->portsc |= XHCI_PS_WRC; 2556 } 2557 2558 if ((port->portsc & XHCI_PS_PRC) == 0) { 2559 port->portsc |= XHCI_PS_PRC; 2560 2561 pci_xhci_set_evtrb(&evtrb, portn, 2562 XHCI_TRB_ERROR_SUCCESS, 2563 XHCI_TRB_EVENT_PORT_STS_CHANGE); 2564 error = pci_xhci_insert_event(sc, &evtrb, 1); 2565 if (error != XHCI_TRB_ERROR_SUCCESS) 2566 DPRINTF(("xhci reset port insert event " 2567 "failed")); 2568 } 2569 } 2570 } 2571 2572 static void 2573 pci_xhci_init_port(struct pci_xhci_softc *sc, int portn) 2574 { 2575 struct pci_xhci_portregs *port; 2576 struct pci_xhci_dev_emu *dev; 2577 2578 port = XHCI_PORTREG_PTR(sc, portn); 2579 dev = XHCI_DEVINST_PTR(sc, portn); 2580 if (dev) { 2581 port->portsc = XHCI_PS_CCS | /* connected */ 2582 XHCI_PS_PP; /* port power */ 2583 2584 if (dev->dev_ue->ue_usbver == 2) { 2585 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL) | 2586 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2587 } else { 2588 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0) | 2589 XHCI_PS_PED | /* enabled */ 2590 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2591 } 2592 2593 DPRINTF(("Init port %d 0x%x", portn, port->portsc)); 2594 } else { 2595 port->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP; 2596 DPRINTF(("Init empty port %d 0x%x", portn, port->portsc)); 2597 } 2598 } 2599 2600 static int 2601 pci_xhci_dev_intr(struct usb_hci *hci, int epctx) 2602 { 2603 struct pci_xhci_dev_emu *dev; 2604 struct xhci_dev_ctx *dev_ctx; 2605 struct xhci_trb evtrb; 2606 struct pci_xhci_softc *sc; 2607 struct pci_xhci_portregs *p; 2608 struct xhci_endp_ctx *ep_ctx; 2609 int error = 0; 2610 int dir_in; 2611 int epid; 2612 2613 dir_in = epctx & 0x80; 2614 epid = epctx & ~0x80; 2615 2616 /* HW endpoint contexts are 0-15; convert to epid based on dir */ 2617 epid = (epid * 2) + (dir_in ? 1 : 0); 2618 2619 assert(epid >= 1 && epid <= 31); 2620 2621 dev = hci->hci_sc; 2622 sc = dev->xsc; 2623 2624 /* check if device is ready; OS has to initialise it */ 2625 if (sc->rtsregs.erstba_p == NULL || 2626 (sc->opregs.usbcmd & XHCI_CMD_RS) == 0 || 2627 dev->dev_ctx == NULL) 2628 return (0); 2629 2630 p = XHCI_PORTREG_PTR(sc, hci->hci_port); 2631 2632 /* raise event if link U3 (suspended) state */ 2633 if (XHCI_PS_PLS_GET(p->portsc) == 3) { 2634 p->portsc &= ~XHCI_PS_PLS_MASK; 2635 p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME); 2636 if ((p->portsc & XHCI_PS_PLC) != 0) 2637 return (0); 2638 2639 p->portsc |= XHCI_PS_PLC; 2640 2641 pci_xhci_set_evtrb(&evtrb, hci->hci_port, 2642 XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE); 2643 error = pci_xhci_insert_event(sc, &evtrb, 0); 2644 if (error != XHCI_TRB_ERROR_SUCCESS) 2645 goto done; 2646 } 2647 2648 dev_ctx = dev->dev_ctx; 2649 ep_ctx = &dev_ctx->ctx_ep[epid]; 2650 if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) { 2651 DPRINTF(("xhci device interrupt on disabled endpoint %d", 2652 epid)); 2653 return (0); 2654 } 2655 2656 DPRINTF(("xhci device interrupt on endpoint %d", epid)); 2657 2658 pci_xhci_device_doorbell(sc, hci->hci_port, epid, 0); 2659 2660 done: 2661 return (error); 2662 } 2663 2664 static int 2665 pci_xhci_dev_event(struct usb_hci *hci, enum hci_usbev evid __unused, 2666 void *param __unused) 2667 { 2668 DPRINTF(("xhci device event port %d", hci->hci_port)); 2669 return (0); 2670 } 2671 2672 /* 2673 * Each controller contains a "slot" node which contains a list of 2674 * child nodes each of which is a device. Each slot node's name 2675 * corresponds to a specific controller slot. These nodes 2676 * contain a "device" variable identifying the device model of the 2677 * USB device. For example: 2678 * 2679 * pci.0.1.0 2680 * .device="xhci" 2681 * .slot 2682 * .1 2683 * .device="tablet" 2684 */ 2685 static int 2686 pci_xhci_legacy_config(nvlist_t *nvl, const char *opts) 2687 { 2688 char node_name[16]; 2689 nvlist_t *slots_nvl, *slot_nvl; 2690 char *cp, *opt, *str, *tofree; 2691 int slot; 2692 2693 if (opts == NULL) 2694 return (0); 2695 2696 slots_nvl = create_relative_config_node(nvl, "slot"); 2697 slot = 1; 2698 tofree = str = strdup(opts); 2699 while ((opt = strsep(&str, ",")) != NULL) { 2700 /* device[=<config>] */ 2701 cp = strchr(opt, '='); 2702 if (cp != NULL) { 2703 *cp = '\0'; 2704 cp++; 2705 } 2706 2707 snprintf(node_name, sizeof(node_name), "%d", slot); 2708 slot++; 2709 slot_nvl = create_relative_config_node(slots_nvl, node_name); 2710 set_config_value_node(slot_nvl, "device", opt); 2711 2712 /* 2713 * NB: Given that we split on commas above, the legacy 2714 * format only supports a single option. 2715 */ 2716 if (cp != NULL && *cp != '\0') 2717 pci_parse_legacy_config(slot_nvl, cp); 2718 } 2719 free(tofree); 2720 return (0); 2721 } 2722 2723 static int 2724 pci_xhci_parse_devices(struct pci_xhci_softc *sc, nvlist_t *nvl) 2725 { 2726 struct pci_xhci_dev_emu *dev; 2727 struct usb_devemu *ue; 2728 const nvlist_t *slots_nvl, *slot_nvl; 2729 const char *name, *device; 2730 char *cp; 2731 void *devsc, *cookie; 2732 long slot; 2733 int type, usb3_port, usb2_port, i, ndevices; 2734 2735 usb3_port = sc->usb3_port_start; 2736 usb2_port = sc->usb2_port_start; 2737 2738 sc->devices = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_dev_emu *)); 2739 sc->slots = calloc(XHCI_MAX_SLOTS, sizeof(struct pci_xhci_dev_emu *)); 2740 2741 /* port and slot numbering start from 1 */ 2742 sc->devices--; 2743 sc->slots--; 2744 2745 ndevices = 0; 2746 2747 slots_nvl = find_relative_config_node(nvl, "slot"); 2748 if (slots_nvl == NULL) 2749 goto portsfinal; 2750 2751 cookie = NULL; 2752 while ((name = nvlist_next(slots_nvl, &type, &cookie)) != NULL) { 2753 if (usb2_port == ((sc->usb2_port_start) + XHCI_MAX_DEVS/2) || 2754 usb3_port == ((sc->usb3_port_start) + XHCI_MAX_DEVS/2)) { 2755 WPRINTF(("pci_xhci max number of USB 2 or 3 " 2756 "devices reached, max %d", XHCI_MAX_DEVS/2)); 2757 goto bad; 2758 } 2759 2760 if (type != NV_TYPE_NVLIST) { 2761 EPRINTLN( 2762 "pci_xhci: config variable '%s' under slot node", 2763 name); 2764 goto bad; 2765 } 2766 2767 slot = strtol(name, &cp, 0); 2768 if (*cp != '\0' || slot <= 0 || slot > XHCI_MAX_SLOTS) { 2769 EPRINTLN("pci_xhci: invalid slot '%s'", name); 2770 goto bad; 2771 } 2772 2773 if (XHCI_SLOTDEV_PTR(sc, slot) != NULL) { 2774 EPRINTLN("pci_xhci: duplicate slot '%s'", name); 2775 goto bad; 2776 } 2777 2778 slot_nvl = nvlist_get_nvlist(slots_nvl, name); 2779 device = get_config_value_node(slot_nvl, "device"); 2780 if (device == NULL) { 2781 EPRINTLN( 2782 "pci_xhci: missing \"device\" value for slot '%s'", 2783 name); 2784 goto bad; 2785 } 2786 2787 ue = usb_emu_finddev(device); 2788 if (ue == NULL) { 2789 EPRINTLN("pci_xhci: unknown device model \"%s\"", 2790 device); 2791 goto bad; 2792 } 2793 2794 DPRINTF(("pci_xhci adding device %s", device)); 2795 2796 dev = calloc(1, sizeof(struct pci_xhci_dev_emu)); 2797 dev->xsc = sc; 2798 dev->hci.hci_sc = dev; 2799 dev->hci.hci_intr = pci_xhci_dev_intr; 2800 dev->hci.hci_event = pci_xhci_dev_event; 2801 2802 if (ue->ue_usbver == 2) { 2803 if (usb2_port == sc->usb2_port_start + 2804 XHCI_MAX_DEVS / 2) { 2805 WPRINTF(("pci_xhci max number of USB 2 devices " 2806 "reached, max %d", XHCI_MAX_DEVS / 2)); 2807 goto bad; 2808 } 2809 dev->hci.hci_port = usb2_port; 2810 usb2_port++; 2811 } else { 2812 if (usb3_port == sc->usb3_port_start + 2813 XHCI_MAX_DEVS / 2) { 2814 WPRINTF(("pci_xhci max number of USB 3 devices " 2815 "reached, max %d", XHCI_MAX_DEVS / 2)); 2816 goto bad; 2817 } 2818 dev->hci.hci_port = usb3_port; 2819 usb3_port++; 2820 } 2821 XHCI_DEVINST_PTR(sc, dev->hci.hci_port) = dev; 2822 2823 dev->hci.hci_address = 0; 2824 devsc = ue->ue_init(&dev->hci, nvl); 2825 if (devsc == NULL) { 2826 goto bad; 2827 } 2828 2829 dev->dev_ue = ue; 2830 dev->dev_sc = devsc; 2831 2832 XHCI_SLOTDEV_PTR(sc, slot) = dev; 2833 ndevices++; 2834 } 2835 2836 portsfinal: 2837 sc->portregs = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_portregs)); 2838 sc->portregs--; 2839 2840 if (ndevices > 0) { 2841 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 2842 pci_xhci_init_port(sc, i); 2843 } 2844 } else { 2845 WPRINTF(("pci_xhci no USB devices configured")); 2846 } 2847 return (0); 2848 2849 bad: 2850 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 2851 free(XHCI_DEVINST_PTR(sc, i)); 2852 } 2853 2854 free(sc->devices + 1); 2855 free(sc->slots + 1); 2856 2857 return (-1); 2858 } 2859 2860 static int 2861 pci_xhci_init(struct vmctx *ctx __unused, struct pci_devinst *pi, nvlist_t *nvl) 2862 { 2863 struct pci_xhci_softc *sc; 2864 int error; 2865 2866 if (xhci_in_use) { 2867 WPRINTF(("pci_xhci controller already defined")); 2868 return (-1); 2869 } 2870 xhci_in_use = 1; 2871 2872 sc = calloc(1, sizeof(struct pci_xhci_softc)); 2873 pi->pi_arg = sc; 2874 sc->xsc_pi = pi; 2875 2876 sc->usb2_port_start = (XHCI_MAX_DEVS/2) + 1; 2877 sc->usb3_port_start = 1; 2878 2879 /* discover devices */ 2880 error = pci_xhci_parse_devices(sc, nvl); 2881 if (error < 0) 2882 goto done; 2883 else 2884 error = 0; 2885 2886 sc->caplength = XHCI_SET_CAPLEN(XHCI_CAPLEN) | 2887 XHCI_SET_HCIVERSION(0x0100); 2888 sc->hcsparams1 = XHCI_SET_HCSP1_MAXPORTS(XHCI_MAX_DEVS) | 2889 XHCI_SET_HCSP1_MAXINTR(1) | /* interrupters */ 2890 XHCI_SET_HCSP1_MAXSLOTS(XHCI_MAX_SLOTS); 2891 sc->hcsparams2 = XHCI_SET_HCSP2_ERSTMAX(XHCI_ERST_MAX) | 2892 XHCI_SET_HCSP2_IST(0x04); 2893 sc->hcsparams3 = 0; /* no latency */ 2894 sc->hccparams1 = XHCI_SET_HCCP1_AC64(1) | /* 64-bit addrs */ 2895 XHCI_SET_HCCP1_NSS(1) | /* no 2nd-streams */ 2896 XHCI_SET_HCCP1_SPC(1) | /* short packet */ 2897 XHCI_SET_HCCP1_MAXPSA(XHCI_STREAMS_MAX); 2898 sc->hccparams2 = XHCI_SET_HCCP2_LEC(1) | 2899 XHCI_SET_HCCP2_U3C(1); 2900 sc->dboff = XHCI_SET_DOORBELL(XHCI_CAPLEN + XHCI_PORTREGS_START + 2901 XHCI_MAX_DEVS * sizeof(struct pci_xhci_portregs)); 2902 2903 /* dboff must be 32-bit aligned */ 2904 if (sc->dboff & 0x3) 2905 sc->dboff = (sc->dboff + 0x3) & ~0x3; 2906 2907 /* rtsoff must be 32-bytes aligned */ 2908 sc->rtsoff = XHCI_SET_RTSOFFSET(sc->dboff + (XHCI_MAX_SLOTS+1) * 32); 2909 if (sc->rtsoff & 0x1F) 2910 sc->rtsoff = (sc->rtsoff + 0x1F) & ~0x1F; 2911 2912 DPRINTF(("pci_xhci dboff: 0x%x, rtsoff: 0x%x", sc->dboff, 2913 sc->rtsoff)); 2914 2915 sc->opregs.usbsts = XHCI_STS_HCH; 2916 sc->opregs.pgsz = XHCI_PAGESIZE_4K; 2917 2918 pci_xhci_reset(sc); 2919 2920 sc->regsend = sc->rtsoff + 0x20 + 32; /* only 1 intrpter */ 2921 2922 /* 2923 * Set extended capabilities pointer to be after regsend; 2924 * value of xecp field is 32-bit offset. 2925 */ 2926 sc->hccparams1 |= XHCI_SET_HCCP1_XECP(sc->regsend/4); 2927 2928 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1E31); 2929 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086); 2930 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SERIALBUS); 2931 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_SERIALBUS_USB); 2932 pci_set_cfgdata8(pi, PCIR_PROGIF,PCIP_SERIALBUS_USB_XHCI); 2933 pci_set_cfgdata8(pi, PCI_USBREV, PCI_USB_REV_3_0); 2934 2935 pci_emul_add_msicap(pi, 1); 2936 2937 /* regsend + xecp registers */ 2938 pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, sc->regsend + 4*32); 2939 DPRINTF(("pci_xhci pci_emu_alloc: %d", sc->regsend + 4*32)); 2940 2941 2942 pci_lintr_request(pi); 2943 2944 pthread_mutex_init(&sc->mtx, NULL); 2945 2946 done: 2947 if (error) { 2948 free(sc); 2949 } 2950 2951 return (error); 2952 } 2953 2954 #ifdef BHYVE_SNAPSHOT 2955 static void 2956 pci_xhci_map_devs_slots(struct pci_xhci_softc *sc, int maps[]) 2957 { 2958 int i, j; 2959 struct pci_xhci_dev_emu *dev, *slot; 2960 2961 memset(maps, 0, sizeof(maps[0]) * XHCI_MAX_SLOTS); 2962 2963 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 2964 for (j = 1; j <= XHCI_MAX_DEVS; j++) { 2965 slot = XHCI_SLOTDEV_PTR(sc, i); 2966 dev = XHCI_DEVINST_PTR(sc, j); 2967 2968 if (slot == dev) 2969 maps[i] = j; 2970 } 2971 } 2972 } 2973 2974 static int 2975 pci_xhci_snapshot_ep(struct pci_xhci_softc *sc __unused, 2976 struct pci_xhci_dev_emu *dev, int idx, struct vm_snapshot_meta *meta) 2977 { 2978 int k; 2979 int ret; 2980 struct usb_data_xfer *xfer; 2981 struct usb_data_xfer_block *xfer_block; 2982 2983 /* some sanity checks */ 2984 if (meta->op == VM_SNAPSHOT_SAVE) 2985 xfer = dev->eps[idx].ep_xfer; 2986 2987 SNAPSHOT_VAR_OR_LEAVE(xfer, meta, ret, done); 2988 if (xfer == NULL) { 2989 ret = 0; 2990 goto done; 2991 } 2992 2993 if (meta->op == VM_SNAPSHOT_RESTORE) { 2994 pci_xhci_init_ep(dev, idx); 2995 xfer = dev->eps[idx].ep_xfer; 2996 } 2997 2998 /* save / restore proper */ 2999 for (k = 0; k < USB_MAX_XFER_BLOCKS; k++) { 3000 xfer_block = &xfer->data[k]; 3001 3002 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(xfer_block->buf, 3003 XHCI_GADDR_SIZE(xfer_block->buf), true, meta, ret, 3004 done); 3005 SNAPSHOT_VAR_OR_LEAVE(xfer_block->blen, meta, ret, done); 3006 SNAPSHOT_VAR_OR_LEAVE(xfer_block->bdone, meta, ret, done); 3007 SNAPSHOT_VAR_OR_LEAVE(xfer_block->processed, meta, ret, done); 3008 SNAPSHOT_VAR_OR_LEAVE(xfer_block->hci_data, meta, ret, done); 3009 SNAPSHOT_VAR_OR_LEAVE(xfer_block->ccs, meta, ret, done); 3010 SNAPSHOT_VAR_OR_LEAVE(xfer_block->streamid, meta, ret, done); 3011 SNAPSHOT_VAR_OR_LEAVE(xfer_block->trbnext, meta, ret, done); 3012 } 3013 3014 SNAPSHOT_VAR_OR_LEAVE(xfer->ureq, meta, ret, done); 3015 if (xfer->ureq) { 3016 /* xfer->ureq is not allocated at restore time */ 3017 if (meta->op == VM_SNAPSHOT_RESTORE) 3018 xfer->ureq = malloc(sizeof(struct usb_device_request)); 3019 3020 SNAPSHOT_BUF_OR_LEAVE(xfer->ureq, 3021 sizeof(struct usb_device_request), 3022 meta, ret, done); 3023 } 3024 3025 SNAPSHOT_VAR_OR_LEAVE(xfer->ndata, meta, ret, done); 3026 SNAPSHOT_VAR_OR_LEAVE(xfer->head, meta, ret, done); 3027 SNAPSHOT_VAR_OR_LEAVE(xfer->tail, meta, ret, done); 3028 3029 done: 3030 return (ret); 3031 } 3032 3033 static int 3034 pci_xhci_snapshot(struct vm_snapshot_meta *meta) 3035 { 3036 int i, j; 3037 int ret; 3038 int restore_idx; 3039 struct pci_devinst *pi; 3040 struct pci_xhci_softc *sc; 3041 struct pci_xhci_portregs *port; 3042 struct pci_xhci_dev_emu *dev; 3043 char dname[SNAP_DEV_NAME_LEN]; 3044 int maps[XHCI_MAX_SLOTS + 1]; 3045 3046 pi = meta->dev_data; 3047 sc = pi->pi_arg; 3048 3049 SNAPSHOT_VAR_OR_LEAVE(sc->caplength, meta, ret, done); 3050 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams1, meta, ret, done); 3051 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams2, meta, ret, done); 3052 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams3, meta, ret, done); 3053 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams1, meta, ret, done); 3054 SNAPSHOT_VAR_OR_LEAVE(sc->dboff, meta, ret, done); 3055 SNAPSHOT_VAR_OR_LEAVE(sc->rtsoff, meta, ret, done); 3056 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams2, meta, ret, done); 3057 SNAPSHOT_VAR_OR_LEAVE(sc->regsend, meta, ret, done); 3058 3059 /* opregs */ 3060 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbcmd, meta, ret, done); 3061 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbsts, meta, ret, done); 3062 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.pgsz, meta, ret, done); 3063 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dnctrl, meta, ret, done); 3064 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.crcr, meta, ret, done); 3065 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dcbaap, meta, ret, done); 3066 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.config, meta, ret, done); 3067 3068 /* opregs.cr_p */ 3069 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.cr_p, 3070 XHCI_GADDR_SIZE(sc->opregs.cr_p), true, meta, ret, done); 3071 3072 /* opregs.dcbaa_p */ 3073 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.dcbaa_p, 3074 XHCI_GADDR_SIZE(sc->opregs.dcbaa_p), true, meta, ret, done); 3075 3076 /* rtsregs */ 3077 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.mfindex, meta, ret, done); 3078 3079 /* rtsregs.intrreg */ 3080 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.iman, meta, ret, done); 3081 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.imod, meta, ret, done); 3082 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstsz, meta, ret, done); 3083 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.rsvd, meta, ret, done); 3084 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstba, meta, ret, done); 3085 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erdp, meta, ret, done); 3086 3087 /* rtsregs.erstba_p */ 3088 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erstba_p, 3089 XHCI_GADDR_SIZE(sc->rtsregs.erstba_p), true, meta, ret, done); 3090 3091 /* rtsregs.erst_p */ 3092 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erst_p, 3093 XHCI_GADDR_SIZE(sc->rtsregs.erst_p), true, meta, ret, done); 3094 3095 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_deq_seg, meta, ret, done); 3096 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_idx, meta, ret, done); 3097 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_seg, meta, ret, done); 3098 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_events_cnt, meta, ret, done); 3099 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.event_pcs, meta, ret, done); 3100 3101 /* sanity checking */ 3102 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3103 dev = XHCI_DEVINST_PTR(sc, i); 3104 if (dev == NULL) 3105 continue; 3106 3107 if (meta->op == VM_SNAPSHOT_SAVE) 3108 restore_idx = i; 3109 SNAPSHOT_VAR_OR_LEAVE(restore_idx, meta, ret, done); 3110 3111 /* check if the restored device (when restoring) is sane */ 3112 if (restore_idx != i) { 3113 fprintf(stderr, "%s: idx not matching: actual: %d, " 3114 "expected: %d\r\n", __func__, restore_idx, i); 3115 ret = EINVAL; 3116 goto done; 3117 } 3118 3119 if (meta->op == VM_SNAPSHOT_SAVE) { 3120 memset(dname, 0, sizeof(dname)); 3121 strncpy(dname, dev->dev_ue->ue_emu, sizeof(dname) - 1); 3122 } 3123 3124 SNAPSHOT_BUF_OR_LEAVE(dname, sizeof(dname), meta, ret, done); 3125 3126 if (meta->op == VM_SNAPSHOT_RESTORE) { 3127 dname[sizeof(dname) - 1] = '\0'; 3128 if (strcmp(dev->dev_ue->ue_emu, dname)) { 3129 fprintf(stderr, "%s: device names mismatch: " 3130 "actual: %s, expected: %s\r\n", 3131 __func__, dname, dev->dev_ue->ue_emu); 3132 3133 ret = EINVAL; 3134 goto done; 3135 } 3136 } 3137 } 3138 3139 /* portregs */ 3140 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3141 port = XHCI_PORTREG_PTR(sc, i); 3142 dev = XHCI_DEVINST_PTR(sc, i); 3143 3144 if (dev == NULL) 3145 continue; 3146 3147 SNAPSHOT_VAR_OR_LEAVE(port->portsc, meta, ret, done); 3148 SNAPSHOT_VAR_OR_LEAVE(port->portpmsc, meta, ret, done); 3149 SNAPSHOT_VAR_OR_LEAVE(port->portli, meta, ret, done); 3150 SNAPSHOT_VAR_OR_LEAVE(port->porthlpmc, meta, ret, done); 3151 } 3152 3153 /* slots */ 3154 if (meta->op == VM_SNAPSHOT_SAVE) 3155 pci_xhci_map_devs_slots(sc, maps); 3156 3157 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 3158 SNAPSHOT_VAR_OR_LEAVE(maps[i], meta, ret, done); 3159 3160 if (meta->op == VM_SNAPSHOT_SAVE) { 3161 dev = XHCI_SLOTDEV_PTR(sc, i); 3162 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 3163 if (maps[i] != 0) 3164 dev = XHCI_DEVINST_PTR(sc, maps[i]); 3165 else 3166 dev = NULL; 3167 3168 XHCI_SLOTDEV_PTR(sc, i) = dev; 3169 } else { 3170 /* error */ 3171 ret = EINVAL; 3172 goto done; 3173 } 3174 3175 if (dev == NULL) 3176 continue; 3177 3178 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(dev->dev_ctx, 3179 XHCI_GADDR_SIZE(dev->dev_ctx), true, meta, ret, done); 3180 3181 if (dev->dev_ctx != NULL) { 3182 for (j = 1; j < XHCI_MAX_ENDPOINTS; j++) { 3183 ret = pci_xhci_snapshot_ep(sc, dev, j, meta); 3184 if (ret != 0) 3185 goto done; 3186 } 3187 } 3188 3189 SNAPSHOT_VAR_OR_LEAVE(dev->dev_slotstate, meta, ret, done); 3190 3191 /* devices[i]->dev_sc */ 3192 dev->dev_ue->ue_snapshot(dev->dev_sc, meta); 3193 3194 /* devices[i]->hci */ 3195 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_address, meta, ret, done); 3196 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_port, meta, ret, done); 3197 } 3198 3199 SNAPSHOT_VAR_OR_LEAVE(sc->usb2_port_start, meta, ret, done); 3200 SNAPSHOT_VAR_OR_LEAVE(sc->usb3_port_start, meta, ret, done); 3201 3202 done: 3203 return (ret); 3204 } 3205 #endif 3206 3207 static const struct pci_devemu pci_de_xhci = { 3208 .pe_emu = "xhci", 3209 .pe_init = pci_xhci_init, 3210 .pe_legacy_config = pci_xhci_legacy_config, 3211 .pe_barwrite = pci_xhci_write, 3212 .pe_barread = pci_xhci_read, 3213 #ifdef BHYVE_SNAPSHOT 3214 .pe_snapshot = pci_xhci_snapshot, 3215 #endif 3216 }; 3217 PCI_EMUL_SET(pci_de_xhci); 3218