/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014 Leon Dang <ldang@nahannisys.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
   XHCI options:
    -s <n>,xhci,{devices}

   devices:
     tablet             USB tablet mouse
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/uio.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <unistd.h>

#include <dev/usb/usbdi.h>
#include <dev/usb/usb.h>
#include <dev/usb/usb_freebsd.h>
#include <xhcireg.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "pci_emul.h"
#include "pci_xhci.h"
#ifdef BHYVE_SNAPSHOT
#include "snapshot.h"
#endif
#include "usb_emul.h"


static int xhci_debug = 0;
#define	DPRINTF(params) if (xhci_debug) PRINTLN params
#define	WPRINTF(params) PRINTLN params


#define	XHCI_NAME		"xhci"
#define	XHCI_MAX_DEVS		8	/* 4 USB3 + 4 USB2 devs */

#define	XHCI_MAX_SLOTS		64	/* min allowed by Windows drivers */

/*
 * XHCI data structures can be up to 64k, but limit paddr_guest2host mapping
 * to 4k to avoid going over the guest physical memory barrier.
 */
#define	XHCI_PADDR_SZ		4096	/* paddr_guest2host max size */

#define	XHCI_ERST_MAX		0	/* max 2^entries event ring seg tbl */

#define	XHCI_CAPLEN		(4*8)	/* offset of op register space */
#define	XHCI_HCCPRAMS2		0x1C	/* offset of HCCPARAMS2 register */
#define	XHCI_PORTREGS_START	0x400
#define	XHCI_DOORBELL_MAX	256

#define	XHCI_STREAMS_MAX	1	/* 4-15 in XHCI spec */

/* caplength and hci-version registers */
#define	XHCI_SET_CAPLEN(x)		((x) & 0xFF)
#define	XHCI_SET_HCIVERSION(x)		(((x) & 0xFFFF) << 16)
#define	XHCI_GET_HCIVERSION(x)		(((x) >> 16) & 0xFFFF)

/* hcsparams1 register */
#define	XHCI_SET_HCSP1_MAXSLOTS(x)	((x) & 0xFF)
#define	XHCI_SET_HCSP1_MAXINTR(x)	(((x) & 0x7FF) << 8)
#define	XHCI_SET_HCSP1_MAXPORTS(x)	(((x) & 0xFF) << 24)

/* hcsparams2 register */
#define	XHCI_SET_HCSP2_IST(x)		((x) & 0x0F)
#define	XHCI_SET_HCSP2_ERSTMAX(x)	(((x) & 0x0F) << 4)
#define	XHCI_SET_HCSP2_MAXSCRATCH_HI(x)	(((x) & 0x1F) << 21)
#define	XHCI_SET_HCSP2_MAXSCRATCH_LO(x)	(((x) & 0x1F) << 27)

/* hcsparams3 register */
#define	XHCI_SET_HCSP3_U1EXITLATENCY(x)	((x) & 0xFF)
#define	XHCI_SET_HCSP3_U2EXITLATENCY(x)	(((x) & 0xFFFF) << 16)

/* hccparams1 register */
#define	XHCI_SET_HCCP1_AC64(x)		((x) & 0x01)
#define	XHCI_SET_HCCP1_BNC(x)		(((x) & 0x01) << 1)
#define	XHCI_SET_HCCP1_CSZ(x)		(((x) & 0x01) << 2)
#define	XHCI_SET_HCCP1_PPC(x)		(((x) & 0x01) << 3)
#define	XHCI_SET_HCCP1_PIND(x)		(((x) & 0x01) << 4)
#define	XHCI_SET_HCCP1_LHRC(x)		(((x) & 0x01) << 5)
#define	XHCI_SET_HCCP1_LTC(x)		(((x) & 0x01) << 6)
#define	XHCI_SET_HCCP1_NSS(x)		(((x) & 0x01) << 7)
#define	XHCI_SET_HCCP1_PAE(x)		(((x) & 0x01) << 8)
#define	XHCI_SET_HCCP1_SPC(x)		(((x) & 0x01) << 9)
#define	XHCI_SET_HCCP1_SEC(x)		(((x) & 0x01) << 10)
#define	XHCI_SET_HCCP1_CFC(x)		(((x) & 0x01) << 11)
#define	XHCI_SET_HCCP1_MAXPSA(x)	(((x) & 0x0F) << 12)
#define	XHCI_SET_HCCP1_XECP(x)		(((x) & 0xFFFF) << 16)

/* hccparams2 register */
#define	XHCI_SET_HCCP2_U3C(x)		((x) & 0x01)
#define	XHCI_SET_HCCP2_CMC(x)		(((x) & 0x01) << 1)
#define	XHCI_SET_HCCP2_FSC(x)		(((x) & 0x01) << 2)
#define	XHCI_SET_HCCP2_CTC(x)		(((x) & 0x01) << 3)
#define	XHCI_SET_HCCP2_LEC(x)		(((x) & 0x01) << 4)
#define	XHCI_SET_HCCP2_CIC(x)		(((x) & 0x01) << 5)

/* other registers */
#define	XHCI_SET_DOORBELL(x)		((x) & ~0x03)
#define	XHCI_SET_RTSOFFSET(x)		((x) & ~0x0F)

/* register masks */
#define	XHCI_PS_PLS_MASK		(0xF << 5)	/* port link state */
#define	XHCI_PS_SPEED_MASK		(0xF << 10)	/* port speed */
#define	XHCI_PS_PIC_MASK		(0x3 << 14)	/* port indicator */

/* port register set */
#define	XHCI_PORTREGS_BASE		0x400		/* base offset */
#define	XHCI_PORTREGS_PORT0		0x3F0
#define	XHCI_PORTREGS_SETSZ		0x10		/* size of a set */

#define	MASK_64_HI(x)			((x) & ~0xFFFFFFFFULL)
#define	MASK_64_LO(x)			((x) & 0xFFFFFFFFULL)

#define	FIELD_REPLACE(a,b,m,s)		(((a) & ~((m) << (s))) | \
					(((b) & (m)) << (s)))
#define	FIELD_COPY(a,b,m,s)		(((a) & ~((m) << (s))) | \
					(((b) & ((m) << (s)))))

#define	SNAP_DEV_NAME_LEN 128

struct pci_xhci_trb_ring {
	uint64_t ringaddr;		/* current dequeue guest address */
	uint32_t ccs;			/* consumer cycle state */
};

/* device endpoint transfer/stream rings */
struct pci_xhci_dev_ep {
	union {
		struct xhci_trb		*_epu_tr;
		struct xhci_stream_ctx	*_epu_sctx;
	} _ep_trbsctx;
#define	ep_tr		_ep_trbsctx._epu_tr
#define	ep_sctx		_ep_trbsctx._epu_sctx

	/*
	 * Caches the value of MaxPStreams from the endpoint context
	 * when an endpoint is initialized and is used to validate the
	 * use of ep_ringaddr vs ep_sctx_trbs[] as well as the length
	 * of ep_sctx_trbs[].
	 */
	uint32_t ep_MaxPStreams;
	union {
		struct pci_xhci_trb_ring _epu_trb;
		struct pci_xhci_trb_ring *_epu_sctx_trbs;
	} _ep_trb_rings;
#define	ep_ringaddr	_ep_trb_rings._epu_trb.ringaddr
#define	ep_ccs		_ep_trb_rings._epu_trb.ccs
#define	ep_sctx_trbs	_ep_trb_rings._epu_sctx_trbs

	struct usb_data_xfer *ep_xfer;	/* transfer chain */
};

/* device context base address array: maps slot->device context */
struct xhci_dcbaa {
	uint64_t dcba[USB_MAX_DEVICES+1]; /* xhci_dev_ctx ptrs */
};

/* port status registers */
struct pci_xhci_portregs {
	uint32_t	portsc;		/* port status and control */
	uint32_t	portpmsc;	/* port pwr mgmt status & control */
	uint32_t	portli;		/* port link info */
	uint32_t	porthlpmc;	/* port hardware LPM control */
} __packed;
#define	XHCI_PS_SPEED_SET(x)	(((x) & 0xF) << 10)

/* xHC operational registers */
struct pci_xhci_opregs {
	uint32_t	usbcmd;		/* usb command */
	uint32_t	usbsts;		/* usb status */
	uint32_t	pgsz;		/* page size */
	uint32_t	dnctrl;		/* device notification control */
	uint64_t	crcr;		/* command ring control */
	uint64_t	dcbaap;		/* device ctx base addr array ptr */
	uint32_t	config;		/* configure */

	/* guest mapped addresses: */
	struct xhci_trb	*cr_p;		/* crcr dequeue */
	struct xhci_dcbaa *dcbaa_p;	/* dev ctx array ptr */
};

/* xHC runtime registers */
struct pci_xhci_rtsregs {
	uint32_t	mfindex;	/* microframe index */
	struct {			/* interrupter register set */
		uint32_t	iman;	/* interrupter management */
		uint32_t	imod;	/* interrupter moderation */
		uint32_t	erstsz;	/* event ring segment table size */
		uint32_t	rsvd;
		uint64_t	erstba;	/* event ring seg-tbl base addr */
		uint64_t	erdp;	/* event ring dequeue ptr */
	} intrreg __packed;

	/* guest mapped addresses */
	struct xhci_event_ring_seg *erstba_p;
	struct xhci_trb	*erst_p;	/* event ring segment tbl */
	int		er_deq_seg;	/* event ring dequeue segment */
	int		er_enq_idx;	/* event ring enqueue index - xHCI */
	int		er_enq_seg;	/* event ring enqueue segment */
	uint32_t	er_events_cnt;	/* number of events in ER */
	uint32_t	event_pcs;	/* producer cycle state flag */
};


struct pci_xhci_softc;


/*
 * USB device emulation container.
 * This is referenced from usb_hci->hci_sc; 1 pci_xhci_dev_emu for each
 * emulated device instance.
249 */ 250 struct pci_xhci_dev_emu { 251 struct pci_xhci_softc *xsc; 252 253 /* XHCI contexts */ 254 struct xhci_dev_ctx *dev_ctx; 255 struct pci_xhci_dev_ep eps[XHCI_MAX_ENDPOINTS]; 256 int dev_slotstate; 257 258 struct usb_devemu *dev_ue; /* USB emulated dev */ 259 void *dev_sc; /* device's softc */ 260 261 struct usb_hci hci; 262 }; 263 264 struct pci_xhci_softc { 265 struct pci_devinst *xsc_pi; 266 267 pthread_mutex_t mtx; 268 269 uint32_t caplength; /* caplen & hciversion */ 270 uint32_t hcsparams1; /* structural parameters 1 */ 271 uint32_t hcsparams2; /* structural parameters 2 */ 272 uint32_t hcsparams3; /* structural parameters 3 */ 273 uint32_t hccparams1; /* capability parameters 1 */ 274 uint32_t dboff; /* doorbell offset */ 275 uint32_t rtsoff; /* runtime register space offset */ 276 uint32_t hccparams2; /* capability parameters 2 */ 277 278 uint32_t regsend; /* end of configuration registers */ 279 280 struct pci_xhci_opregs opregs; 281 struct pci_xhci_rtsregs rtsregs; 282 283 struct pci_xhci_portregs *portregs; 284 struct pci_xhci_dev_emu **devices; /* XHCI[port] = device */ 285 struct pci_xhci_dev_emu **slots; /* slots assigned from 1 */ 286 287 int usb2_port_start; 288 int usb3_port_start; 289 }; 290 291 292 /* port and slot numbering start from 1 */ 293 #define XHCI_PORTREG_PTR(x,n) &((x)->portregs[(n) - 1]) 294 #define XHCI_DEVINST_PTR(x,n) ((x)->devices[(n) - 1]) 295 #define XHCI_SLOTDEV_PTR(x,n) ((x)->slots[(n) - 1]) 296 297 #define XHCI_HALTED(sc) ((sc)->opregs.usbsts & XHCI_STS_HCH) 298 299 #define XHCI_GADDR_SIZE(a) (XHCI_PADDR_SZ - \ 300 (((uint64_t) (a)) & (XHCI_PADDR_SZ - 1))) 301 #define XHCI_GADDR(sc,a) paddr_guest2host((sc)->xsc_pi->pi_vmctx, \ 302 (a), XHCI_GADDR_SIZE(a)) 303 304 static int xhci_in_use; 305 306 /* map USB errors to XHCI */ 307 static const int xhci_usb_errors[USB_ERR_MAX] = { 308 [USB_ERR_NORMAL_COMPLETION] = XHCI_TRB_ERROR_SUCCESS, 309 [USB_ERR_PENDING_REQUESTS] = XHCI_TRB_ERROR_RESOURCE, 310 [USB_ERR_NOT_STARTED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 311 [USB_ERR_INVAL] = XHCI_TRB_ERROR_INVALID, 312 [USB_ERR_NOMEM] = XHCI_TRB_ERROR_RESOURCE, 313 [USB_ERR_CANCELLED] = XHCI_TRB_ERROR_STOPPED, 314 [USB_ERR_BAD_ADDRESS] = XHCI_TRB_ERROR_PARAMETER, 315 [USB_ERR_BAD_BUFSIZE] = XHCI_TRB_ERROR_PARAMETER, 316 [USB_ERR_BAD_FLAG] = XHCI_TRB_ERROR_PARAMETER, 317 [USB_ERR_NO_CALLBACK] = XHCI_TRB_ERROR_STALL, 318 [USB_ERR_IN_USE] = XHCI_TRB_ERROR_RESOURCE, 319 [USB_ERR_NO_ADDR] = XHCI_TRB_ERROR_RESOURCE, 320 [USB_ERR_NO_PIPE] = XHCI_TRB_ERROR_RESOURCE, 321 [USB_ERR_ZERO_NFRAMES] = XHCI_TRB_ERROR_UNDEFINED, 322 [USB_ERR_ZERO_MAXP] = XHCI_TRB_ERROR_UNDEFINED, 323 [USB_ERR_SET_ADDR_FAILED] = XHCI_TRB_ERROR_RESOURCE, 324 [USB_ERR_NO_POWER] = XHCI_TRB_ERROR_ENDP_NOT_ON, 325 [USB_ERR_TOO_DEEP] = XHCI_TRB_ERROR_RESOURCE, 326 [USB_ERR_IOERROR] = XHCI_TRB_ERROR_TRB, 327 [USB_ERR_NOT_CONFIGURED] = XHCI_TRB_ERROR_ENDP_NOT_ON, 328 [USB_ERR_TIMEOUT] = XHCI_TRB_ERROR_CMD_ABORTED, 329 [USB_ERR_SHORT_XFER] = XHCI_TRB_ERROR_SHORT_PKT, 330 [USB_ERR_STALLED] = XHCI_TRB_ERROR_STALL, 331 [USB_ERR_INTERRUPTED] = XHCI_TRB_ERROR_CMD_ABORTED, 332 [USB_ERR_DMA_LOAD_FAILED] = XHCI_TRB_ERROR_DATA_BUF, 333 [USB_ERR_BAD_CONTEXT] = XHCI_TRB_ERROR_TRB, 334 [USB_ERR_NO_ROOT_HUB] = XHCI_TRB_ERROR_UNDEFINED, 335 [USB_ERR_NO_INTR_THREAD] = XHCI_TRB_ERROR_UNDEFINED, 336 [USB_ERR_NOT_LOCKED] = XHCI_TRB_ERROR_UNDEFINED, 337 }; 338 #define USB_TO_XHCI_ERR(e) ((e) < USB_ERR_MAX ? 
xhci_usb_errors[(e)] : \ 339 XHCI_TRB_ERROR_INVALID) 340 341 static int pci_xhci_insert_event(struct pci_xhci_softc *sc, 342 struct xhci_trb *evtrb, int do_intr); 343 static void pci_xhci_dump_trb(struct xhci_trb *trb); 344 static void pci_xhci_assert_interrupt(struct pci_xhci_softc *sc); 345 static void pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot); 346 static void pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm); 347 static void pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, 348 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 349 struct xhci_endp_ctx *ep_ctx, uint32_t streamid, 350 uint64_t ringaddr, int ccs); 351 352 static void 353 pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode, 354 uint32_t evtype) 355 { 356 evtrb->qwTrb0 = port << 24; 357 evtrb->dwTrb2 = XHCI_TRB_2_ERROR_SET(errcode); 358 evtrb->dwTrb3 = XHCI_TRB_3_TYPE_SET(evtype); 359 } 360 361 362 /* controller reset */ 363 static void 364 pci_xhci_reset(struct pci_xhci_softc *sc) 365 { 366 int i; 367 368 sc->rtsregs.er_enq_idx = 0; 369 sc->rtsregs.er_events_cnt = 0; 370 sc->rtsregs.event_pcs = 1; 371 372 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 373 pci_xhci_reset_slot(sc, i); 374 } 375 } 376 377 static uint32_t 378 pci_xhci_usbcmd_write(struct pci_xhci_softc *sc, uint32_t cmd) 379 { 380 int do_intr = 0; 381 int i; 382 383 if (cmd & XHCI_CMD_RS) { 384 do_intr = (sc->opregs.usbcmd & XHCI_CMD_RS) == 0; 385 386 sc->opregs.usbcmd |= XHCI_CMD_RS; 387 sc->opregs.usbsts &= ~XHCI_STS_HCH; 388 sc->opregs.usbsts |= XHCI_STS_PCD; 389 390 /* Queue port change event on controller run from stop */ 391 if (do_intr) 392 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 393 struct pci_xhci_dev_emu *dev; 394 struct pci_xhci_portregs *port; 395 struct xhci_trb evtrb; 396 397 if ((dev = XHCI_DEVINST_PTR(sc, i)) == NULL) 398 continue; 399 400 port = XHCI_PORTREG_PTR(sc, i); 401 port->portsc |= XHCI_PS_CSC | XHCI_PS_CCS; 402 port->portsc &= ~XHCI_PS_PLS_MASK; 403 404 /* 405 * XHCI 4.19.3 USB2 RxDetect->Polling, 406 * USB3 Polling->U0 407 */ 408 if (dev->dev_ue->ue_usbver == 2) 409 port->portsc |= 410 XHCI_PS_PLS_SET(UPS_PORT_LS_POLL); 411 else 412 port->portsc |= 413 XHCI_PS_PLS_SET(UPS_PORT_LS_U0); 414 415 pci_xhci_set_evtrb(&evtrb, i, 416 XHCI_TRB_ERROR_SUCCESS, 417 XHCI_TRB_EVENT_PORT_STS_CHANGE); 418 419 if (pci_xhci_insert_event(sc, &evtrb, 0) != 420 XHCI_TRB_ERROR_SUCCESS) 421 break; 422 } 423 } else { 424 sc->opregs.usbcmd &= ~XHCI_CMD_RS; 425 sc->opregs.usbsts |= XHCI_STS_HCH; 426 sc->opregs.usbsts &= ~XHCI_STS_PCD; 427 } 428 429 /* start execution of schedule; stop when set to 0 */ 430 cmd |= sc->opregs.usbcmd & XHCI_CMD_RS; 431 432 if (cmd & XHCI_CMD_HCRST) { 433 /* reset controller */ 434 pci_xhci_reset(sc); 435 cmd &= ~XHCI_CMD_HCRST; 436 } 437 438 cmd &= ~(XHCI_CMD_CSS | XHCI_CMD_CRS); 439 440 if (do_intr) 441 pci_xhci_assert_interrupt(sc); 442 443 return (cmd); 444 } 445 446 static void 447 pci_xhci_portregs_write(struct pci_xhci_softc *sc, uint64_t offset, 448 uint64_t value) 449 { 450 struct xhci_trb evtrb; 451 struct pci_xhci_portregs *p; 452 int port; 453 uint32_t oldpls, newpls; 454 455 if (sc->portregs == NULL) 456 return; 457 458 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ; 459 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ; 460 461 DPRINTF(("pci_xhci: portregs wr offset 0x%lx, port %u: 0x%lx", 462 offset, port, value)); 463 464 assert(port >= 0); 465 466 if (port > XHCI_MAX_DEVS) { 467 DPRINTF(("pci_xhci: portregs_write port %d > 
ndevices", 468 port)); 469 return; 470 } 471 472 if (XHCI_DEVINST_PTR(sc, port) == NULL) { 473 DPRINTF(("pci_xhci: portregs_write to unattached port %d", 474 port)); 475 } 476 477 p = XHCI_PORTREG_PTR(sc, port); 478 switch (offset) { 479 case 0: 480 /* port reset or warm reset */ 481 if (value & (XHCI_PS_PR | XHCI_PS_WPR)) { 482 pci_xhci_reset_port(sc, port, value & XHCI_PS_WPR); 483 break; 484 } 485 486 if ((p->portsc & XHCI_PS_PP) == 0) { 487 WPRINTF(("pci_xhci: portregs_write to unpowered " 488 "port %d", port)); 489 break; 490 } 491 492 /* Port status and control register */ 493 oldpls = XHCI_PS_PLS_GET(p->portsc); 494 newpls = XHCI_PS_PLS_GET(value); 495 496 p->portsc &= XHCI_PS_PED | XHCI_PS_PLS_MASK | 497 XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK; 498 499 if (XHCI_DEVINST_PTR(sc, port)) 500 p->portsc |= XHCI_PS_CCS; 501 502 p->portsc |= (value & 503 ~(XHCI_PS_OCA | 504 XHCI_PS_PR | 505 XHCI_PS_PED | 506 XHCI_PS_PLS_MASK | /* link state */ 507 XHCI_PS_SPEED_MASK | 508 XHCI_PS_PIC_MASK | /* port indicator */ 509 XHCI_PS_LWS | XHCI_PS_DR | XHCI_PS_WPR)); 510 511 /* clear control bits */ 512 p->portsc &= ~(value & 513 (XHCI_PS_CSC | 514 XHCI_PS_PEC | 515 XHCI_PS_WRC | 516 XHCI_PS_OCC | 517 XHCI_PS_PRC | 518 XHCI_PS_PLC | 519 XHCI_PS_CEC | 520 XHCI_PS_CAS)); 521 522 /* port disable request; for USB3, don't care */ 523 if (value & XHCI_PS_PED) 524 DPRINTF(("Disable port %d request", port)); 525 526 if (!(value & XHCI_PS_LWS)) 527 break; 528 529 DPRINTF(("Port new PLS: %d", newpls)); 530 switch (newpls) { 531 case 0: /* U0 */ 532 case 3: /* U3 */ 533 if (oldpls != newpls) { 534 p->portsc &= ~XHCI_PS_PLS_MASK; 535 p->portsc |= XHCI_PS_PLS_SET(newpls) | 536 XHCI_PS_PLC; 537 538 if (oldpls != 0 && newpls == 0) { 539 pci_xhci_set_evtrb(&evtrb, port, 540 XHCI_TRB_ERROR_SUCCESS, 541 XHCI_TRB_EVENT_PORT_STS_CHANGE); 542 543 pci_xhci_insert_event(sc, &evtrb, 1); 544 } 545 } 546 break; 547 548 default: 549 DPRINTF(("Unhandled change port %d PLS %u", 550 port, newpls)); 551 break; 552 } 553 break; 554 case 4: 555 /* Port power management status and control register */ 556 p->portpmsc = value; 557 break; 558 case 8: 559 /* Port link information register */ 560 DPRINTF(("pci_xhci attempted write to PORTLI, port %d", 561 port)); 562 break; 563 case 12: 564 /* 565 * Port hardware LPM control register. 566 * For USB3, this register is reserved. 
567 */ 568 p->porthlpmc = value; 569 break; 570 default: 571 DPRINTF(("pci_xhci: unaligned portreg write offset %#lx", 572 offset)); 573 break; 574 } 575 } 576 577 static struct xhci_dev_ctx * 578 pci_xhci_get_dev_ctx(struct pci_xhci_softc *sc, uint32_t slot) 579 { 580 uint64_t devctx_addr; 581 struct xhci_dev_ctx *devctx; 582 583 assert(slot > 0 && slot <= XHCI_MAX_DEVS); 584 assert(XHCI_SLOTDEV_PTR(sc, slot) != NULL); 585 assert(sc->opregs.dcbaa_p != NULL); 586 587 devctx_addr = sc->opregs.dcbaa_p->dcba[slot]; 588 589 if (devctx_addr == 0) { 590 DPRINTF(("get_dev_ctx devctx_addr == 0")); 591 return (NULL); 592 } 593 594 DPRINTF(("pci_xhci: get dev ctx, slot %u devctx addr %016lx", 595 slot, devctx_addr)); 596 devctx = XHCI_GADDR(sc, devctx_addr & ~0x3FUL); 597 598 return (devctx); 599 } 600 601 static struct xhci_trb * 602 pci_xhci_trb_next(struct pci_xhci_softc *sc, struct xhci_trb *curtrb, 603 uint64_t *guestaddr) 604 { 605 struct xhci_trb *next; 606 607 assert(curtrb != NULL); 608 609 if (XHCI_TRB_3_TYPE_GET(curtrb->dwTrb3) == XHCI_TRB_TYPE_LINK) { 610 if (guestaddr) 611 *guestaddr = curtrb->qwTrb0 & ~0xFUL; 612 613 next = XHCI_GADDR(sc, curtrb->qwTrb0 & ~0xFUL); 614 } else { 615 if (guestaddr) 616 *guestaddr += sizeof(struct xhci_trb) & ~0xFUL; 617 618 next = curtrb + 1; 619 } 620 621 return (next); 622 } 623 624 static void 625 pci_xhci_assert_interrupt(struct pci_xhci_softc *sc) 626 { 627 628 sc->rtsregs.intrreg.erdp |= XHCI_ERDP_LO_BUSY; 629 sc->rtsregs.intrreg.iman |= XHCI_IMAN_INTR_PEND; 630 sc->opregs.usbsts |= XHCI_STS_EINT; 631 632 /* only trigger interrupt if permitted */ 633 if ((sc->opregs.usbcmd & XHCI_CMD_INTE) && 634 (sc->rtsregs.intrreg.iman & XHCI_IMAN_INTR_ENA)) { 635 if (pci_msi_enabled(sc->xsc_pi)) 636 pci_generate_msi(sc->xsc_pi, 0); 637 else 638 pci_lintr_assert(sc->xsc_pi); 639 } 640 } 641 642 static void 643 pci_xhci_deassert_interrupt(struct pci_xhci_softc *sc) 644 { 645 646 if (!pci_msi_enabled(sc->xsc_pi)) 647 pci_lintr_assert(sc->xsc_pi); 648 } 649 650 static void 651 pci_xhci_init_ep(struct pci_xhci_dev_emu *dev, int epid) 652 { 653 struct xhci_dev_ctx *dev_ctx; 654 struct pci_xhci_dev_ep *devep; 655 struct xhci_endp_ctx *ep_ctx; 656 uint32_t i, pstreams; 657 658 dev_ctx = dev->dev_ctx; 659 ep_ctx = &dev_ctx->ctx_ep[epid]; 660 devep = &dev->eps[epid]; 661 pstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0); 662 if (pstreams > 0) { 663 DPRINTF(("init_ep %d with pstreams %d", epid, pstreams)); 664 assert(devep->ep_sctx_trbs == NULL); 665 666 devep->ep_sctx = XHCI_GADDR(dev->xsc, ep_ctx->qwEpCtx2 & 667 XHCI_EPCTX_2_TR_DQ_PTR_MASK); 668 devep->ep_sctx_trbs = calloc(pstreams, 669 sizeof(struct pci_xhci_trb_ring)); 670 for (i = 0; i < pstreams; i++) { 671 devep->ep_sctx_trbs[i].ringaddr = 672 devep->ep_sctx[i].qwSctx0 & 673 XHCI_SCTX_0_TR_DQ_PTR_MASK; 674 devep->ep_sctx_trbs[i].ccs = 675 XHCI_SCTX_0_DCS_GET(devep->ep_sctx[i].qwSctx0); 676 } 677 } else { 678 DPRINTF(("init_ep %d with no pstreams", epid)); 679 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & 680 XHCI_EPCTX_2_TR_DQ_PTR_MASK; 681 devep->ep_ccs = XHCI_EPCTX_2_DCS_GET(ep_ctx->qwEpCtx2); 682 devep->ep_tr = XHCI_GADDR(dev->xsc, devep->ep_ringaddr); 683 DPRINTF(("init_ep tr DCS %x", devep->ep_ccs)); 684 } 685 devep->ep_MaxPStreams = pstreams; 686 687 if (devep->ep_xfer == NULL) { 688 devep->ep_xfer = malloc(sizeof(struct usb_data_xfer)); 689 USB_DATA_XFER_INIT(devep->ep_xfer); 690 } 691 } 692 693 static void 694 pci_xhci_disable_ep(struct pci_xhci_dev_emu *dev, int epid) 695 { 696 struct xhci_dev_ctx 
*dev_ctx; 697 struct pci_xhci_dev_ep *devep; 698 struct xhci_endp_ctx *ep_ctx; 699 700 DPRINTF(("pci_xhci disable_ep %d", epid)); 701 702 dev_ctx = dev->dev_ctx; 703 ep_ctx = &dev_ctx->ctx_ep[epid]; 704 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_DISABLED; 705 706 devep = &dev->eps[epid]; 707 if (devep->ep_MaxPStreams > 0) 708 free(devep->ep_sctx_trbs); 709 710 if (devep->ep_xfer != NULL) { 711 free(devep->ep_xfer); 712 devep->ep_xfer = NULL; 713 } 714 715 memset(devep, 0, sizeof(struct pci_xhci_dev_ep)); 716 } 717 718 719 /* reset device at slot and data structures related to it */ 720 static void 721 pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot) 722 { 723 struct pci_xhci_dev_emu *dev; 724 725 dev = XHCI_SLOTDEV_PTR(sc, slot); 726 727 if (!dev) { 728 DPRINTF(("xhci reset unassigned slot (%d)?", slot)); 729 } else { 730 dev->dev_slotstate = XHCI_ST_DISABLED; 731 } 732 733 /* TODO: reset ring buffer pointers */ 734 } 735 736 static int 737 pci_xhci_insert_event(struct pci_xhci_softc *sc, struct xhci_trb *evtrb, 738 int do_intr) 739 { 740 struct pci_xhci_rtsregs *rts; 741 uint64_t erdp; 742 int erdp_idx; 743 int err; 744 struct xhci_trb *evtrbptr; 745 746 err = XHCI_TRB_ERROR_SUCCESS; 747 748 rts = &sc->rtsregs; 749 750 erdp = rts->intrreg.erdp & ~0xF; 751 erdp_idx = (erdp - rts->erstba_p[rts->er_deq_seg].qwEvrsTablePtr) / 752 sizeof(struct xhci_trb); 753 754 DPRINTF(("pci_xhci: insert event 0[%lx] 2[%x] 3[%x]", 755 evtrb->qwTrb0, evtrb->dwTrb2, evtrb->dwTrb3)); 756 DPRINTF(("\terdp idx %d/seg %d, enq idx %d/seg %d, pcs %u", 757 erdp_idx, rts->er_deq_seg, rts->er_enq_idx, 758 rts->er_enq_seg, rts->event_pcs)); 759 DPRINTF(("\t(erdp=0x%lx, erst=0x%lx, tblsz=%u, do_intr %d)", 760 erdp, rts->erstba_p->qwEvrsTablePtr, 761 rts->erstba_p->dwEvrsTableSize, do_intr)); 762 763 evtrbptr = &rts->erst_p[rts->er_enq_idx]; 764 765 /* TODO: multi-segment table */ 766 if (rts->er_events_cnt >= rts->erstba_p->dwEvrsTableSize) { 767 DPRINTF(("pci_xhci[%d] cannot insert event; ring full", 768 __LINE__)); 769 err = XHCI_TRB_ERROR_EV_RING_FULL; 770 goto done; 771 } 772 773 if (rts->er_events_cnt == rts->erstba_p->dwEvrsTableSize - 1) { 774 struct xhci_trb errev; 775 776 if ((evtrbptr->dwTrb3 & 0x1) == (rts->event_pcs & 0x1)) { 777 778 DPRINTF(("pci_xhci[%d] insert evt err: ring full", 779 __LINE__)); 780 781 errev.qwTrb0 = 0; 782 errev.dwTrb2 = XHCI_TRB_2_ERROR_SET( 783 XHCI_TRB_ERROR_EV_RING_FULL); 784 errev.dwTrb3 = XHCI_TRB_3_TYPE_SET( 785 XHCI_TRB_EVENT_HOST_CTRL) | 786 rts->event_pcs; 787 rts->er_events_cnt++; 788 memcpy(&rts->erst_p[rts->er_enq_idx], &errev, 789 sizeof(struct xhci_trb)); 790 rts->er_enq_idx = (rts->er_enq_idx + 1) % 791 rts->erstba_p->dwEvrsTableSize; 792 err = XHCI_TRB_ERROR_EV_RING_FULL; 793 do_intr = 1; 794 795 goto done; 796 } 797 } else { 798 rts->er_events_cnt++; 799 } 800 801 evtrb->dwTrb3 &= ~XHCI_TRB_3_CYCLE_BIT; 802 evtrb->dwTrb3 |= rts->event_pcs; 803 804 memcpy(&rts->erst_p[rts->er_enq_idx], evtrb, sizeof(struct xhci_trb)); 805 rts->er_enq_idx = (rts->er_enq_idx + 1) % 806 rts->erstba_p->dwEvrsTableSize; 807 808 if (rts->er_enq_idx == 0) 809 rts->event_pcs ^= 1; 810 811 done: 812 if (do_intr) 813 pci_xhci_assert_interrupt(sc); 814 815 return (err); 816 } 817 818 static uint32_t 819 pci_xhci_cmd_enable_slot(struct pci_xhci_softc *sc, uint32_t *slot) 820 { 821 struct pci_xhci_dev_emu *dev; 822 uint32_t cmderr; 823 int i; 824 825 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 826 if (sc->portregs != NULL) 827 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 828 dev = 
XHCI_SLOTDEV_PTR(sc, i); 829 if (dev && dev->dev_slotstate == XHCI_ST_DISABLED) { 830 *slot = i; 831 dev->dev_slotstate = XHCI_ST_ENABLED; 832 cmderr = XHCI_TRB_ERROR_SUCCESS; 833 dev->hci.hci_address = i; 834 break; 835 } 836 } 837 838 DPRINTF(("pci_xhci enable slot (error=%d) slot %u", 839 cmderr != XHCI_TRB_ERROR_SUCCESS, *slot)); 840 841 return (cmderr); 842 } 843 844 static uint32_t 845 pci_xhci_cmd_disable_slot(struct pci_xhci_softc *sc, uint32_t slot) 846 { 847 struct pci_xhci_dev_emu *dev; 848 uint32_t cmderr; 849 850 DPRINTF(("pci_xhci disable slot %u", slot)); 851 852 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 853 if (sc->portregs == NULL) 854 goto done; 855 856 if (slot > XHCI_MAX_SLOTS) { 857 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 858 goto done; 859 } 860 861 dev = XHCI_SLOTDEV_PTR(sc, slot); 862 if (dev) { 863 if (dev->dev_slotstate == XHCI_ST_DISABLED) { 864 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 865 } else { 866 dev->dev_slotstate = XHCI_ST_DISABLED; 867 cmderr = XHCI_TRB_ERROR_SUCCESS; 868 /* TODO: reset events and endpoints */ 869 } 870 } else 871 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 872 873 done: 874 return (cmderr); 875 } 876 877 static uint32_t 878 pci_xhci_cmd_reset_device(struct pci_xhci_softc *sc, uint32_t slot) 879 { 880 struct pci_xhci_dev_emu *dev; 881 struct xhci_dev_ctx *dev_ctx; 882 struct xhci_endp_ctx *ep_ctx; 883 uint32_t cmderr; 884 int i; 885 886 cmderr = XHCI_TRB_ERROR_NO_SLOTS; 887 if (sc->portregs == NULL) 888 goto done; 889 890 DPRINTF(("pci_xhci reset device slot %u", slot)); 891 892 dev = XHCI_SLOTDEV_PTR(sc, slot); 893 if (!dev || dev->dev_slotstate == XHCI_ST_DISABLED) 894 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 895 else { 896 dev->dev_slotstate = XHCI_ST_DEFAULT; 897 898 dev->hci.hci_address = 0; 899 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 900 901 /* slot state */ 902 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 903 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_DEFAULT, 904 0x1F, 27); 905 906 /* number of contexts */ 907 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE( 908 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); 909 910 /* reset all eps other than ep-0 */ 911 for (i = 2; i <= 31; i++) { 912 ep_ctx = &dev_ctx->ctx_ep[i]; 913 ep_ctx->dwEpCtx0 = FIELD_REPLACE( ep_ctx->dwEpCtx0, 914 XHCI_ST_EPCTX_DISABLED, 0x7, 0); 915 } 916 917 cmderr = XHCI_TRB_ERROR_SUCCESS; 918 } 919 920 pci_xhci_reset_slot(sc, slot); 921 922 done: 923 return (cmderr); 924 } 925 926 static uint32_t 927 pci_xhci_cmd_address_device(struct pci_xhci_softc *sc, uint32_t slot, 928 struct xhci_trb *trb) 929 { 930 struct pci_xhci_dev_emu *dev; 931 struct xhci_input_dev_ctx *input_ctx; 932 struct xhci_slot_ctx *islot_ctx; 933 struct xhci_dev_ctx *dev_ctx; 934 struct xhci_endp_ctx *ep0_ctx; 935 uint32_t cmderr; 936 937 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 938 islot_ctx = &input_ctx->ctx_slot; 939 ep0_ctx = &input_ctx->ctx_ep[1]; 940 941 cmderr = XHCI_TRB_ERROR_SUCCESS; 942 943 DPRINTF(("pci_xhci: address device, input ctl: D 0x%08x A 0x%08x,", 944 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1)); 945 DPRINTF((" slot %08x %08x %08x %08x", 946 islot_ctx->dwSctx0, islot_ctx->dwSctx1, 947 islot_ctx->dwSctx2, islot_ctx->dwSctx3)); 948 DPRINTF((" ep0 %08x %08x %016lx %08x", 949 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 950 ep0_ctx->dwEpCtx4)); 951 952 /* when setting address: drop-ctx=0, add-ctx=slot+ep0 */ 953 if ((input_ctx->ctx_input.dwInCtx0 != 0) || 954 (input_ctx->ctx_input.dwInCtx1 & 0x03) != 0x03) { 955 DPRINTF(("pci_xhci: address device, input ctl invalid")); 956 cmderr = 
XHCI_TRB_ERROR_TRB; 957 goto done; 958 } 959 960 /* assign address to slot */ 961 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 962 963 DPRINTF(("pci_xhci: address device, dev ctx")); 964 DPRINTF((" slot %08x %08x %08x %08x", 965 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 966 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 967 968 dev = XHCI_SLOTDEV_PTR(sc, slot); 969 assert(dev != NULL); 970 971 dev->hci.hci_address = slot; 972 dev->dev_ctx = dev_ctx; 973 974 if (dev->dev_ue->ue_reset == NULL || 975 dev->dev_ue->ue_reset(dev->dev_sc) < 0) { 976 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 977 goto done; 978 } 979 980 memcpy(&dev_ctx->ctx_slot, islot_ctx, sizeof(struct xhci_slot_ctx)); 981 982 dev_ctx->ctx_slot.dwSctx3 = 983 XHCI_SCTX_3_SLOT_STATE_SET(XHCI_ST_SLCTX_ADDRESSED) | 984 XHCI_SCTX_3_DEV_ADDR_SET(slot); 985 986 memcpy(&dev_ctx->ctx_ep[1], ep0_ctx, sizeof(struct xhci_endp_ctx)); 987 ep0_ctx = &dev_ctx->ctx_ep[1]; 988 ep0_ctx->dwEpCtx0 = (ep0_ctx->dwEpCtx0 & ~0x7) | 989 XHCI_EPCTX_0_EPSTATE_SET(XHCI_ST_EPCTX_RUNNING); 990 991 pci_xhci_init_ep(dev, 1); 992 993 dev->dev_slotstate = XHCI_ST_ADDRESSED; 994 995 DPRINTF(("pci_xhci: address device, output ctx")); 996 DPRINTF((" slot %08x %08x %08x %08x", 997 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 998 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 999 DPRINTF((" ep0 %08x %08x %016lx %08x", 1000 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1001 ep0_ctx->dwEpCtx4)); 1002 1003 done: 1004 return (cmderr); 1005 } 1006 1007 static uint32_t 1008 pci_xhci_cmd_config_ep(struct pci_xhci_softc *sc, uint32_t slot, 1009 struct xhci_trb *trb) 1010 { 1011 struct xhci_input_dev_ctx *input_ctx; 1012 struct pci_xhci_dev_emu *dev; 1013 struct xhci_dev_ctx *dev_ctx; 1014 struct xhci_endp_ctx *ep_ctx, *iep_ctx; 1015 uint32_t cmderr; 1016 int i; 1017 1018 cmderr = XHCI_TRB_ERROR_SUCCESS; 1019 1020 DPRINTF(("pci_xhci config_ep slot %u", slot)); 1021 1022 dev = XHCI_SLOTDEV_PTR(sc, slot); 1023 assert(dev != NULL); 1024 1025 if ((trb->dwTrb3 & XHCI_TRB_3_DCEP_BIT) != 0) { 1026 DPRINTF(("pci_xhci config_ep - deconfigure ep slot %u", 1027 slot)); 1028 if (dev->dev_ue->ue_stop != NULL) 1029 dev->dev_ue->ue_stop(dev->dev_sc); 1030 1031 dev->dev_slotstate = XHCI_ST_ADDRESSED; 1032 1033 dev->hci.hci_address = 0; 1034 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1035 1036 /* number of contexts */ 1037 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE( 1038 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); 1039 1040 /* slot state */ 1041 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1042 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_ADDRESSED, 1043 0x1F, 27); 1044 1045 /* disable endpoints */ 1046 for (i = 2; i < 32; i++) 1047 pci_xhci_disable_ep(dev, i); 1048 1049 cmderr = XHCI_TRB_ERROR_SUCCESS; 1050 1051 goto done; 1052 } 1053 1054 if (dev->dev_slotstate < XHCI_ST_ADDRESSED) { 1055 DPRINTF(("pci_xhci: config_ep slotstate x%x != addressed", 1056 dev->dev_slotstate)); 1057 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; 1058 goto done; 1059 } 1060 1061 /* In addressed/configured state; 1062 * for each drop endpoint ctx flag: 1063 * ep->state = DISABLED 1064 * for each add endpoint ctx flag: 1065 * cp(ep-in, ep-out) 1066 * ep->state = RUNNING 1067 * for each drop+add endpoint flag: 1068 * reset ep resources 1069 * cp(ep-in, ep-out) 1070 * ep->state = RUNNING 1071 * if input->DisabledCtx[2-31] < 30: (at least 1 ep not disabled) 1072 * slot->state = configured 1073 */ 1074 1075 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1076 dev_ctx = dev->dev_ctx; 1077 
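	/*
	 * The loop below walks endpoint contexts 2..31 of the input
	 * context: a set drop flag disables that endpoint, while a set
	 * add flag copies the input endpoint context into the device
	 * context, (re)initializes its transfer ring state and marks
	 * the endpoint running.
	 */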
DPRINTF(("pci_xhci: config_ep inputctx: D:x%08x A:x%08x 7:x%08x", 1078 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1, 1079 input_ctx->ctx_input.dwInCtx7)); 1080 1081 for (i = 2; i <= 31; i++) { 1082 ep_ctx = &dev_ctx->ctx_ep[i]; 1083 1084 if (input_ctx->ctx_input.dwInCtx0 & 1085 XHCI_INCTX_0_DROP_MASK(i)) { 1086 DPRINTF((" config ep - dropping ep %d", i)); 1087 pci_xhci_disable_ep(dev, i); 1088 } 1089 1090 if (input_ctx->ctx_input.dwInCtx1 & 1091 XHCI_INCTX_1_ADD_MASK(i)) { 1092 iep_ctx = &input_ctx->ctx_ep[i]; 1093 1094 DPRINTF((" enable ep[%d] %08x %08x %016lx %08x", 1095 i, iep_ctx->dwEpCtx0, iep_ctx->dwEpCtx1, 1096 iep_ctx->qwEpCtx2, iep_ctx->dwEpCtx4)); 1097 1098 memcpy(ep_ctx, iep_ctx, sizeof(struct xhci_endp_ctx)); 1099 1100 pci_xhci_init_ep(dev, i); 1101 1102 /* ep state */ 1103 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1104 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1105 } 1106 } 1107 1108 /* slot state to configured */ 1109 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE( 1110 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_CONFIGURED, 0x1F, 27); 1111 dev_ctx->ctx_slot.dwSctx0 = FIELD_COPY( 1112 dev_ctx->ctx_slot.dwSctx0, input_ctx->ctx_slot.dwSctx0, 0x1F, 27); 1113 dev->dev_slotstate = XHCI_ST_CONFIGURED; 1114 1115 DPRINTF(("EP configured; slot %u [0]=0x%08x [1]=0x%08x [2]=0x%08x " 1116 "[3]=0x%08x", 1117 slot, dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1118 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1119 1120 done: 1121 return (cmderr); 1122 } 1123 1124 static uint32_t 1125 pci_xhci_cmd_reset_ep(struct pci_xhci_softc *sc, uint32_t slot, 1126 struct xhci_trb *trb) 1127 { 1128 struct pci_xhci_dev_emu *dev; 1129 struct pci_xhci_dev_ep *devep; 1130 struct xhci_dev_ctx *dev_ctx; 1131 struct xhci_endp_ctx *ep_ctx; 1132 uint32_t cmderr, epid; 1133 uint32_t type; 1134 1135 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1136 1137 DPRINTF(("pci_xhci: reset ep %u: slot %u", epid, slot)); 1138 1139 cmderr = XHCI_TRB_ERROR_SUCCESS; 1140 1141 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1142 1143 dev = XHCI_SLOTDEV_PTR(sc, slot); 1144 assert(dev != NULL); 1145 1146 if (type == XHCI_TRB_TYPE_STOP_EP && 1147 (trb->dwTrb3 & XHCI_TRB_3_SUSP_EP_BIT) != 0) { 1148 /* XXX suspend endpoint for 10ms */ 1149 } 1150 1151 if (epid < 1 || epid > 31) { 1152 DPRINTF(("pci_xhci: reset ep: invalid epid %u", epid)); 1153 cmderr = XHCI_TRB_ERROR_TRB; 1154 goto done; 1155 } 1156 1157 devep = &dev->eps[epid]; 1158 if (devep->ep_xfer != NULL) 1159 USB_DATA_XFER_RESET(devep->ep_xfer); 1160 1161 dev_ctx = dev->dev_ctx; 1162 assert(dev_ctx != NULL); 1163 1164 ep_ctx = &dev_ctx->ctx_ep[epid]; 1165 1166 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1167 1168 if (devep->ep_MaxPStreams == 0) 1169 ep_ctx->qwEpCtx2 = devep->ep_ringaddr | devep->ep_ccs; 1170 1171 DPRINTF(("pci_xhci: reset ep[%u] %08x %08x %016lx %08x", 1172 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1173 ep_ctx->dwEpCtx4)); 1174 1175 if (type == XHCI_TRB_TYPE_RESET_EP && 1176 (dev->dev_ue->ue_reset == NULL || 1177 dev->dev_ue->ue_reset(dev->dev_sc) < 0)) { 1178 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; 1179 goto done; 1180 } 1181 1182 done: 1183 return (cmderr); 1184 } 1185 1186 1187 static uint32_t 1188 pci_xhci_find_stream(struct pci_xhci_softc *sc, struct xhci_endp_ctx *ep, 1189 struct pci_xhci_dev_ep *devep, uint32_t streamid) 1190 { 1191 struct xhci_stream_ctx *sctx; 1192 1193 if (devep->ep_MaxPStreams == 0) 1194 return (XHCI_TRB_ERROR_TRB); 1195 1196 if (devep->ep_MaxPStreams > XHCI_STREAMS_MAX) 1197 
return (XHCI_TRB_ERROR_INVALID_SID); 1198 1199 if (XHCI_EPCTX_0_LSA_GET(ep->dwEpCtx0) == 0) { 1200 DPRINTF(("pci_xhci: find_stream; LSA bit not set")); 1201 return (XHCI_TRB_ERROR_INVALID_SID); 1202 } 1203 1204 /* only support primary stream */ 1205 if (streamid > devep->ep_MaxPStreams) 1206 return (XHCI_TRB_ERROR_STREAM_TYPE); 1207 1208 sctx = (struct xhci_stream_ctx *)XHCI_GADDR(sc, ep->qwEpCtx2 & ~0xFUL) + 1209 streamid; 1210 if (!XHCI_SCTX_0_SCT_GET(sctx->qwSctx0)) 1211 return (XHCI_TRB_ERROR_STREAM_TYPE); 1212 1213 return (XHCI_TRB_ERROR_SUCCESS); 1214 } 1215 1216 1217 static uint32_t 1218 pci_xhci_cmd_set_tr(struct pci_xhci_softc *sc, uint32_t slot, 1219 struct xhci_trb *trb) 1220 { 1221 struct pci_xhci_dev_emu *dev; 1222 struct pci_xhci_dev_ep *devep; 1223 struct xhci_dev_ctx *dev_ctx; 1224 struct xhci_endp_ctx *ep_ctx; 1225 uint32_t cmderr, epid; 1226 uint32_t streamid; 1227 1228 cmderr = XHCI_TRB_ERROR_SUCCESS; 1229 1230 dev = XHCI_SLOTDEV_PTR(sc, slot); 1231 assert(dev != NULL); 1232 1233 DPRINTF(("pci_xhci set_tr: new-tr x%016lx, SCT %u DCS %u", 1234 (trb->qwTrb0 & ~0xF), (uint32_t)((trb->qwTrb0 >> 1) & 0x7), 1235 (uint32_t)(trb->qwTrb0 & 0x1))); 1236 DPRINTF((" stream-id %u, slot %u, epid %u, C %u", 1237 (trb->dwTrb2 >> 16) & 0xFFFF, 1238 XHCI_TRB_3_SLOT_GET(trb->dwTrb3), 1239 XHCI_TRB_3_EP_GET(trb->dwTrb3), trb->dwTrb3 & 0x1)); 1240 1241 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); 1242 if (epid < 1 || epid > 31) { 1243 DPRINTF(("pci_xhci: set_tr_deq: invalid epid %u", epid)); 1244 cmderr = XHCI_TRB_ERROR_TRB; 1245 goto done; 1246 } 1247 1248 dev_ctx = dev->dev_ctx; 1249 assert(dev_ctx != NULL); 1250 1251 ep_ctx = &dev_ctx->ctx_ep[epid]; 1252 devep = &dev->eps[epid]; 1253 1254 switch (XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)) { 1255 case XHCI_ST_EPCTX_STOPPED: 1256 case XHCI_ST_EPCTX_ERROR: 1257 break; 1258 default: 1259 DPRINTF(("pci_xhci cmd set_tr invalid state %x", 1260 XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0))); 1261 cmderr = XHCI_TRB_ERROR_CONTEXT_STATE; 1262 goto done; 1263 } 1264 1265 streamid = XHCI_TRB_2_STREAM_GET(trb->dwTrb2); 1266 if (devep->ep_MaxPStreams > 0) { 1267 cmderr = pci_xhci_find_stream(sc, ep_ctx, devep, streamid); 1268 if (cmderr == XHCI_TRB_ERROR_SUCCESS) { 1269 assert(devep->ep_sctx != NULL); 1270 1271 devep->ep_sctx[streamid].qwSctx0 = trb->qwTrb0; 1272 devep->ep_sctx_trbs[streamid].ringaddr = 1273 trb->qwTrb0 & ~0xF; 1274 devep->ep_sctx_trbs[streamid].ccs = 1275 XHCI_EPCTX_2_DCS_GET(trb->qwTrb0); 1276 } 1277 } else { 1278 if (streamid != 0) { 1279 DPRINTF(("pci_xhci cmd set_tr streamid %x != 0", 1280 streamid)); 1281 } 1282 ep_ctx->qwEpCtx2 = trb->qwTrb0 & ~0xFUL; 1283 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & ~0xFUL; 1284 devep->ep_ccs = trb->qwTrb0 & 0x1; 1285 devep->ep_tr = XHCI_GADDR(sc, devep->ep_ringaddr); 1286 1287 DPRINTF(("pci_xhci set_tr first TRB:")); 1288 pci_xhci_dump_trb(devep->ep_tr); 1289 } 1290 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; 1291 1292 done: 1293 return (cmderr); 1294 } 1295 1296 static uint32_t 1297 pci_xhci_cmd_eval_ctx(struct pci_xhci_softc *sc, uint32_t slot, 1298 struct xhci_trb *trb) 1299 { 1300 struct xhci_input_dev_ctx *input_ctx; 1301 struct xhci_slot_ctx *islot_ctx; 1302 struct xhci_dev_ctx *dev_ctx; 1303 struct xhci_endp_ctx *ep0_ctx; 1304 uint32_t cmderr; 1305 1306 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL); 1307 islot_ctx = &input_ctx->ctx_slot; 1308 ep0_ctx = &input_ctx->ctx_ep[1]; 1309 1310 cmderr = XHCI_TRB_ERROR_SUCCESS; 1311 DPRINTF(("pci_xhci: eval ctx, input ctl: D 0x%08x 
A 0x%08x,", 1312 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1)); 1313 DPRINTF((" slot %08x %08x %08x %08x", 1314 islot_ctx->dwSctx0, islot_ctx->dwSctx1, 1315 islot_ctx->dwSctx2, islot_ctx->dwSctx3)); 1316 DPRINTF((" ep0 %08x %08x %016lx %08x", 1317 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1318 ep0_ctx->dwEpCtx4)); 1319 1320 /* this command expects drop-ctx=0 & add-ctx=slot+ep0 */ 1321 if ((input_ctx->ctx_input.dwInCtx0 != 0) || 1322 (input_ctx->ctx_input.dwInCtx1 & 0x03) == 0) { 1323 DPRINTF(("pci_xhci: eval ctx, input ctl invalid")); 1324 cmderr = XHCI_TRB_ERROR_TRB; 1325 goto done; 1326 } 1327 1328 /* assign address to slot; in this emulation, slot_id = address */ 1329 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1330 1331 DPRINTF(("pci_xhci: eval ctx, dev ctx")); 1332 DPRINTF((" slot %08x %08x %08x %08x", 1333 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1334 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1335 1336 if (input_ctx->ctx_input.dwInCtx1 & 0x01) { /* slot ctx */ 1337 /* set max exit latency */ 1338 dev_ctx->ctx_slot.dwSctx1 = FIELD_COPY( 1339 dev_ctx->ctx_slot.dwSctx1, input_ctx->ctx_slot.dwSctx1, 1340 0xFFFF, 0); 1341 1342 /* set interrupter target */ 1343 dev_ctx->ctx_slot.dwSctx2 = FIELD_COPY( 1344 dev_ctx->ctx_slot.dwSctx2, input_ctx->ctx_slot.dwSctx2, 1345 0x3FF, 22); 1346 } 1347 if (input_ctx->ctx_input.dwInCtx1 & 0x02) { /* control ctx */ 1348 /* set max packet size */ 1349 dev_ctx->ctx_ep[1].dwEpCtx1 = FIELD_COPY( 1350 dev_ctx->ctx_ep[1].dwEpCtx1, ep0_ctx->dwEpCtx1, 1351 0xFFFF, 16); 1352 1353 ep0_ctx = &dev_ctx->ctx_ep[1]; 1354 } 1355 1356 DPRINTF(("pci_xhci: eval ctx, output ctx")); 1357 DPRINTF((" slot %08x %08x %08x %08x", 1358 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, 1359 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3)); 1360 DPRINTF((" ep0 %08x %08x %016lx %08x", 1361 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, 1362 ep0_ctx->dwEpCtx4)); 1363 1364 done: 1365 return (cmderr); 1366 } 1367 1368 static int 1369 pci_xhci_complete_commands(struct pci_xhci_softc *sc) 1370 { 1371 struct xhci_trb evtrb; 1372 struct xhci_trb *trb; 1373 uint64_t crcr; 1374 uint32_t ccs; /* cycle state (XHCI 4.9.2) */ 1375 uint32_t type; 1376 uint32_t slot; 1377 uint32_t cmderr; 1378 int error; 1379 1380 error = 0; 1381 sc->opregs.crcr |= XHCI_CRCR_LO_CRR; 1382 1383 trb = sc->opregs.cr_p; 1384 ccs = sc->opregs.crcr & XHCI_CRCR_LO_RCS; 1385 crcr = sc->opregs.crcr & ~0xF; 1386 1387 while (1) { 1388 sc->opregs.cr_p = trb; 1389 1390 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1391 1392 if ((trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT) != 1393 (ccs & XHCI_TRB_3_CYCLE_BIT)) 1394 break; 1395 1396 DPRINTF(("pci_xhci: cmd type 0x%x, Trb0 x%016lx dwTrb2 x%08x" 1397 " dwTrb3 x%08x, TRB_CYCLE %u/ccs %u", 1398 type, trb->qwTrb0, trb->dwTrb2, trb->dwTrb3, 1399 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT, ccs)); 1400 1401 cmderr = XHCI_TRB_ERROR_SUCCESS; 1402 evtrb.dwTrb2 = 0; 1403 evtrb.dwTrb3 = (ccs & XHCI_TRB_3_CYCLE_BIT) | 1404 XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_CMD_COMPLETE); 1405 slot = 0; 1406 1407 switch (type) { 1408 case XHCI_TRB_TYPE_LINK: /* 0x06 */ 1409 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1410 ccs ^= XHCI_CRCR_LO_RCS; 1411 break; 1412 1413 case XHCI_TRB_TYPE_ENABLE_SLOT: /* 0x09 */ 1414 cmderr = pci_xhci_cmd_enable_slot(sc, &slot); 1415 break; 1416 1417 case XHCI_TRB_TYPE_DISABLE_SLOT: /* 0x0A */ 1418 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1419 cmderr = pci_xhci_cmd_disable_slot(sc, slot); 1420 break; 1421 1422 case 
XHCI_TRB_TYPE_ADDRESS_DEVICE: /* 0x0B */ 1423 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1424 cmderr = pci_xhci_cmd_address_device(sc, slot, trb); 1425 break; 1426 1427 case XHCI_TRB_TYPE_CONFIGURE_EP: /* 0x0C */ 1428 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1429 cmderr = pci_xhci_cmd_config_ep(sc, slot, trb); 1430 break; 1431 1432 case XHCI_TRB_TYPE_EVALUATE_CTX: /* 0x0D */ 1433 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1434 cmderr = pci_xhci_cmd_eval_ctx(sc, slot, trb); 1435 break; 1436 1437 case XHCI_TRB_TYPE_RESET_EP: /* 0x0E */ 1438 DPRINTF(("Reset Endpoint on slot %d", slot)); 1439 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1440 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1441 break; 1442 1443 case XHCI_TRB_TYPE_STOP_EP: /* 0x0F */ 1444 DPRINTF(("Stop Endpoint on slot %d", slot)); 1445 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1446 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb); 1447 break; 1448 1449 case XHCI_TRB_TYPE_SET_TR_DEQUEUE: /* 0x10 */ 1450 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1451 cmderr = pci_xhci_cmd_set_tr(sc, slot, trb); 1452 break; 1453 1454 case XHCI_TRB_TYPE_RESET_DEVICE: /* 0x11 */ 1455 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); 1456 cmderr = pci_xhci_cmd_reset_device(sc, slot); 1457 break; 1458 1459 case XHCI_TRB_TYPE_FORCE_EVENT: /* 0x12 */ 1460 /* TODO: */ 1461 break; 1462 1463 case XHCI_TRB_TYPE_NEGOTIATE_BW: /* 0x13 */ 1464 break; 1465 1466 case XHCI_TRB_TYPE_SET_LATENCY_TOL: /* 0x14 */ 1467 break; 1468 1469 case XHCI_TRB_TYPE_GET_PORT_BW: /* 0x15 */ 1470 break; 1471 1472 case XHCI_TRB_TYPE_FORCE_HEADER: /* 0x16 */ 1473 break; 1474 1475 case XHCI_TRB_TYPE_NOOP_CMD: /* 0x17 */ 1476 break; 1477 1478 default: 1479 DPRINTF(("pci_xhci: unsupported cmd %x", type)); 1480 break; 1481 } 1482 1483 if (type != XHCI_TRB_TYPE_LINK) { 1484 /* 1485 * insert command completion event and assert intr 1486 */ 1487 evtrb.qwTrb0 = crcr; 1488 evtrb.dwTrb2 |= XHCI_TRB_2_ERROR_SET(cmderr); 1489 evtrb.dwTrb3 |= XHCI_TRB_3_SLOT_SET(slot); 1490 DPRINTF(("pci_xhci: command 0x%x result: 0x%x", 1491 type, cmderr)); 1492 pci_xhci_insert_event(sc, &evtrb, 1); 1493 } 1494 1495 trb = pci_xhci_trb_next(sc, trb, &crcr); 1496 } 1497 1498 sc->opregs.crcr = crcr | (sc->opregs.crcr & XHCI_CRCR_LO_CA) | ccs; 1499 sc->opregs.crcr &= ~XHCI_CRCR_LO_CRR; 1500 return (error); 1501 } 1502 1503 static void 1504 pci_xhci_dump_trb(struct xhci_trb *trb) 1505 { 1506 static const char *trbtypes[] = { 1507 "RESERVED", 1508 "NORMAL", 1509 "SETUP_STAGE", 1510 "DATA_STAGE", 1511 "STATUS_STAGE", 1512 "ISOCH", 1513 "LINK", 1514 "EVENT_DATA", 1515 "NOOP", 1516 "ENABLE_SLOT", 1517 "DISABLE_SLOT", 1518 "ADDRESS_DEVICE", 1519 "CONFIGURE_EP", 1520 "EVALUATE_CTX", 1521 "RESET_EP", 1522 "STOP_EP", 1523 "SET_TR_DEQUEUE", 1524 "RESET_DEVICE", 1525 "FORCE_EVENT", 1526 "NEGOTIATE_BW", 1527 "SET_LATENCY_TOL", 1528 "GET_PORT_BW", 1529 "FORCE_HEADER", 1530 "NOOP_CMD" 1531 }; 1532 uint32_t type; 1533 1534 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); 1535 DPRINTF(("pci_xhci: trb[@%p] type x%02x %s 0:x%016lx 2:x%08x 3:x%08x", 1536 trb, type, 1537 type <= XHCI_TRB_TYPE_NOOP_CMD ? 
trbtypes[type] : "INVALID", 1538 trb->qwTrb0, trb->dwTrb2, trb->dwTrb3)); 1539 } 1540 1541 static int 1542 pci_xhci_xfer_complete(struct pci_xhci_softc *sc, struct usb_data_xfer *xfer, 1543 uint32_t slot, uint32_t epid, int *do_intr) 1544 { 1545 struct pci_xhci_dev_emu *dev; 1546 struct pci_xhci_dev_ep *devep; 1547 struct xhci_dev_ctx *dev_ctx; 1548 struct xhci_endp_ctx *ep_ctx; 1549 struct xhci_trb *trb; 1550 struct xhci_trb evtrb; 1551 uint32_t trbflags; 1552 uint32_t edtla; 1553 int i, err; 1554 1555 dev = XHCI_SLOTDEV_PTR(sc, slot); 1556 devep = &dev->eps[epid]; 1557 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1558 1559 assert(dev_ctx != NULL); 1560 1561 ep_ctx = &dev_ctx->ctx_ep[epid]; 1562 1563 err = XHCI_TRB_ERROR_SUCCESS; 1564 *do_intr = 0; 1565 edtla = 0; 1566 1567 /* go through list of TRBs and insert event(s) */ 1568 for (i = xfer->head; xfer->ndata > 0; ) { 1569 evtrb.qwTrb0 = (uint64_t)xfer->data[i].hci_data; 1570 trb = XHCI_GADDR(sc, evtrb.qwTrb0); 1571 trbflags = trb->dwTrb3; 1572 1573 DPRINTF(("pci_xhci: xfer[%d] done?%u:%d trb %x %016lx %x " 1574 "(err %d) IOC?%d", 1575 i, xfer->data[i].processed, xfer->data[i].blen, 1576 XHCI_TRB_3_TYPE_GET(trbflags), evtrb.qwTrb0, 1577 trbflags, err, 1578 trb->dwTrb3 & XHCI_TRB_3_IOC_BIT ? 1 : 0)); 1579 1580 if (!xfer->data[i].processed) { 1581 xfer->head = i; 1582 break; 1583 } 1584 1585 xfer->ndata--; 1586 edtla += xfer->data[i].bdone; 1587 1588 trb->dwTrb3 = (trb->dwTrb3 & ~0x1) | (xfer->data[i].ccs); 1589 1590 pci_xhci_update_ep_ring(sc, dev, devep, ep_ctx, 1591 xfer->data[i].streamid, xfer->data[i].trbnext, 1592 xfer->data[i].ccs); 1593 1594 /* Only interrupt if IOC or short packet */ 1595 if (!(trb->dwTrb3 & XHCI_TRB_3_IOC_BIT) && 1596 !((err == XHCI_TRB_ERROR_SHORT_PKT) && 1597 (trb->dwTrb3 & XHCI_TRB_3_ISP_BIT))) { 1598 1599 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1600 continue; 1601 } 1602 1603 evtrb.dwTrb2 = XHCI_TRB_2_ERROR_SET(err) | 1604 XHCI_TRB_2_REM_SET(xfer->data[i].blen); 1605 1606 evtrb.dwTrb3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_TRANSFER) | 1607 XHCI_TRB_3_SLOT_SET(slot) | XHCI_TRB_3_EP_SET(epid); 1608 1609 if (XHCI_TRB_3_TYPE_GET(trbflags) == XHCI_TRB_TYPE_EVENT_DATA) { 1610 DPRINTF(("pci_xhci EVENT_DATA edtla %u", edtla)); 1611 evtrb.qwTrb0 = trb->qwTrb0; 1612 evtrb.dwTrb2 = (edtla & 0xFFFFF) | 1613 XHCI_TRB_2_ERROR_SET(err); 1614 evtrb.dwTrb3 |= XHCI_TRB_3_ED_BIT; 1615 edtla = 0; 1616 } 1617 1618 *do_intr = 1; 1619 1620 err = pci_xhci_insert_event(sc, &evtrb, 0); 1621 if (err != XHCI_TRB_ERROR_SUCCESS) { 1622 break; 1623 } 1624 1625 i = (i + 1) % USB_MAX_XFER_BLOCKS; 1626 } 1627 1628 return (err); 1629 } 1630 1631 static void 1632 pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, 1633 struct pci_xhci_dev_emu *dev __unused, struct pci_xhci_dev_ep *devep, 1634 struct xhci_endp_ctx *ep_ctx, uint32_t streamid, uint64_t ringaddr, int ccs) 1635 { 1636 1637 if (devep->ep_MaxPStreams != 0) { 1638 devep->ep_sctx[streamid].qwSctx0 = (ringaddr & ~0xFUL) | 1639 (ccs & 0x1); 1640 1641 devep->ep_sctx_trbs[streamid].ringaddr = ringaddr & ~0xFUL; 1642 devep->ep_sctx_trbs[streamid].ccs = ccs & 0x1; 1643 ep_ctx->qwEpCtx2 = (ep_ctx->qwEpCtx2 & ~0x1) | (ccs & 0x1); 1644 1645 DPRINTF(("xhci update ep-ring stream %d, addr %lx", 1646 streamid, devep->ep_sctx[streamid].qwSctx0)); 1647 } else { 1648 devep->ep_ringaddr = ringaddr & ~0xFUL; 1649 devep->ep_ccs = ccs & 0x1; 1650 devep->ep_tr = XHCI_GADDR(sc, ringaddr & ~0xFUL); 1651 ep_ctx->qwEpCtx2 = (ringaddr & ~0xFUL) | (ccs & 0x1); 1652 1653 DPRINTF(("xhci update ep-ring, addr %lx", 1654 
(devep->ep_ringaddr | devep->ep_ccs))); 1655 } 1656 } 1657 1658 /* 1659 * Outstanding transfer still in progress (device NAK'd earlier) so retry 1660 * the transfer again to see if it succeeds. 1661 */ 1662 static int 1663 pci_xhci_try_usb_xfer(struct pci_xhci_softc *sc, 1664 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1665 struct xhci_endp_ctx *ep_ctx, uint32_t slot, uint32_t epid) 1666 { 1667 struct usb_data_xfer *xfer; 1668 int err; 1669 int do_intr; 1670 1671 ep_ctx->dwEpCtx0 = FIELD_REPLACE( 1672 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1673 1674 err = 0; 1675 do_intr = 0; 1676 1677 xfer = devep->ep_xfer; 1678 USB_DATA_XFER_LOCK(xfer); 1679 1680 /* outstanding requests queued up */ 1681 if (dev->dev_ue->ue_data != NULL) { 1682 err = dev->dev_ue->ue_data(dev->dev_sc, xfer, 1683 epid & 0x1 ? USB_XFER_IN : USB_XFER_OUT, epid/2); 1684 if (err == USB_ERR_CANCELLED) { 1685 if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) == 1686 USB_NAK) 1687 err = XHCI_TRB_ERROR_SUCCESS; 1688 } else { 1689 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, 1690 &do_intr); 1691 if (err == XHCI_TRB_ERROR_SUCCESS && do_intr) { 1692 pci_xhci_assert_interrupt(sc); 1693 } 1694 1695 1696 /* XXX should not do it if error? */ 1697 USB_DATA_XFER_RESET(xfer); 1698 } 1699 } 1700 1701 USB_DATA_XFER_UNLOCK(xfer); 1702 1703 1704 return (err); 1705 } 1706 1707 1708 static int 1709 pci_xhci_handle_transfer(struct pci_xhci_softc *sc, 1710 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, 1711 struct xhci_endp_ctx *ep_ctx, struct xhci_trb *trb, uint32_t slot, 1712 uint32_t epid, uint64_t addr, uint32_t ccs, uint32_t streamid) 1713 { 1714 struct xhci_trb *setup_trb; 1715 struct usb_data_xfer *xfer; 1716 struct usb_data_xfer_block *xfer_block; 1717 uint64_t val; 1718 uint32_t trbflags; 1719 int do_intr, err; 1720 int do_retry; 1721 1722 ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0, 1723 XHCI_ST_EPCTX_RUNNING, 0x7, 0); 1724 1725 xfer = devep->ep_xfer; 1726 USB_DATA_XFER_LOCK(xfer); 1727 1728 DPRINTF(("pci_xhci handle_transfer slot %u", slot)); 1729 1730 retry: 1731 err = XHCI_TRB_ERROR_INVALID; 1732 do_retry = 0; 1733 do_intr = 0; 1734 setup_trb = NULL; 1735 1736 while (1) { 1737 pci_xhci_dump_trb(trb); 1738 1739 trbflags = trb->dwTrb3; 1740 1741 if (XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK && 1742 (trbflags & XHCI_TRB_3_CYCLE_BIT) != 1743 (ccs & XHCI_TRB_3_CYCLE_BIT)) { 1744 DPRINTF(("Cycle-bit changed trbflags %x, ccs %x", 1745 trbflags & XHCI_TRB_3_CYCLE_BIT, ccs)); 1746 break; 1747 } 1748 1749 xfer_block = NULL; 1750 1751 switch (XHCI_TRB_3_TYPE_GET(trbflags)) { 1752 case XHCI_TRB_TYPE_LINK: 1753 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) 1754 ccs ^= 0x1; 1755 1756 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1757 (void *)addr, ccs); 1758 xfer_block->processed = 1; 1759 break; 1760 1761 case XHCI_TRB_TYPE_SETUP_STAGE: 1762 if ((trbflags & XHCI_TRB_3_IDT_BIT) == 0 || 1763 XHCI_TRB_2_BYTES_GET(trb->dwTrb2) != 8) { 1764 DPRINTF(("pci_xhci: invalid setup trb")); 1765 err = XHCI_TRB_ERROR_TRB; 1766 goto errout; 1767 } 1768 setup_trb = trb; 1769 1770 val = trb->qwTrb0; 1771 if (!xfer->ureq) 1772 xfer->ureq = malloc( 1773 sizeof(struct usb_device_request)); 1774 memcpy(xfer->ureq, &val, 1775 sizeof(struct usb_device_request)); 1776 1777 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1778 (void *)addr, ccs); 1779 xfer_block->processed = 1; 1780 break; 1781 1782 case XHCI_TRB_TYPE_NORMAL: 1783 case XHCI_TRB_TYPE_ISOCH: 1784 if (setup_trb != NULL) { 1785 DPRINTF(("pci_xhci: trb 
not supposed to be in " 1786 "ctl scope")); 1787 err = XHCI_TRB_ERROR_TRB; 1788 goto errout; 1789 } 1790 /* fall through */ 1791 1792 case XHCI_TRB_TYPE_DATA_STAGE: 1793 xfer_block = usb_data_xfer_append(xfer, 1794 (void *)(trbflags & XHCI_TRB_3_IDT_BIT ? 1795 &trb->qwTrb0 : XHCI_GADDR(sc, trb->qwTrb0)), 1796 trb->dwTrb2 & 0x1FFFF, (void *)addr, ccs); 1797 break; 1798 1799 case XHCI_TRB_TYPE_STATUS_STAGE: 1800 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1801 (void *)addr, ccs); 1802 break; 1803 1804 case XHCI_TRB_TYPE_NOOP: 1805 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1806 (void *)addr, ccs); 1807 xfer_block->processed = 1; 1808 break; 1809 1810 case XHCI_TRB_TYPE_EVENT_DATA: 1811 xfer_block = usb_data_xfer_append(xfer, NULL, 0, 1812 (void *)addr, ccs); 1813 if ((epid > 1) && (trbflags & XHCI_TRB_3_IOC_BIT)) { 1814 xfer_block->processed = 1; 1815 } 1816 break; 1817 1818 default: 1819 DPRINTF(("pci_xhci: handle xfer unexpected trb type " 1820 "0x%x", 1821 XHCI_TRB_3_TYPE_GET(trbflags))); 1822 err = XHCI_TRB_ERROR_TRB; 1823 goto errout; 1824 } 1825 1826 trb = pci_xhci_trb_next(sc, trb, &addr); 1827 1828 DPRINTF(("pci_xhci: next trb: 0x%lx", (uint64_t)trb)); 1829 1830 if (xfer_block) { 1831 xfer_block->trbnext = addr; 1832 xfer_block->streamid = streamid; 1833 } 1834 1835 if (!setup_trb && !(trbflags & XHCI_TRB_3_CHAIN_BIT) && 1836 XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK) { 1837 break; 1838 } 1839 1840 /* handle current batch that requires interrupt on complete */ 1841 if (trbflags & XHCI_TRB_3_IOC_BIT) { 1842 DPRINTF(("pci_xhci: trb IOC bit set")); 1843 if (epid == 1) 1844 do_retry = 1; 1845 break; 1846 } 1847 } 1848 1849 DPRINTF(("pci_xhci[%d]: xfer->ndata %u", __LINE__, xfer->ndata)); 1850 1851 if (xfer->ndata <= 0) 1852 goto errout; 1853 1854 if (epid == 1) { 1855 int usberr; 1856 1857 if (dev->dev_ue->ue_request != NULL) 1858 usberr = dev->dev_ue->ue_request(dev->dev_sc, xfer); 1859 else 1860 usberr = USB_ERR_NOT_STARTED; 1861 err = USB_TO_XHCI_ERR(usberr); 1862 if (err == XHCI_TRB_ERROR_SUCCESS || 1863 err == XHCI_TRB_ERROR_STALL || 1864 err == XHCI_TRB_ERROR_SHORT_PKT) { 1865 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, 1866 &do_intr); 1867 if (err != XHCI_TRB_ERROR_SUCCESS) 1868 do_retry = 0; 1869 } 1870 1871 } else { 1872 /* handle data transfer */ 1873 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1874 err = XHCI_TRB_ERROR_SUCCESS; 1875 } 1876 1877 errout: 1878 if (err == XHCI_TRB_ERROR_EV_RING_FULL) 1879 DPRINTF(("pci_xhci[%d]: event ring full", __LINE__)); 1880 1881 if (!do_retry) 1882 USB_DATA_XFER_UNLOCK(xfer); 1883 1884 if (do_intr) 1885 pci_xhci_assert_interrupt(sc); 1886 1887 if (do_retry) { 1888 USB_DATA_XFER_RESET(xfer); 1889 DPRINTF(("pci_xhci[%d]: retry:continuing with next TRBs", 1890 __LINE__)); 1891 goto retry; 1892 } 1893 1894 if (epid == 1) 1895 USB_DATA_XFER_RESET(xfer); 1896 1897 return (err); 1898 } 1899 1900 static void 1901 pci_xhci_device_doorbell(struct pci_xhci_softc *sc, uint32_t slot, 1902 uint32_t epid, uint32_t streamid) 1903 { 1904 struct pci_xhci_dev_emu *dev; 1905 struct pci_xhci_dev_ep *devep; 1906 struct xhci_dev_ctx *dev_ctx; 1907 struct xhci_endp_ctx *ep_ctx; 1908 struct pci_xhci_trb_ring *sctx_tr; 1909 struct xhci_trb *trb; 1910 uint64_t ringaddr; 1911 uint32_t ccs; 1912 int error; 1913 1914 DPRINTF(("pci_xhci doorbell slot %u epid %u stream %u", 1915 slot, epid, streamid)); 1916 1917 if (slot == 0 || slot > XHCI_MAX_SLOTS) { 1918 DPRINTF(("pci_xhci: invalid doorbell slot %u", slot)); 1919 return; 1920 
} 1921 1922 if (epid == 0 || epid >= XHCI_MAX_ENDPOINTS) { 1923 DPRINTF(("pci_xhci: invalid endpoint %u", epid)); 1924 return; 1925 } 1926 1927 dev = XHCI_SLOTDEV_PTR(sc, slot); 1928 devep = &dev->eps[epid]; 1929 dev_ctx = pci_xhci_get_dev_ctx(sc, slot); 1930 if (!dev_ctx) { 1931 return; 1932 } 1933 ep_ctx = &dev_ctx->ctx_ep[epid]; 1934 1935 sctx_tr = NULL; 1936 1937 DPRINTF(("pci_xhci: device doorbell ep[%u] %08x %08x %016lx %08x", 1938 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, 1939 ep_ctx->dwEpCtx4)); 1940 1941 if (ep_ctx->qwEpCtx2 == 0) 1942 return; 1943 1944 /* handle pending transfers */ 1945 if (devep->ep_xfer->ndata > 0) { 1946 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid); 1947 return; 1948 } 1949 1950 /* get next trb work item */ 1951 if (devep->ep_MaxPStreams != 0) { 1952 /* 1953 * Stream IDs of 0, 65535 (any stream), and 65534 1954 * (prime) are invalid. 1955 */ 1956 if (streamid == 0 || streamid == 65534 || streamid == 65535) { 1957 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1958 return; 1959 } 1960 1961 error = pci_xhci_find_stream(sc, ep_ctx, devep, streamid); 1962 if (error != XHCI_TRB_ERROR_SUCCESS) { 1963 DPRINTF(("pci_xhci: invalid stream %u: %d", 1964 streamid, error)); 1965 return; 1966 } 1967 sctx_tr = &devep->ep_sctx_trbs[streamid]; 1968 ringaddr = sctx_tr->ringaddr; 1969 ccs = sctx_tr->ccs; 1970 trb = XHCI_GADDR(sc, sctx_tr->ringaddr & ~0xFUL); 1971 DPRINTF(("doorbell, stream %u, ccs %lx, trb ccs %x", 1972 streamid, ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1973 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1974 } else { 1975 if (streamid != 0) { 1976 DPRINTF(("pci_xhci: invalid stream %u", streamid)); 1977 return; 1978 } 1979 ringaddr = devep->ep_ringaddr; 1980 ccs = devep->ep_ccs; 1981 trb = devep->ep_tr; 1982 DPRINTF(("doorbell, ccs %lx, trb ccs %x", 1983 ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, 1984 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT)); 1985 } 1986 1987 if (XHCI_TRB_3_TYPE_GET(trb->dwTrb3) == 0) { 1988 DPRINTF(("pci_xhci: ring %lx trb[%lx] EP %u is RESERVED?", 1989 ep_ctx->qwEpCtx2, devep->ep_ringaddr, epid)); 1990 return; 1991 } 1992 1993 pci_xhci_handle_transfer(sc, dev, devep, ep_ctx, trb, slot, epid, 1994 ringaddr, ccs, streamid); 1995 } 1996 1997 static void 1998 pci_xhci_dbregs_write(struct pci_xhci_softc *sc, uint64_t offset, 1999 uint64_t value) 2000 { 2001 2002 offset = (offset - sc->dboff) / sizeof(uint32_t); 2003 2004 DPRINTF(("pci_xhci: doorbell write offset 0x%lx: 0x%lx", 2005 offset, value)); 2006 2007 if (XHCI_HALTED(sc)) { 2008 DPRINTF(("pci_xhci: controller halted")); 2009 return; 2010 } 2011 2012 if (offset == 0) 2013 pci_xhci_complete_commands(sc); 2014 else if (sc->portregs != NULL) 2015 pci_xhci_device_doorbell(sc, offset, 2016 XHCI_DB_TARGET_GET(value), XHCI_DB_SID_GET(value)); 2017 } 2018 2019 static void 2020 pci_xhci_rtsregs_write(struct pci_xhci_softc *sc, uint64_t offset, 2021 uint64_t value) 2022 { 2023 struct pci_xhci_rtsregs *rts; 2024 2025 offset -= sc->rtsoff; 2026 2027 if (offset == 0) { 2028 DPRINTF(("pci_xhci attempted write to MFINDEX")); 2029 return; 2030 } 2031 2032 DPRINTF(("pci_xhci: runtime regs write offset 0x%lx: 0x%lx", 2033 offset, value)); 2034 2035 offset -= 0x20; /* start of intrreg */ 2036 2037 rts = &sc->rtsregs; 2038 2039 switch (offset) { 2040 case 0x00: 2041 if (value & XHCI_IMAN_INTR_PEND) 2042 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2043 rts->intrreg.iman = (value & XHCI_IMAN_INTR_ENA) | 2044 (rts->intrreg.iman & XHCI_IMAN_INTR_PEND); 2045 2046 if (!(value & 
XHCI_IMAN_INTR_ENA)) 2047 pci_xhci_deassert_interrupt(sc); 2048 2049 break; 2050 2051 case 0x04: 2052 rts->intrreg.imod = value; 2053 break; 2054 2055 case 0x08: 2056 rts->intrreg.erstsz = value & 0xFFFF; 2057 break; 2058 2059 case 0x10: 2060 /* ERSTBA low bits */ 2061 rts->intrreg.erstba = MASK_64_HI(sc->rtsregs.intrreg.erstba) | 2062 (value & ~0x3F); 2063 break; 2064 2065 case 0x14: 2066 /* ERSTBA high bits */ 2067 rts->intrreg.erstba = (value << 32) | 2068 MASK_64_LO(sc->rtsregs.intrreg.erstba); 2069 2070 rts->erstba_p = XHCI_GADDR(sc, 2071 sc->rtsregs.intrreg.erstba & ~0x3FUL); 2072 2073 rts->erst_p = XHCI_GADDR(sc, 2074 sc->rtsregs.erstba_p->qwEvrsTablePtr & ~0x3FUL); 2075 2076 rts->er_enq_idx = 0; 2077 rts->er_events_cnt = 0; 2078 2079 DPRINTF(("pci_xhci: wr erstba erst (%p) ptr 0x%lx, sz %u", 2080 rts->erstba_p, 2081 rts->erstba_p->qwEvrsTablePtr, 2082 rts->erstba_p->dwEvrsTableSize)); 2083 break; 2084 2085 case 0x18: 2086 /* ERDP low bits */ 2087 rts->intrreg.erdp = 2088 MASK_64_HI(sc->rtsregs.intrreg.erdp) | 2089 (rts->intrreg.erdp & XHCI_ERDP_LO_BUSY) | 2090 (value & ~0xF); 2091 if (value & XHCI_ERDP_LO_BUSY) { 2092 rts->intrreg.erdp &= ~XHCI_ERDP_LO_BUSY; 2093 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; 2094 } 2095 2096 rts->er_deq_seg = XHCI_ERDP_LO_SINDEX(value); 2097 2098 break; 2099 2100 case 0x1C: 2101 /* ERDP high bits */ 2102 rts->intrreg.erdp = (value << 32) | 2103 MASK_64_LO(sc->rtsregs.intrreg.erdp); 2104 2105 if (rts->er_events_cnt > 0) { 2106 uint64_t erdp; 2107 int erdp_i; 2108 2109 erdp = rts->intrreg.erdp & ~0xF; 2110 erdp_i = (erdp - rts->erstba_p->qwEvrsTablePtr) / 2111 sizeof(struct xhci_trb); 2112 2113 if (erdp_i <= rts->er_enq_idx) 2114 rts->er_events_cnt = rts->er_enq_idx - erdp_i; 2115 else 2116 rts->er_events_cnt = 2117 rts->erstba_p->dwEvrsTableSize - 2118 (erdp_i - rts->er_enq_idx); 2119 2120 DPRINTF(("pci_xhci: erdp 0x%lx, events cnt %u", 2121 erdp, rts->er_events_cnt)); 2122 } 2123 2124 break; 2125 2126 default: 2127 DPRINTF(("pci_xhci attempted write to RTS offset 0x%lx", 2128 offset)); 2129 break; 2130 } 2131 } 2132 2133 static uint64_t 2134 pci_xhci_portregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2135 { 2136 struct pci_xhci_portregs *portregs; 2137 int port; 2138 uint32_t reg; 2139 2140 if (sc->portregs == NULL) 2141 return (0); 2142 2143 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ; 2144 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ; 2145 2146 if (port > XHCI_MAX_DEVS) { 2147 DPRINTF(("pci_xhci: portregs_read port %d >= XHCI_MAX_DEVS", 2148 port)); 2149 2150 /* return default value for unused port */ 2151 return (XHCI_PS_SPEED_SET(3)); 2152 } 2153 2154 portregs = XHCI_PORTREG_PTR(sc, port); 2155 switch (offset) { 2156 case 0: 2157 reg = portregs->portsc; 2158 break; 2159 case 4: 2160 reg = portregs->portpmsc; 2161 break; 2162 case 8: 2163 reg = portregs->portli; 2164 break; 2165 case 12: 2166 reg = portregs->porthlpmc; 2167 break; 2168 default: 2169 DPRINTF(("pci_xhci: unaligned portregs read offset %#lx", 2170 offset)); 2171 reg = 0xffffffff; 2172 break; 2173 } 2174 2175 DPRINTF(("pci_xhci: portregs read offset 0x%lx port %u -> 0x%x", 2176 offset, port, reg)); 2177 2178 return (reg); 2179 } 2180 2181 static void 2182 pci_xhci_hostop_write(struct pci_xhci_softc *sc, uint64_t offset, 2183 uint64_t value) 2184 { 2185 offset -= XHCI_CAPLEN; 2186 2187 if (offset < 0x400) 2188 DPRINTF(("pci_xhci: hostop write offset 0x%lx: 0x%lx", 2189 offset, value)); 2190 2191 switch (offset) { 2192 case XHCI_USBCMD: 2193 
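		/*
		 * The value is masked to the USBCMD bits this emulation cares
		 * about (0x3F0F) before pci_xhci_usbcmd_write(), defined
		 * earlier in this file, applies the Run/Stop and
		 * controller-reset handling.
		 */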
sc->opregs.usbcmd = pci_xhci_usbcmd_write(sc, value & 0x3F0F); 2194 break; 2195 2196 case XHCI_USBSTS: 2197 /* clear bits on write */ 2198 sc->opregs.usbsts &= ~(value & 2199 (XHCI_STS_HSE|XHCI_STS_EINT|XHCI_STS_PCD|XHCI_STS_SSS| 2200 XHCI_STS_RSS|XHCI_STS_SRE|XHCI_STS_CNR)); 2201 break; 2202 2203 case XHCI_PAGESIZE: 2204 /* read only */ 2205 break; 2206 2207 case XHCI_DNCTRL: 2208 sc->opregs.dnctrl = value & 0xFFFF; 2209 break; 2210 2211 case XHCI_CRCR_LO: 2212 if (sc->opregs.crcr & XHCI_CRCR_LO_CRR) { 2213 sc->opregs.crcr &= ~(XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2214 sc->opregs.crcr |= value & 2215 (XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); 2216 } else { 2217 sc->opregs.crcr = MASK_64_HI(sc->opregs.crcr) | 2218 (value & (0xFFFFFFC0 | XHCI_CRCR_LO_RCS)); 2219 } 2220 break; 2221 2222 case XHCI_CRCR_HI: 2223 if (!(sc->opregs.crcr & XHCI_CRCR_LO_CRR)) { 2224 sc->opregs.crcr = MASK_64_LO(sc->opregs.crcr) | 2225 (value << 32); 2226 2227 sc->opregs.cr_p = XHCI_GADDR(sc, 2228 sc->opregs.crcr & ~0xF); 2229 } 2230 2231 if (sc->opregs.crcr & XHCI_CRCR_LO_CS) { 2232 /* Stop operation of Command Ring */ 2233 } 2234 2235 if (sc->opregs.crcr & XHCI_CRCR_LO_CA) { 2236 /* Abort command */ 2237 } 2238 2239 break; 2240 2241 case XHCI_DCBAAP_LO: 2242 sc->opregs.dcbaap = MASK_64_HI(sc->opregs.dcbaap) | 2243 (value & 0xFFFFFFC0); 2244 break; 2245 2246 case XHCI_DCBAAP_HI: 2247 sc->opregs.dcbaap = MASK_64_LO(sc->opregs.dcbaap) | 2248 (value << 32); 2249 sc->opregs.dcbaa_p = XHCI_GADDR(sc, sc->opregs.dcbaap & ~0x3FUL); 2250 2251 DPRINTF(("pci_xhci: opregs dcbaap = 0x%lx (vaddr 0x%lx)", 2252 sc->opregs.dcbaap, (uint64_t)sc->opregs.dcbaa_p)); 2253 break; 2254 2255 case XHCI_CONFIG: 2256 sc->opregs.config = value & 0x03FF; 2257 break; 2258 2259 default: 2260 if (offset >= 0x400) 2261 pci_xhci_portregs_write(sc, offset, value); 2262 2263 break; 2264 } 2265 } 2266 2267 2268 static void 2269 pci_xhci_write(struct pci_devinst *pi, int baridx, uint64_t offset, 2270 int size __unused, uint64_t value) 2271 { 2272 struct pci_xhci_softc *sc; 2273 2274 sc = pi->pi_arg; 2275 2276 assert(baridx == 0); 2277 2278 pthread_mutex_lock(&sc->mtx); 2279 if (offset < XHCI_CAPLEN) /* read only registers */ 2280 WPRINTF(("pci_xhci: write RO-CAPs offset %ld", offset)); 2281 else if (offset < sc->dboff) 2282 pci_xhci_hostop_write(sc, offset, value); 2283 else if (offset < sc->rtsoff) 2284 pci_xhci_dbregs_write(sc, offset, value); 2285 else if (offset < sc->regsend) 2286 pci_xhci_rtsregs_write(sc, offset, value); 2287 else 2288 WPRINTF(("pci_xhci: write invalid offset %ld", offset)); 2289 2290 pthread_mutex_unlock(&sc->mtx); 2291 } 2292 2293 static uint64_t 2294 pci_xhci_hostcap_read(struct pci_xhci_softc *sc, uint64_t offset) 2295 { 2296 uint64_t value; 2297 2298 switch (offset) { 2299 case XHCI_CAPLENGTH: /* 0x00 */ 2300 value = sc->caplength; 2301 break; 2302 2303 case XHCI_HCSPARAMS1: /* 0x04 */ 2304 value = sc->hcsparams1; 2305 break; 2306 2307 case XHCI_HCSPARAMS2: /* 0x08 */ 2308 value = sc->hcsparams2; 2309 break; 2310 2311 case XHCI_HCSPARAMS3: /* 0x0C */ 2312 value = sc->hcsparams3; 2313 break; 2314 2315 case XHCI_HCSPARAMS0: /* 0x10 */ 2316 value = sc->hccparams1; 2317 break; 2318 2319 case XHCI_DBOFF: /* 0x14 */ 2320 value = sc->dboff; 2321 break; 2322 2323 case XHCI_RTSOFF: /* 0x18 */ 2324 value = sc->rtsoff; 2325 break; 2326 2327 case XHCI_HCCPRAMS2: /* 0x1C */ 2328 value = sc->hccparams2; 2329 break; 2330 2331 default: 2332 value = 0; 2333 break; 2334 } 2335 2336 DPRINTF(("pci_xhci: hostcap read offset 0x%lx -> 0x%lx", 2337 offset, 
value)); 2338 2339 return (value); 2340 } 2341 2342 static uint64_t 2343 pci_xhci_hostop_read(struct pci_xhci_softc *sc, uint64_t offset) 2344 { 2345 uint64_t value; 2346 2347 offset = (offset - XHCI_CAPLEN); 2348 2349 switch (offset) { 2350 case XHCI_USBCMD: /* 0x00 */ 2351 value = sc->opregs.usbcmd; 2352 break; 2353 2354 case XHCI_USBSTS: /* 0x04 */ 2355 value = sc->opregs.usbsts; 2356 break; 2357 2358 case XHCI_PAGESIZE: /* 0x08 */ 2359 value = sc->opregs.pgsz; 2360 break; 2361 2362 case XHCI_DNCTRL: /* 0x14 */ 2363 value = sc->opregs.dnctrl; 2364 break; 2365 2366 case XHCI_CRCR_LO: /* 0x18 */ 2367 value = sc->opregs.crcr & XHCI_CRCR_LO_CRR; 2368 break; 2369 2370 case XHCI_CRCR_HI: /* 0x1C */ 2371 value = 0; 2372 break; 2373 2374 case XHCI_DCBAAP_LO: /* 0x30 */ 2375 value = sc->opregs.dcbaap & 0xFFFFFFFF; 2376 break; 2377 2378 case XHCI_DCBAAP_HI: /* 0x34 */ 2379 value = (sc->opregs.dcbaap >> 32) & 0xFFFFFFFF; 2380 break; 2381 2382 case XHCI_CONFIG: /* 0x38 */ 2383 value = sc->opregs.config; 2384 break; 2385 2386 default: 2387 if (offset >= 0x400) 2388 value = pci_xhci_portregs_read(sc, offset); 2389 else 2390 value = 0; 2391 2392 break; 2393 } 2394 2395 if (offset < 0x400) 2396 DPRINTF(("pci_xhci: hostop read offset 0x%lx -> 0x%lx", 2397 offset, value)); 2398 2399 return (value); 2400 } 2401 2402 static uint64_t 2403 pci_xhci_dbregs_read(struct pci_xhci_softc *sc __unused, 2404 uint64_t offset __unused) 2405 { 2406 /* read doorbell always returns 0 */ 2407 return (0); 2408 } 2409 2410 static uint64_t 2411 pci_xhci_rtsregs_read(struct pci_xhci_softc *sc, uint64_t offset) 2412 { 2413 uint32_t value; 2414 2415 offset -= sc->rtsoff; 2416 value = 0; 2417 2418 if (offset == XHCI_MFINDEX) { 2419 value = sc->rtsregs.mfindex; 2420 } else if (offset >= 0x20) { 2421 int item; 2422 uint32_t *p; 2423 2424 offset -= 0x20; 2425 item = offset % 32; 2426 2427 assert(offset < sizeof(sc->rtsregs.intrreg)); 2428 2429 p = &sc->rtsregs.intrreg.iman; 2430 p += item / sizeof(uint32_t); 2431 value = *p; 2432 } 2433 2434 DPRINTF(("pci_xhci: rtsregs read offset 0x%lx -> 0x%x", 2435 offset, value)); 2436 2437 return (value); 2438 } 2439 2440 static uint64_t 2441 pci_xhci_xecp_read(struct pci_xhci_softc *sc, uint64_t offset) 2442 { 2443 uint32_t value; 2444 2445 offset -= sc->regsend; 2446 value = 0; 2447 2448 switch (offset) { 2449 case 0: 2450 /* rev major | rev minor | next-cap | cap-id */ 2451 value = (0x02 << 24) | (4 << 8) | XHCI_ID_PROTOCOLS; 2452 break; 2453 case 4: 2454 /* name string = "USB" */ 2455 value = 0x20425355; 2456 break; 2457 case 8: 2458 /* psic | proto-defined | compat # | compat offset */ 2459 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb2_port_start; 2460 break; 2461 case 12: 2462 break; 2463 case 16: 2464 /* rev major | rev minor | next-cap | cap-id */ 2465 value = (0x03 << 24) | XHCI_ID_PROTOCOLS; 2466 break; 2467 case 20: 2468 /* name string = "USB" */ 2469 value = 0x20425355; 2470 break; 2471 case 24: 2472 /* psic | proto-defined | compat # | compat offset */ 2473 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb3_port_start; 2474 break; 2475 case 28: 2476 break; 2477 default: 2478 DPRINTF(("pci_xhci: xecp invalid offset 0x%lx", offset)); 2479 break; 2480 } 2481 2482 DPRINTF(("pci_xhci: xecp read offset 0x%lx -> 0x%x", 2483 offset, value)); 2484 2485 return (value); 2486 } 2487 2488 2489 static uint64_t 2490 pci_xhci_read(struct pci_devinst *pi, int baridx, uint64_t offset, int size) 2491 { 2492 struct pci_xhci_softc *sc; 2493 uint32_t value; 2494 2495 sc = pi->pi_arg; 2496 2497 assert(baridx == 
0); 2498 2499 pthread_mutex_lock(&sc->mtx); 2500 if (offset < XHCI_CAPLEN) 2501 value = pci_xhci_hostcap_read(sc, offset); 2502 else if (offset < sc->dboff) 2503 value = pci_xhci_hostop_read(sc, offset); 2504 else if (offset < sc->rtsoff) 2505 value = pci_xhci_dbregs_read(sc, offset); 2506 else if (offset < sc->regsend) 2507 value = pci_xhci_rtsregs_read(sc, offset); 2508 else if (offset < (sc->regsend + 4*32)) 2509 value = pci_xhci_xecp_read(sc, offset); 2510 else { 2511 value = 0; 2512 WPRINTF(("pci_xhci: read invalid offset %ld", offset)); 2513 } 2514 2515 pthread_mutex_unlock(&sc->mtx); 2516 2517 switch (size) { 2518 case 1: 2519 value &= 0xFF; 2520 break; 2521 case 2: 2522 value &= 0xFFFF; 2523 break; 2524 case 4: 2525 value &= 0xFFFFFFFF; 2526 break; 2527 } 2528 2529 return (value); 2530 } 2531 2532 static void 2533 pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm) 2534 { 2535 struct pci_xhci_portregs *port; 2536 struct pci_xhci_dev_emu *dev; 2537 struct xhci_trb evtrb; 2538 int error; 2539 2540 assert(portn <= XHCI_MAX_DEVS); 2541 2542 DPRINTF(("xhci reset port %d", portn)); 2543 2544 port = XHCI_PORTREG_PTR(sc, portn); 2545 dev = XHCI_DEVINST_PTR(sc, portn); 2546 if (dev) { 2547 port->portsc &= ~(XHCI_PS_PLS_MASK | XHCI_PS_PR | XHCI_PS_PRC); 2548 port->portsc |= XHCI_PS_PED | 2549 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2550 2551 if (warm && dev->dev_ue->ue_usbver == 3) { 2552 port->portsc |= XHCI_PS_WRC; 2553 } 2554 2555 if ((port->portsc & XHCI_PS_PRC) == 0) { 2556 port->portsc |= XHCI_PS_PRC; 2557 2558 pci_xhci_set_evtrb(&evtrb, portn, 2559 XHCI_TRB_ERROR_SUCCESS, 2560 XHCI_TRB_EVENT_PORT_STS_CHANGE); 2561 error = pci_xhci_insert_event(sc, &evtrb, 1); 2562 if (error != XHCI_TRB_ERROR_SUCCESS) 2563 DPRINTF(("xhci reset port insert event " 2564 "failed")); 2565 } 2566 } 2567 } 2568 2569 static void 2570 pci_xhci_init_port(struct pci_xhci_softc *sc, int portn) 2571 { 2572 struct pci_xhci_portregs *port; 2573 struct pci_xhci_dev_emu *dev; 2574 2575 port = XHCI_PORTREG_PTR(sc, portn); 2576 dev = XHCI_DEVINST_PTR(sc, portn); 2577 if (dev) { 2578 port->portsc = XHCI_PS_CCS | /* connected */ 2579 XHCI_PS_PP; /* port power */ 2580 2581 if (dev->dev_ue->ue_usbver == 2) { 2582 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL) | 2583 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2584 } else { 2585 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0) | 2586 XHCI_PS_PED | /* enabled */ 2587 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed); 2588 } 2589 2590 DPRINTF(("Init port %d 0x%x", portn, port->portsc)); 2591 } else { 2592 port->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP; 2593 DPRINTF(("Init empty port %d 0x%x", portn, port->portsc)); 2594 } 2595 } 2596 2597 static int 2598 pci_xhci_dev_intr(struct usb_hci *hci, int epctx) 2599 { 2600 struct pci_xhci_dev_emu *dev; 2601 struct xhci_dev_ctx *dev_ctx; 2602 struct xhci_trb evtrb; 2603 struct pci_xhci_softc *sc; 2604 struct pci_xhci_portregs *p; 2605 struct xhci_endp_ctx *ep_ctx; 2606 int error = 0; 2607 int dir_in; 2608 int epid; 2609 2610 dir_in = epctx & 0x80; 2611 epid = epctx & ~0x80; 2612 2613 /* HW endpoint contexts are 0-15; convert to epid based on dir */ 2614 epid = (epid * 2) + (dir_in ? 
1 : 0); 2615 2616 assert(epid >= 1 && epid <= 31); 2617 2618 dev = hci->hci_sc; 2619 sc = dev->xsc; 2620 2621 /* check if device is ready; OS has to initialise it */ 2622 if (sc->rtsregs.erstba_p == NULL || 2623 (sc->opregs.usbcmd & XHCI_CMD_RS) == 0 || 2624 dev->dev_ctx == NULL) 2625 return (0); 2626 2627 p = XHCI_PORTREG_PTR(sc, hci->hci_port); 2628 2629 /* raise event if link U3 (suspended) state */ 2630 if (XHCI_PS_PLS_GET(p->portsc) == 3) { 2631 p->portsc &= ~XHCI_PS_PLS_MASK; 2632 p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME); 2633 if ((p->portsc & XHCI_PS_PLC) != 0) 2634 return (0); 2635 2636 p->portsc |= XHCI_PS_PLC; 2637 2638 pci_xhci_set_evtrb(&evtrb, hci->hci_port, 2639 XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE); 2640 error = pci_xhci_insert_event(sc, &evtrb, 0); 2641 if (error != XHCI_TRB_ERROR_SUCCESS) 2642 goto done; 2643 } 2644 2645 dev_ctx = dev->dev_ctx; 2646 ep_ctx = &dev_ctx->ctx_ep[epid]; 2647 if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) { 2648 DPRINTF(("xhci device interrupt on disabled endpoint %d", 2649 epid)); 2650 return (0); 2651 } 2652 2653 DPRINTF(("xhci device interrupt on endpoint %d", epid)); 2654 2655 pci_xhci_device_doorbell(sc, hci->hci_port, epid, 0); 2656 2657 done: 2658 return (error); 2659 } 2660 2661 static int 2662 pci_xhci_dev_event(struct usb_hci *hci, enum hci_usbev evid __unused, 2663 void *param __unused) 2664 { 2665 DPRINTF(("xhci device event port %d", hci->hci_port)); 2666 return (0); 2667 } 2668 2669 /* 2670 * Each controller contains a "slot" node which contains a list of 2671 * child nodes each of which is a device. Each slot node's name 2672 * corresponds to a specific controller slot. These nodes 2673 * contain a "device" variable identifying the device model of the 2674 * USB device. For example: 2675 * 2676 * pci.0.1.0 2677 * .device="xhci" 2678 * .slot 2679 * .1 2680 * .device="tablet" 2681 */ 2682 static int 2683 pci_xhci_legacy_config(nvlist_t *nvl, const char *opts) 2684 { 2685 char node_name[16]; 2686 nvlist_t *slots_nvl, *slot_nvl; 2687 char *cp, *opt, *str, *tofree; 2688 int slot; 2689 2690 if (opts == NULL) 2691 return (0); 2692 2693 slots_nvl = create_relative_config_node(nvl, "slot"); 2694 slot = 1; 2695 tofree = str = strdup(opts); 2696 while ((opt = strsep(&str, ",")) != NULL) { 2697 /* device[=<config>] */ 2698 cp = strchr(opt, '='); 2699 if (cp != NULL) { 2700 *cp = '\0'; 2701 cp++; 2702 } 2703 2704 snprintf(node_name, sizeof(node_name), "%d", slot); 2705 slot++; 2706 slot_nvl = create_relative_config_node(slots_nvl, node_name); 2707 set_config_value_node(slot_nvl, "device", opt); 2708 2709 /* 2710 * NB: Given that we split on commas above, the legacy 2711 * format only supports a single option. 
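		 * A value such as "tablet=<opt>=<val>" (illustrative names)
		 * therefore carries at most one option for that slot; anything
		 * after a further comma is parsed as the device name of the
		 * next slot instead.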
2712 */ 2713 if (cp != NULL && *cp != '\0') 2714 pci_parse_legacy_config(slot_nvl, cp); 2715 } 2716 free(tofree); 2717 return (0); 2718 } 2719 2720 static int 2721 pci_xhci_parse_devices(struct pci_xhci_softc *sc, nvlist_t *nvl) 2722 { 2723 struct pci_xhci_dev_emu *dev; 2724 struct usb_devemu *ue; 2725 const nvlist_t *slots_nvl, *slot_nvl; 2726 const char *name, *device; 2727 char *cp; 2728 void *devsc, *cookie; 2729 long slot; 2730 int type, usb3_port, usb2_port, i, ndevices; 2731 2732 usb3_port = sc->usb3_port_start; 2733 usb2_port = sc->usb2_port_start; 2734 2735 sc->devices = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_dev_emu *)); 2736 sc->slots = calloc(XHCI_MAX_SLOTS, sizeof(struct pci_xhci_dev_emu *)); 2737 2738 ndevices = 0; 2739 2740 slots_nvl = find_relative_config_node(nvl, "slot"); 2741 if (slots_nvl == NULL) 2742 goto portsfinal; 2743 2744 cookie = NULL; 2745 while ((name = nvlist_next(slots_nvl, &type, &cookie)) != NULL) { 2746 if (usb2_port == ((sc->usb2_port_start) + XHCI_MAX_DEVS/2) || 2747 usb3_port == ((sc->usb3_port_start) + XHCI_MAX_DEVS/2)) { 2748 WPRINTF(("pci_xhci max number of USB 2 or 3 " 2749 "devices reached, max %d", XHCI_MAX_DEVS/2)); 2750 goto bad; 2751 } 2752 2753 if (type != NV_TYPE_NVLIST) { 2754 EPRINTLN( 2755 "pci_xhci: config variable '%s' under slot node", 2756 name); 2757 goto bad; 2758 } 2759 2760 slot = strtol(name, &cp, 0); 2761 if (*cp != '\0' || slot <= 0 || slot > XHCI_MAX_SLOTS) { 2762 EPRINTLN("pci_xhci: invalid slot '%s'", name); 2763 goto bad; 2764 } 2765 2766 if (XHCI_SLOTDEV_PTR(sc, slot) != NULL) { 2767 EPRINTLN("pci_xhci: duplicate slot '%s'", name); 2768 goto bad; 2769 } 2770 2771 slot_nvl = nvlist_get_nvlist(slots_nvl, name); 2772 device = get_config_value_node(slot_nvl, "device"); 2773 if (device == NULL) { 2774 EPRINTLN( 2775 "pci_xhci: missing \"device\" value for slot '%s'", 2776 name); 2777 goto bad; 2778 } 2779 2780 ue = usb_emu_finddev(device); 2781 if (ue == NULL) { 2782 EPRINTLN("pci_xhci: unknown device model \"%s\"", 2783 device); 2784 goto bad; 2785 } 2786 2787 DPRINTF(("pci_xhci adding device %s", device)); 2788 2789 dev = calloc(1, sizeof(struct pci_xhci_dev_emu)); 2790 dev->xsc = sc; 2791 dev->hci.hci_sc = dev; 2792 dev->hci.hci_intr = pci_xhci_dev_intr; 2793 dev->hci.hci_event = pci_xhci_dev_event; 2794 2795 if (ue->ue_usbver == 2) { 2796 if (usb2_port == sc->usb2_port_start + 2797 XHCI_MAX_DEVS / 2) { 2798 WPRINTF(("pci_xhci max number of USB 2 devices " 2799 "reached, max %d", XHCI_MAX_DEVS / 2)); 2800 goto bad; 2801 } 2802 dev->hci.hci_port = usb2_port; 2803 usb2_port++; 2804 } else { 2805 if (usb3_port == sc->usb3_port_start + 2806 XHCI_MAX_DEVS / 2) { 2807 WPRINTF(("pci_xhci max number of USB 3 devices " 2808 "reached, max %d", XHCI_MAX_DEVS / 2)); 2809 goto bad; 2810 } 2811 dev->hci.hci_port = usb3_port; 2812 usb3_port++; 2813 } 2814 XHCI_DEVINST_PTR(sc, dev->hci.hci_port) = dev; 2815 2816 dev->hci.hci_address = 0; 2817 devsc = ue->ue_init(&dev->hci, nvl); 2818 if (devsc == NULL) { 2819 goto bad; 2820 } 2821 2822 dev->dev_ue = ue; 2823 dev->dev_sc = devsc; 2824 2825 XHCI_SLOTDEV_PTR(sc, slot) = dev; 2826 ndevices++; 2827 } 2828 2829 portsfinal: 2830 sc->portregs = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_portregs)); 2831 2832 if (ndevices > 0) { 2833 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 2834 pci_xhci_init_port(sc, i); 2835 } 2836 } else { 2837 WPRINTF(("pci_xhci no USB devices configured")); 2838 } 2839 return (0); 2840 2841 bad: 2842 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 2843 free(XHCI_DEVINST_PTR(sc, 
i)); 2844 } 2845 2846 free(sc->devices); 2847 free(sc->slots); 2848 2849 return (-1); 2850 } 2851 2852 static int 2853 pci_xhci_init(struct pci_devinst *pi, nvlist_t *nvl) 2854 { 2855 struct pci_xhci_softc *sc; 2856 int error; 2857 2858 if (xhci_in_use) { 2859 WPRINTF(("pci_xhci controller already defined")); 2860 return (-1); 2861 } 2862 xhci_in_use = 1; 2863 2864 sc = calloc(1, sizeof(struct pci_xhci_softc)); 2865 pi->pi_arg = sc; 2866 sc->xsc_pi = pi; 2867 2868 sc->usb2_port_start = (XHCI_MAX_DEVS/2) + 1; 2869 sc->usb3_port_start = 1; 2870 2871 /* discover devices */ 2872 error = pci_xhci_parse_devices(sc, nvl); 2873 if (error < 0) 2874 goto done; 2875 else 2876 error = 0; 2877 2878 sc->caplength = XHCI_SET_CAPLEN(XHCI_CAPLEN) | 2879 XHCI_SET_HCIVERSION(0x0100); 2880 sc->hcsparams1 = XHCI_SET_HCSP1_MAXPORTS(XHCI_MAX_DEVS) | 2881 XHCI_SET_HCSP1_MAXINTR(1) | /* interrupters */ 2882 XHCI_SET_HCSP1_MAXSLOTS(XHCI_MAX_SLOTS); 2883 sc->hcsparams2 = XHCI_SET_HCSP2_ERSTMAX(XHCI_ERST_MAX) | 2884 XHCI_SET_HCSP2_IST(0x04); 2885 sc->hcsparams3 = 0; /* no latency */ 2886 sc->hccparams1 = XHCI_SET_HCCP1_AC64(1) | /* 64-bit addrs */ 2887 XHCI_SET_HCCP1_NSS(1) | /* no 2nd-streams */ 2888 XHCI_SET_HCCP1_SPC(1) | /* short packet */ 2889 XHCI_SET_HCCP1_MAXPSA(XHCI_STREAMS_MAX); 2890 sc->hccparams2 = XHCI_SET_HCCP2_LEC(1) | 2891 XHCI_SET_HCCP2_U3C(1); 2892 sc->dboff = XHCI_SET_DOORBELL(XHCI_CAPLEN + XHCI_PORTREGS_START + 2893 XHCI_MAX_DEVS * sizeof(struct pci_xhci_portregs)); 2894 2895 /* dboff must be 32-bit aligned */ 2896 if (sc->dboff & 0x3) 2897 sc->dboff = (sc->dboff + 0x3) & ~0x3; 2898 2899 /* rtsoff must be 32-bytes aligned */ 2900 sc->rtsoff = XHCI_SET_RTSOFFSET(sc->dboff + (XHCI_MAX_SLOTS+1) * 32); 2901 if (sc->rtsoff & 0x1F) 2902 sc->rtsoff = (sc->rtsoff + 0x1F) & ~0x1F; 2903 2904 DPRINTF(("pci_xhci dboff: 0x%x, rtsoff: 0x%x", sc->dboff, 2905 sc->rtsoff)); 2906 2907 sc->opregs.usbsts = XHCI_STS_HCH; 2908 sc->opregs.pgsz = XHCI_PAGESIZE_4K; 2909 2910 pci_xhci_reset(sc); 2911 2912 sc->regsend = sc->rtsoff + 0x20 + 32; /* only 1 intrpter */ 2913 2914 /* 2915 * Set extended capabilities pointer to be after regsend; 2916 * value of xecp field is 32-bit offset. 
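	 * (Per the xHCI spec, the guest multiplies the HCCPARAMS1 xECP field
	 * by four to obtain the byte offset of the first extended capability
	 * from the start of the register space, hence the regsend/4 below.)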
2917 */ 2918 sc->hccparams1 |= XHCI_SET_HCCP1_XECP(sc->regsend/4); 2919 2920 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1E31); 2921 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086); 2922 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SERIALBUS); 2923 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_SERIALBUS_USB); 2924 pci_set_cfgdata8(pi, PCIR_PROGIF,PCIP_SERIALBUS_USB_XHCI); 2925 pci_set_cfgdata8(pi, PCI_USBREV, PCI_USB_REV_3_0); 2926 2927 pci_emul_add_msicap(pi, 1); 2928 2929 /* regsend + xecp registers */ 2930 pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, sc->regsend + 4*32); 2931 DPRINTF(("pci_xhci pci_emu_alloc: %d", sc->regsend + 4*32)); 2932 2933 2934 pci_lintr_request(pi); 2935 2936 pthread_mutex_init(&sc->mtx, NULL); 2937 2938 done: 2939 if (error) { 2940 free(sc); 2941 } 2942 2943 return (error); 2944 } 2945 2946 #ifdef BHYVE_SNAPSHOT 2947 static void 2948 pci_xhci_map_devs_slots(struct pci_xhci_softc *sc, int maps[]) 2949 { 2950 int i, j; 2951 struct pci_xhci_dev_emu *dev, *slot; 2952 2953 memset(maps, 0, sizeof(maps[0]) * XHCI_MAX_SLOTS); 2954 2955 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 2956 for (j = 1; j <= XHCI_MAX_DEVS; j++) { 2957 slot = XHCI_SLOTDEV_PTR(sc, i); 2958 dev = XHCI_DEVINST_PTR(sc, j); 2959 2960 if (slot == dev) 2961 maps[i] = j; 2962 } 2963 } 2964 } 2965 2966 static int 2967 pci_xhci_snapshot_ep(struct pci_xhci_softc *sc, struct pci_xhci_dev_emu *dev, 2968 int idx, struct vm_snapshot_meta *meta) 2969 { 2970 int k; 2971 int ret; 2972 struct usb_data_xfer *xfer; 2973 struct usb_data_xfer_block *xfer_block; 2974 2975 /* some sanity checks */ 2976 if (meta->op == VM_SNAPSHOT_SAVE) 2977 xfer = dev->eps[idx].ep_xfer; 2978 2979 SNAPSHOT_VAR_OR_LEAVE(xfer, meta, ret, done); 2980 if (xfer == NULL) { 2981 ret = 0; 2982 goto done; 2983 } 2984 2985 if (meta->op == VM_SNAPSHOT_RESTORE) { 2986 pci_xhci_init_ep(dev, idx); 2987 xfer = dev->eps[idx].ep_xfer; 2988 } 2989 2990 /* save / restore proper */ 2991 for (k = 0; k < USB_MAX_XFER_BLOCKS; k++) { 2992 xfer_block = &xfer->data[k]; 2993 2994 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->xsc_pi->pi_vmctx, 2995 xfer_block->buf, XHCI_GADDR_SIZE(xfer_block->buf), true, 2996 meta, ret, done); 2997 SNAPSHOT_VAR_OR_LEAVE(xfer_block->blen, meta, ret, done); 2998 SNAPSHOT_VAR_OR_LEAVE(xfer_block->bdone, meta, ret, done); 2999 SNAPSHOT_VAR_OR_LEAVE(xfer_block->processed, meta, ret, done); 3000 SNAPSHOT_VAR_OR_LEAVE(xfer_block->hci_data, meta, ret, done); 3001 SNAPSHOT_VAR_OR_LEAVE(xfer_block->ccs, meta, ret, done); 3002 SNAPSHOT_VAR_OR_LEAVE(xfer_block->streamid, meta, ret, done); 3003 SNAPSHOT_VAR_OR_LEAVE(xfer_block->trbnext, meta, ret, done); 3004 } 3005 3006 SNAPSHOT_VAR_OR_LEAVE(xfer->ureq, meta, ret, done); 3007 if (xfer->ureq) { 3008 /* xfer->ureq is not allocated at restore time */ 3009 if (meta->op == VM_SNAPSHOT_RESTORE) 3010 xfer->ureq = malloc(sizeof(struct usb_device_request)); 3011 3012 SNAPSHOT_BUF_OR_LEAVE(xfer->ureq, 3013 sizeof(struct usb_device_request), 3014 meta, ret, done); 3015 } 3016 3017 SNAPSHOT_VAR_OR_LEAVE(xfer->ndata, meta, ret, done); 3018 SNAPSHOT_VAR_OR_LEAVE(xfer->head, meta, ret, done); 3019 SNAPSHOT_VAR_OR_LEAVE(xfer->tail, meta, ret, done); 3020 3021 done: 3022 return (ret); 3023 } 3024 3025 static int 3026 pci_xhci_snapshot(struct vm_snapshot_meta *meta) 3027 { 3028 int i, j; 3029 int ret; 3030 int restore_idx; 3031 struct pci_devinst *pi; 3032 struct pci_xhci_softc *sc; 3033 struct pci_xhci_portregs *port; 3034 struct pci_xhci_dev_emu *dev; 3035 char dname[SNAP_DEV_NAME_LEN]; 3036 int maps[XHCI_MAX_SLOTS + 1]; 3037 3038 pi = 
meta->dev_data; 3039 sc = pi->pi_arg; 3040 3041 SNAPSHOT_VAR_OR_LEAVE(sc->caplength, meta, ret, done); 3042 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams1, meta, ret, done); 3043 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams2, meta, ret, done); 3044 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams3, meta, ret, done); 3045 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams1, meta, ret, done); 3046 SNAPSHOT_VAR_OR_LEAVE(sc->dboff, meta, ret, done); 3047 SNAPSHOT_VAR_OR_LEAVE(sc->rtsoff, meta, ret, done); 3048 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams2, meta, ret, done); 3049 SNAPSHOT_VAR_OR_LEAVE(sc->regsend, meta, ret, done); 3050 3051 /* opregs */ 3052 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbcmd, meta, ret, done); 3053 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbsts, meta, ret, done); 3054 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.pgsz, meta, ret, done); 3055 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dnctrl, meta, ret, done); 3056 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.crcr, meta, ret, done); 3057 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dcbaap, meta, ret, done); 3058 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.config, meta, ret, done); 3059 3060 /* opregs.cr_p */ 3061 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(pi->pi_vmctx, sc->opregs.cr_p, 3062 XHCI_GADDR_SIZE(sc->opregs.cr_p), true, meta, ret, done); 3063 3064 /* opregs.dcbaa_p */ 3065 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(pi->pi_vmctx, sc->opregs.dcbaa_p, 3066 XHCI_GADDR_SIZE(sc->opregs.dcbaa_p), true, meta, ret, done); 3067 3068 /* rtsregs */ 3069 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.mfindex, meta, ret, done); 3070 3071 /* rtsregs.intrreg */ 3072 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.iman, meta, ret, done); 3073 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.imod, meta, ret, done); 3074 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstsz, meta, ret, done); 3075 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.rsvd, meta, ret, done); 3076 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstba, meta, ret, done); 3077 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erdp, meta, ret, done); 3078 3079 /* rtsregs.erstba_p */ 3080 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(pi->pi_vmctx, sc->rtsregs.erstba_p, 3081 XHCI_GADDR_SIZE(sc->rtsregs.erstba_p), true, meta, ret, done); 3082 3083 /* rtsregs.erst_p */ 3084 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(pi->pi_vmctx, sc->rtsregs.erst_p, 3085 XHCI_GADDR_SIZE(sc->rtsregs.erst_p), true, meta, ret, done); 3086 3087 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_deq_seg, meta, ret, done); 3088 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_idx, meta, ret, done); 3089 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_seg, meta, ret, done); 3090 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_events_cnt, meta, ret, done); 3091 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.event_pcs, meta, ret, done); 3092 3093 /* sanity checking */ 3094 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3095 dev = XHCI_DEVINST_PTR(sc, i); 3096 if (dev == NULL) 3097 continue; 3098 3099 if (meta->op == VM_SNAPSHOT_SAVE) 3100 restore_idx = i; 3101 SNAPSHOT_VAR_OR_LEAVE(restore_idx, meta, ret, done); 3102 3103 /* check if the restored device (when restoring) is sane */ 3104 if (restore_idx != i) { 3105 EPRINTLN("%s: idx not matching: actual: %d, " 3106 "expected: %d", __func__, restore_idx, i); 3107 ret = EINVAL; 3108 goto done; 3109 } 3110 3111 if (meta->op == VM_SNAPSHOT_SAVE) { 3112 memset(dname, 0, sizeof(dname)); 3113 strncpy(dname, dev->dev_ue->ue_emu, sizeof(dname) - 1); 3114 } 3115 3116 SNAPSHOT_BUF_OR_LEAVE(dname, sizeof(dname), meta, ret, done); 3117 3118 if (meta->op == VM_SNAPSHOT_RESTORE) { 3119 dname[sizeof(dname) - 1] = '\0'; 3120 if (strcmp(dev->dev_ue->ue_emu, dname)) { 3121 EPRINTLN("%s: device names mismatch: " 3122 
"actual: %s, expected: %s", 3123 __func__, dname, dev->dev_ue->ue_emu); 3124 3125 ret = EINVAL; 3126 goto done; 3127 } 3128 } 3129 } 3130 3131 /* portregs */ 3132 for (i = 1; i <= XHCI_MAX_DEVS; i++) { 3133 port = XHCI_PORTREG_PTR(sc, i); 3134 dev = XHCI_DEVINST_PTR(sc, i); 3135 3136 if (dev == NULL) 3137 continue; 3138 3139 SNAPSHOT_VAR_OR_LEAVE(port->portsc, meta, ret, done); 3140 SNAPSHOT_VAR_OR_LEAVE(port->portpmsc, meta, ret, done); 3141 SNAPSHOT_VAR_OR_LEAVE(port->portli, meta, ret, done); 3142 SNAPSHOT_VAR_OR_LEAVE(port->porthlpmc, meta, ret, done); 3143 } 3144 3145 /* slots */ 3146 if (meta->op == VM_SNAPSHOT_SAVE) 3147 pci_xhci_map_devs_slots(sc, maps); 3148 3149 for (i = 1; i <= XHCI_MAX_SLOTS; i++) { 3150 SNAPSHOT_VAR_OR_LEAVE(maps[i], meta, ret, done); 3151 3152 if (meta->op == VM_SNAPSHOT_SAVE) { 3153 dev = XHCI_SLOTDEV_PTR(sc, i); 3154 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 3155 if (maps[i] != 0) 3156 dev = XHCI_DEVINST_PTR(sc, maps[i]); 3157 else 3158 dev = NULL; 3159 3160 XHCI_SLOTDEV_PTR(sc, i) = dev; 3161 } else { 3162 /* error */ 3163 ret = EINVAL; 3164 goto done; 3165 } 3166 3167 if (dev == NULL) 3168 continue; 3169 3170 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(pi->pi_vmctx, dev->dev_ctx, 3171 XHCI_GADDR_SIZE(dev->dev_ctx), true, meta, ret, done); 3172 3173 if (dev->dev_ctx != NULL) { 3174 for (j = 1; j < XHCI_MAX_ENDPOINTS; j++) { 3175 ret = pci_xhci_snapshot_ep(sc, dev, j, meta); 3176 if (ret != 0) 3177 goto done; 3178 } 3179 } 3180 3181 SNAPSHOT_VAR_OR_LEAVE(dev->dev_slotstate, meta, ret, done); 3182 3183 /* devices[i]->dev_sc */ 3184 dev->dev_ue->ue_snapshot(dev->dev_sc, meta); 3185 3186 /* devices[i]->hci */ 3187 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_address, meta, ret, done); 3188 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_port, meta, ret, done); 3189 } 3190 3191 SNAPSHOT_VAR_OR_LEAVE(sc->usb2_port_start, meta, ret, done); 3192 SNAPSHOT_VAR_OR_LEAVE(sc->usb3_port_start, meta, ret, done); 3193 3194 done: 3195 return (ret); 3196 } 3197 #endif 3198 3199 static const struct pci_devemu pci_de_xhci = { 3200 .pe_emu = "xhci", 3201 .pe_init = pci_xhci_init, 3202 .pe_legacy_config = pci_xhci_legacy_config, 3203 .pe_barwrite = pci_xhci_write, 3204 .pe_barread = pci_xhci_read, 3205 #ifdef BHYVE_SNAPSHOT 3206 .pe_snapshot = pci_xhci_snapshot, 3207 #endif 3208 }; 3209 PCI_EMUL_SET(pci_de_xhci); 3210