1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2018, Joyent, Inc. 25 */ 26 27 /* 28 * EHCI Host Controller Driver (EHCI) 29 * 30 * The EHCI driver is a software driver which interfaces to the Universal 31 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to 32 * the Host Controller is defined by the EHCI Host Controller Interface. 33 * 34 * This module contains the main EHCI driver code which handles all USB 35 * transfers, bandwidth allocations and other general functionalities. 36 */ 37 38 #include <sys/usb/hcd/ehci/ehcid.h> 39 #include <sys/usb/hcd/ehci/ehci_isoch.h> 40 #include <sys/usb/hcd/ehci/ehci_xfer.h> 41 42 /* 43 * EHCI MSI tunable: 44 * 45 * By default MSI is enabled on all supported platforms except for the 46 * EHCI controller of ULI1575 South bridge. 
47 */ 48 boolean_t ehci_enable_msi = B_TRUE; 49 50 /* Pointer to the state structure */ 51 extern void *ehci_statep; 52 53 extern void ehci_handle_endpoint_reclaimation(ehci_state_t *); 54 55 extern uint_t ehci_vt62x2_workaround; 56 extern int force_ehci_off; 57 58 /* Adjustable variables for the size of the pools */ 59 int ehci_qh_pool_size = EHCI_QH_POOL_SIZE; 60 int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE; 61 62 /* 63 * Initialize the values which the order of 32ms intr qh are executed 64 * by the host controller in the lattice tree. 65 */ 66 static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] = 67 {0x00, 0x10, 0x08, 0x18, 68 0x04, 0x14, 0x0c, 0x1c, 69 0x02, 0x12, 0x0a, 0x1a, 70 0x06, 0x16, 0x0e, 0x1e, 71 0x01, 0x11, 0x09, 0x19, 72 0x05, 0x15, 0x0d, 0x1d, 73 0x03, 0x13, 0x0b, 0x1b, 74 0x07, 0x17, 0x0f, 0x1f}; 75 76 /* 77 * Initialize the values which are used to calculate start split mask 78 * for the low/full/high speed interrupt and isochronous endpoints. 79 */ 80 static uint_t ehci_start_split_mask[15] = { 81 /* 82 * For high/full/low speed usb devices. For high speed 83 * device with polling interval greater than or equal 84 * to 8us (125us). 85 */ 86 0x01, /* 00000001 */ 87 0x02, /* 00000010 */ 88 0x04, /* 00000100 */ 89 0x08, /* 00001000 */ 90 0x10, /* 00010000 */ 91 0x20, /* 00100000 */ 92 0x40, /* 01000000 */ 93 0x80, /* 10000000 */ 94 95 /* Only for high speed devices with polling interval 4us */ 96 0x11, /* 00010001 */ 97 0x22, /* 00100010 */ 98 0x44, /* 01000100 */ 99 0x88, /* 10001000 */ 100 101 /* Only for high speed devices with polling interval 2us */ 102 0x55, /* 01010101 */ 103 0xaa, /* 10101010 */ 104 105 /* Only for high speed devices with polling interval 1us */ 106 0xff /* 11111111 */ 107 }; 108 109 /* 110 * Initialize the values which are used to calculate complete split mask 111 * for the low/full speed interrupt and isochronous endpoints. 
112 */ 113 static uint_t ehci_intr_complete_split_mask[7] = { 114 /* Only full/low speed devices */ 115 0x1c, /* 00011100 */ 116 0x38, /* 00111000 */ 117 0x70, /* 01110000 */ 118 0xe0, /* 11100000 */ 119 0x00, /* Need FSTN feature */ 120 0x00, /* Need FSTN feature */ 121 0x00 /* Need FSTN feature */ 122 }; 123 124 125 /* 126 * EHCI Internal Function Prototypes 127 */ 128 129 /* Host Controller Driver (HCD) initialization functions */ 130 void ehci_set_dma_attributes(ehci_state_t *ehcip); 131 int ehci_allocate_pools(ehci_state_t *ehcip); 132 void ehci_decode_ddi_dma_addr_bind_handle_result( 133 ehci_state_t *ehcip, 134 int result); 135 int ehci_map_regs(ehci_state_t *ehcip); 136 int ehci_register_intrs_and_init_mutex( 137 ehci_state_t *ehcip); 138 static int ehci_add_intrs(ehci_state_t *ehcip, 139 int intr_type); 140 int ehci_init_ctlr(ehci_state_t *ehcip, 141 int init_type); 142 static int ehci_take_control(ehci_state_t *ehcip); 143 static int ehci_init_periodic_frame_lst_table( 144 ehci_state_t *ehcip); 145 static void ehci_build_interrupt_lattice( 146 ehci_state_t *ehcip); 147 usba_hcdi_ops_t *ehci_alloc_hcdi_ops(ehci_state_t *ehcip); 148 149 /* Host Controller Driver (HCD) deinitialization functions */ 150 int ehci_cleanup(ehci_state_t *ehcip); 151 static void ehci_rem_intrs(ehci_state_t *ehcip); 152 int ehci_cpr_suspend(ehci_state_t *ehcip); 153 int ehci_cpr_resume(ehci_state_t *ehcip); 154 155 /* Bandwidth Allocation functions */ 156 int ehci_allocate_bandwidth(ehci_state_t *ehcip, 157 usba_pipe_handle_data_t *ph, 158 uint_t *pnode, 159 uchar_t *smask, 160 uchar_t *cmask); 161 static int ehci_allocate_high_speed_bandwidth( 162 ehci_state_t *ehcip, 163 usba_pipe_handle_data_t *ph, 164 uint_t *hnode, 165 uchar_t *smask, 166 uchar_t *cmask); 167 static int ehci_allocate_classic_tt_bandwidth( 168 ehci_state_t *ehcip, 169 usba_pipe_handle_data_t *ph, 170 uint_t pnode); 171 void ehci_deallocate_bandwidth(ehci_state_t *ehcip, 172 usba_pipe_handle_data_t *ph, 173 
uint_t pnode, 174 uchar_t smask, 175 uchar_t cmask); 176 static void ehci_deallocate_high_speed_bandwidth( 177 ehci_state_t *ehcip, 178 usba_pipe_handle_data_t *ph, 179 uint_t hnode, 180 uchar_t smask, 181 uchar_t cmask); 182 static void ehci_deallocate_classic_tt_bandwidth( 183 ehci_state_t *ehcip, 184 usba_pipe_handle_data_t *ph, 185 uint_t pnode); 186 static int ehci_compute_high_speed_bandwidth( 187 ehci_state_t *ehcip, 188 usb_ep_descr_t *endpoint, 189 usb_port_status_t port_status, 190 uint_t *sbandwidth, 191 uint_t *cbandwidth); 192 static int ehci_compute_classic_bandwidth( 193 usb_ep_descr_t *endpoint, 194 usb_port_status_t port_status, 195 uint_t *bandwidth); 196 int ehci_adjust_polling_interval( 197 ehci_state_t *ehcip, 198 usb_ep_descr_t *endpoint, 199 usb_port_status_t port_status); 200 static int ehci_adjust_high_speed_polling_interval( 201 ehci_state_t *ehcip, 202 usb_ep_descr_t *endpoint); 203 static uint_t ehci_lattice_height(uint_t interval); 204 static uint_t ehci_lattice_parent(uint_t node); 205 static uint_t ehci_find_periodic_node( 206 uint_t leaf, 207 int interval); 208 static uint_t ehci_leftmost_leaf(uint_t node, 209 uint_t height); 210 static uint_t ehci_pow_2(uint_t x); 211 static uint_t ehci_log_2(uint_t x); 212 static int ehci_find_bestfit_hs_mask( 213 ehci_state_t *ehcip, 214 uchar_t *smask, 215 uint_t *pnode, 216 usb_ep_descr_t *endpoint, 217 uint_t bandwidth, 218 int interval); 219 static int ehci_find_bestfit_ls_intr_mask( 220 ehci_state_t *ehcip, 221 uchar_t *smask, 222 uchar_t *cmask, 223 uint_t *pnode, 224 uint_t sbandwidth, 225 uint_t cbandwidth, 226 int interval); 227 static int ehci_find_bestfit_sitd_in_mask( 228 ehci_state_t *ehcip, 229 uchar_t *smask, 230 uchar_t *cmask, 231 uint_t *pnode, 232 uint_t sbandwidth, 233 uint_t cbandwidth, 234 int interval); 235 static int ehci_find_bestfit_sitd_out_mask( 236 ehci_state_t *ehcip, 237 uchar_t *smask, 238 uint_t *pnode, 239 uint_t sbandwidth, 240 int interval); 241 static uint_t 
ehci_calculate_bw_availability_mask( 242 ehci_state_t *ehcip, 243 uint_t bandwidth, 244 int leaf, 245 int leaf_count, 246 uchar_t *bw_mask); 247 static void ehci_update_bw_availability( 248 ehci_state_t *ehcip, 249 int bandwidth, 250 int leftmost_leaf, 251 int leaf_count, 252 uchar_t mask); 253 254 /* Miscellaneous functions */ 255 ehci_state_t *ehci_obtain_state( 256 dev_info_t *dip); 257 int ehci_state_is_operational( 258 ehci_state_t *ehcip); 259 int ehci_do_soft_reset( 260 ehci_state_t *ehcip); 261 usb_req_attrs_t ehci_get_xfer_attrs(ehci_state_t *ehcip, 262 ehci_pipe_private_t *pp, 263 ehci_trans_wrapper_t *tw); 264 usb_frame_number_t ehci_get_current_frame_number( 265 ehci_state_t *ehcip); 266 static void ehci_cpr_cleanup( 267 ehci_state_t *ehcip); 268 int ehci_wait_for_sof( 269 ehci_state_t *ehcip); 270 void ehci_toggle_scheduler( 271 ehci_state_t *ehcip); 272 void ehci_print_caps(ehci_state_t *ehcip); 273 void ehci_print_regs(ehci_state_t *ehcip); 274 void ehci_print_qh(ehci_state_t *ehcip, 275 ehci_qh_t *qh); 276 void ehci_print_qtd(ehci_state_t *ehcip, 277 ehci_qtd_t *qtd); 278 void ehci_create_stats(ehci_state_t *ehcip); 279 void ehci_destroy_stats(ehci_state_t *ehcip); 280 void ehci_do_intrs_stats(ehci_state_t *ehcip, 281 int val); 282 void ehci_do_byte_stats(ehci_state_t *ehcip, 283 size_t len, 284 uint8_t attr, 285 uint8_t addr); 286 287 /* 288 * check if this ehci controller can support PM 289 */ 290 int 291 ehci_hcdi_pm_support(dev_info_t *dip) 292 { 293 ehci_state_t *ehcip = ddi_get_soft_state(ehci_statep, 294 ddi_get_instance(dip)); 295 296 if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) && 297 (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) || 298 299 ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) && 300 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) || 301 302 (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) { 303 304 return (USB_SUCCESS); 305 } 306 307 return (USB_FAILURE); 308 } 309 310 void 311 ehci_dma_attr_workaround(ehci_state_t 
*ehcip) 312 { 313 /* 314 * Some Nvidia chips can not handle qh dma address above 2G. 315 * The bit 31 of the dma address might be omitted and it will 316 * cause system crash or other unpredicable result. So force 317 * the dma address allocated below 2G to make ehci work. 318 */ 319 if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) { 320 switch (ehcip->ehci_device_id) { 321 case PCI_DEVICE_NVIDIA_CK804: 322 case PCI_DEVICE_NVIDIA_MCP04: 323 USB_DPRINTF_L2(PRINT_MASK_ATTA, 324 ehcip->ehci_log_hdl, 325 "ehci_dma_attr_workaround: NVIDIA dma " 326 "workaround enabled, force dma address " 327 "to be allocated below 2G"); 328 ehcip->ehci_dma_attr.dma_attr_addr_hi = 329 0x7fffffffull; 330 break; 331 default: 332 break; 333 334 } 335 } 336 } 337 338 /* 339 * Host Controller Driver (HCD) initialization functions 340 */ 341 342 /* 343 * ehci_set_dma_attributes: 344 * 345 * Set the limits in the DMA attributes structure. Most of the values used 346 * in the DMA limit structures are the default values as specified by the 347 * Writing PCI device drivers document. 348 */ 349 void 350 ehci_set_dma_attributes(ehci_state_t *ehcip) 351 { 352 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 353 "ehci_set_dma_attributes:"); 354 355 /* Initialize the DMA attributes */ 356 ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0; 357 ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull; 358 ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull; 359 360 /* 32 bit addressing */ 361 ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX; 362 363 /* Byte alignment */ 364 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 365 366 /* 367 * Since PCI specification is byte alignment, the 368 * burst size field should be set to 1 for PCI devices. 
369 */ 370 ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1; 371 372 ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1; 373 ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER; 374 ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull; 375 ehcip->ehci_dma_attr.dma_attr_sgllen = 1; 376 ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR; 377 ehcip->ehci_dma_attr.dma_attr_flags = 0; 378 ehci_dma_attr_workaround(ehcip); 379 } 380 381 382 /* 383 * ehci_allocate_pools: 384 * 385 * Allocate the system memory for the Endpoint Descriptor (QH) and for the 386 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned 387 * to a 16 byte boundary. 388 */ 389 int 390 ehci_allocate_pools(ehci_state_t *ehcip) 391 { 392 ddi_device_acc_attr_t dev_attr; 393 size_t real_length; 394 int result; 395 uint_t ccount; 396 int i; 397 398 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 399 "ehci_allocate_pools:"); 400 401 /* The host controller will be little endian */ 402 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 403 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 404 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 405 406 /* Byte alignment */ 407 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT; 408 409 /* Allocate the QTD pool DMA handle */ 410 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr, 411 DDI_DMA_SLEEP, 0, 412 &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) { 413 414 goto failure; 415 } 416 417 /* Allocate the memory for the QTD pool */ 418 if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle, 419 ehci_qtd_pool_size * sizeof (ehci_qtd_t), 420 &dev_attr, 421 DDI_DMA_CONSISTENT, 422 DDI_DMA_SLEEP, 423 0, 424 (caddr_t *)&ehcip->ehci_qtd_pool_addr, 425 &real_length, 426 &ehcip->ehci_qtd_pool_mem_handle)) { 427 428 goto failure; 429 } 430 431 /* Map the QTD pool into the I/O address space */ 432 result = ddi_dma_addr_bind_handle( 433 ehcip->ehci_qtd_pool_dma_handle, 434 NULL, 435 
(caddr_t)ehcip->ehci_qtd_pool_addr, 436 real_length, 437 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 438 DDI_DMA_SLEEP, 439 NULL, 440 &ehcip->ehci_qtd_pool_cookie, 441 &ccount); 442 443 bzero((void *)ehcip->ehci_qtd_pool_addr, 444 ehci_qtd_pool_size * sizeof (ehci_qtd_t)); 445 446 /* Process the result */ 447 if (result == DDI_DMA_MAPPED) { 448 /* The cookie count should be 1 */ 449 if (ccount != 1) { 450 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 451 "ehci_allocate_pools: More than 1 cookie"); 452 453 goto failure; 454 } 455 } else { 456 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 457 "ehci_allocate_pools: Result = %d", result); 458 459 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result); 460 461 goto failure; 462 } 463 464 /* 465 * DMA addresses for QTD pools are bound 466 */ 467 ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND; 468 469 /* Initialize the QTD pool */ 470 for (i = 0; i < ehci_qtd_pool_size; i ++) { 471 Set_QTD(ehcip->ehci_qtd_pool_addr[i]. 472 qtd_state, EHCI_QTD_FREE); 473 } 474 475 /* Allocate the QTD pool DMA handle */ 476 if (ddi_dma_alloc_handle(ehcip->ehci_dip, 477 &ehcip->ehci_dma_attr, 478 DDI_DMA_SLEEP, 479 0, 480 &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) { 481 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 482 "ehci_allocate_pools: ddi_dma_alloc_handle failed"); 483 484 goto failure; 485 } 486 487 /* Allocate the memory for the QH pool */ 488 if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle, 489 ehci_qh_pool_size * sizeof (ehci_qh_t), 490 &dev_attr, 491 DDI_DMA_CONSISTENT, 492 DDI_DMA_SLEEP, 493 0, 494 (caddr_t *)&ehcip->ehci_qh_pool_addr, 495 &real_length, 496 &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) { 497 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 498 "ehci_allocate_pools: ddi_dma_mem_alloc failed"); 499 500 goto failure; 501 } 502 503 result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle, 504 NULL, 505 (caddr_t)ehcip->ehci_qh_pool_addr, 506 real_length, 507 
DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 508 DDI_DMA_SLEEP, 509 NULL, 510 &ehcip->ehci_qh_pool_cookie, 511 &ccount); 512 513 bzero((void *)ehcip->ehci_qh_pool_addr, 514 ehci_qh_pool_size * sizeof (ehci_qh_t)); 515 516 /* Process the result */ 517 if (result == DDI_DMA_MAPPED) { 518 /* The cookie count should be 1 */ 519 if (ccount != 1) { 520 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 521 "ehci_allocate_pools: More than 1 cookie"); 522 523 goto failure; 524 } 525 } else { 526 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result); 527 528 goto failure; 529 } 530 531 /* 532 * DMA addresses for QH pools are bound 533 */ 534 ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND; 535 536 /* Initialize the QH pool */ 537 for (i = 0; i < ehci_qh_pool_size; i ++) { 538 Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE); 539 } 540 541 /* Byte alignment */ 542 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 543 544 return (DDI_SUCCESS); 545 546 failure: 547 /* Byte alignment */ 548 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 549 550 return (DDI_FAILURE); 551 } 552 553 554 /* 555 * ehci_decode_ddi_dma_addr_bind_handle_result: 556 * 557 * Process the return values of ddi_dma_addr_bind_handle() 558 */ 559 void 560 ehci_decode_ddi_dma_addr_bind_handle_result( 561 ehci_state_t *ehcip, 562 int result) 563 { 564 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl, 565 "ehci_decode_ddi_dma_addr_bind_handle_result:"); 566 567 switch (result) { 568 case DDI_DMA_PARTIAL_MAP: 569 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 570 "Partial transfers not allowed"); 571 break; 572 case DDI_DMA_INUSE: 573 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 574 "Handle is in use"); 575 break; 576 case DDI_DMA_NORESOURCES: 577 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 578 "No resources"); 579 break; 580 case DDI_DMA_NOMAPPING: 581 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 582 "No mapping"); 583 break; 584 case 
DDI_DMA_TOOBIG: 585 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 586 "Object is too big"); 587 break; 588 default: 589 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 590 "Unknown dma error"); 591 } 592 } 593 594 595 /* 596 * ehci_map_regs: 597 * 598 * The Host Controller (HC) contains a set of on-chip operational registers 599 * and which should be mapped into a non-cacheable portion of the system 600 * addressable space. 601 */ 602 int 603 ehci_map_regs(ehci_state_t *ehcip) 604 { 605 ddi_device_acc_attr_t attr; 606 uint16_t cmd_reg; 607 uint_t length; 608 609 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:"); 610 611 /* Check to make sure we have memory access */ 612 if (pci_config_setup(ehcip->ehci_dip, 613 &ehcip->ehci_config_handle) != DDI_SUCCESS) { 614 615 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 616 "ehci_map_regs: Config error"); 617 618 return (DDI_FAILURE); 619 } 620 621 /* Make sure Memory Access Enable is set */ 622 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM); 623 624 if (!(cmd_reg & PCI_COMM_MAE)) { 625 626 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 627 "ehci_map_regs: Memory base address access disabled"); 628 629 return (DDI_FAILURE); 630 } 631 632 /* The host controller will be little endian */ 633 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 634 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 635 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 636 637 /* Map in EHCI Capability registers */ 638 if (ddi_regs_map_setup(ehcip->ehci_dip, 1, 639 (caddr_t *)&ehcip->ehci_capsp, 0, 640 sizeof (ehci_caps_t), &attr, 641 &ehcip->ehci_caps_handle) != DDI_SUCCESS) { 642 643 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 644 "ehci_map_regs: Map setup error"); 645 646 return (DDI_FAILURE); 647 } 648 649 length = ddi_get8(ehcip->ehci_caps_handle, 650 (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length); 651 652 /* Free the original mapping */ 653 
ddi_regs_map_free(&ehcip->ehci_caps_handle); 654 655 /* Re-map in EHCI Capability and Operational registers */ 656 if (ddi_regs_map_setup(ehcip->ehci_dip, 1, 657 (caddr_t *)&ehcip->ehci_capsp, 0, 658 length + sizeof (ehci_regs_t), &attr, 659 &ehcip->ehci_caps_handle) != DDI_SUCCESS) { 660 661 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 662 "ehci_map_regs: Map setup error"); 663 664 return (DDI_FAILURE); 665 } 666 667 /* Get the pointer to EHCI Operational Register */ 668 ehcip->ehci_regsp = (ehci_regs_t *) 669 ((uintptr_t)ehcip->ehci_capsp + length); 670 671 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 672 "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n", 673 (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp); 674 675 return (DDI_SUCCESS); 676 } 677 678 /* 679 * The following simulated polling is for debugging purposes only. 680 * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf. 681 */ 682 static int 683 ehci_is_polled(dev_info_t *dip) 684 { 685 int ret; 686 char *propval; 687 688 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, 689 "usb-polling", &propval) != DDI_SUCCESS) 690 691 return (0); 692 693 ret = (strcmp(propval, "true") == 0); 694 ddi_prop_free(propval); 695 696 return (ret); 697 } 698 699 static void 700 ehci_poll_intr(void *arg) 701 { 702 /* poll every msec */ 703 for (;;) { 704 (void) ehci_intr(arg, NULL); 705 delay(drv_usectohz(1000)); 706 } 707 } 708 709 /* 710 * ehci_register_intrs_and_init_mutex: 711 * 712 * Register interrupts and initialize each mutex and condition variables 713 */ 714 int 715 ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip) 716 { 717 int intr_types; 718 719 #if defined(__x86) 720 uint8_t iline; 721 #endif 722 723 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 724 "ehci_register_intrs_and_init_mutex:"); 725 726 /* 727 * There is a known MSI hardware bug with the EHCI controller 728 * of ULI1575 southbridge. Hence MSI is disabled for this chip. 
729 */ 730 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) && 731 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) { 732 ehcip->ehci_msi_enabled = B_FALSE; 733 } else { 734 /* Set the MSI enable flag from the global EHCI MSI tunable */ 735 ehcip->ehci_msi_enabled = ehci_enable_msi; 736 } 737 738 /* launch polling thread instead of enabling pci interrupt */ 739 if (ehci_is_polled(ehcip->ehci_dip)) { 740 extern pri_t maxclsyspri; 741 742 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 743 "ehci_register_intrs_and_init_mutex: " 744 "running in simulated polled mode"); 745 746 (void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0, 747 TS_RUN, maxclsyspri); 748 749 goto skip_intr; 750 } 751 752 #if defined(__x86) 753 /* 754 * Make sure that the interrupt pin is connected to the 755 * interrupt controller on x86. Interrupt line 255 means 756 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43). 757 * If we would return failure when interrupt line equals 255, then 758 * high speed devices will be routed to companion host controllers. 759 * However, it is not necessary to return failure here, and 760 * o/uhci codes don't check the interrupt line either. 761 * But it's good to log a message here for debug purposes. 
762 */ 763 iline = pci_config_get8(ehcip->ehci_config_handle, 764 PCI_CONF_ILINE); 765 766 if (iline == 255) { 767 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 768 "ehci_register_intrs_and_init_mutex: " 769 "interrupt line value out of range (%d)", 770 iline); 771 } 772 #endif /* __x86 */ 773 774 /* Get supported interrupt types */ 775 if (ddi_intr_get_supported_types(ehcip->ehci_dip, 776 &intr_types) != DDI_SUCCESS) { 777 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 778 "ehci_register_intrs_and_init_mutex: " 779 "ddi_intr_get_supported_types failed"); 780 781 return (DDI_FAILURE); 782 } 783 784 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 785 "ehci_register_intrs_and_init_mutex: " 786 "supported interrupt types 0x%x", intr_types); 787 788 if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) { 789 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI) 790 != DDI_SUCCESS) { 791 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 792 "ehci_register_intrs_and_init_mutex: MSI " 793 "registration failed, trying FIXED interrupt \n"); 794 } else { 795 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 796 "ehci_register_intrs_and_init_mutex: " 797 "Using MSI interrupt type\n"); 798 799 ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI; 800 ehcip->ehci_flags |= EHCI_INTR; 801 } 802 } 803 804 if ((!(ehcip->ehci_flags & EHCI_INTR)) && 805 (intr_types & DDI_INTR_TYPE_FIXED)) { 806 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED) 807 != DDI_SUCCESS) { 808 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 809 "ehci_register_intrs_and_init_mutex: " 810 "FIXED interrupt registration failed\n"); 811 812 return (DDI_FAILURE); 813 } 814 815 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 816 "ehci_register_intrs_and_init_mutex: " 817 "Using FIXED interrupt type\n"); 818 819 ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED; 820 ehcip->ehci_flags |= EHCI_INTR; 821 } 822 823 skip_intr: 824 /* Create prototype for advance on async schedule */ 825 
cv_init(&ehcip->ehci_async_schedule_advance_cv, 826 NULL, CV_DRIVER, NULL); 827 828 return (DDI_SUCCESS); 829 } 830 831 832 /* 833 * ehci_add_intrs: 834 * 835 * Register FIXED or MSI interrupts. 836 */ 837 static int 838 ehci_add_intrs(ehci_state_t *ehcip, int intr_type) 839 { 840 int actual, avail, intr_size, count = 0; 841 int i, flag, ret; 842 843 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 844 "ehci_add_intrs: interrupt type 0x%x", intr_type); 845 846 /* Get number of interrupts */ 847 ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count); 848 if ((ret != DDI_SUCCESS) || (count == 0)) { 849 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 850 "ehci_add_intrs: ddi_intr_get_nintrs() failure, " 851 "ret: %d, count: %d", ret, count); 852 853 return (DDI_FAILURE); 854 } 855 856 /* Get number of available interrupts */ 857 ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail); 858 if ((ret != DDI_SUCCESS) || (avail == 0)) { 859 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 860 "ehci_add_intrs: ddi_intr_get_navail() failure, " 861 "ret: %d, count: %d", ret, count); 862 863 return (DDI_FAILURE); 864 } 865 866 if (avail < count) { 867 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 868 "ehci_add_intrs: ehci_add_intrs: nintrs () " 869 "returned %d, navail returned %d\n", count, avail); 870 } 871 872 /* Allocate an array of interrupt handles */ 873 intr_size = count * sizeof (ddi_intr_handle_t); 874 ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP); 875 876 flag = (intr_type == DDI_INTR_TYPE_MSI) ? 
877 DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL; 878 879 /* call ddi_intr_alloc() */ 880 ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable, 881 intr_type, 0, count, &actual, flag); 882 883 if ((ret != DDI_SUCCESS) || (actual == 0)) { 884 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 885 "ehci_add_intrs: ddi_intr_alloc() failed %d", ret); 886 887 kmem_free(ehcip->ehci_htable, intr_size); 888 889 return (DDI_FAILURE); 890 } 891 892 if (actual < count) { 893 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 894 "ehci_add_intrs: Requested: %d, Received: %d\n", 895 count, actual); 896 897 for (i = 0; i < actual; i++) 898 (void) ddi_intr_free(ehcip->ehci_htable[i]); 899 900 kmem_free(ehcip->ehci_htable, intr_size); 901 902 return (DDI_FAILURE); 903 } 904 905 ehcip->ehci_intr_cnt = actual; 906 907 if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0], 908 &ehcip->ehci_intr_pri)) != DDI_SUCCESS) { 909 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 910 "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret); 911 912 for (i = 0; i < actual; i++) 913 (void) ddi_intr_free(ehcip->ehci_htable[i]); 914 915 kmem_free(ehcip->ehci_htable, intr_size); 916 917 return (DDI_FAILURE); 918 } 919 920 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 921 "ehci_add_intrs: Supported Interrupt priority 0x%x", 922 ehcip->ehci_intr_pri); 923 924 /* Test for high level mutex */ 925 if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) { 926 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 927 "ehci_add_intrs: Hi level interrupt not supported"); 928 929 for (i = 0; i < actual; i++) 930 (void) ddi_intr_free(ehcip->ehci_htable[i]); 931 932 kmem_free(ehcip->ehci_htable, intr_size); 933 934 return (DDI_FAILURE); 935 } 936 937 /* Initialize the mutex */ 938 mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER, 939 DDI_INTR_PRI(ehcip->ehci_intr_pri)); 940 941 /* Call ddi_intr_add_handler() */ 942 for (i = 0; i < actual; i++) { 943 if ((ret = 
ddi_intr_add_handler(ehcip->ehci_htable[i], 944 ehci_intr, (caddr_t)ehcip, 945 (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 946 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 947 "ehci_add_intrs:ddi_intr_add_handler() " 948 "failed %d", ret); 949 950 for (i = 0; i < actual; i++) 951 (void) ddi_intr_free(ehcip->ehci_htable[i]); 952 953 mutex_destroy(&ehcip->ehci_int_mutex); 954 kmem_free(ehcip->ehci_htable, intr_size); 955 956 return (DDI_FAILURE); 957 } 958 } 959 960 if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0], 961 &ehcip->ehci_intr_cap)) != DDI_SUCCESS) { 962 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 963 "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret); 964 965 for (i = 0; i < actual; i++) { 966 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]); 967 (void) ddi_intr_free(ehcip->ehci_htable[i]); 968 } 969 970 mutex_destroy(&ehcip->ehci_int_mutex); 971 kmem_free(ehcip->ehci_htable, intr_size); 972 973 return (DDI_FAILURE); 974 } 975 976 /* Enable all interrupts */ 977 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) { 978 /* Call ddi_intr_block_enable() for MSI interrupts */ 979 (void) ddi_intr_block_enable(ehcip->ehci_htable, 980 ehcip->ehci_intr_cnt); 981 } else { 982 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 983 for (i = 0; i < ehcip->ehci_intr_cnt; i++) 984 (void) ddi_intr_enable(ehcip->ehci_htable[i]); 985 } 986 987 return (DDI_SUCCESS); 988 } 989 990 991 /* 992 * ehci_init_hardware 993 * 994 * take control from BIOS, reset EHCI host controller, and check version, etc. 
 */
int
ehci_init_hardware(ehci_state_t *ehcip)
{
    int     revision;
    uint16_t    cmd_reg;
    int     abort_on_BIOS_take_over_failure;

    /* Take control from the BIOS */
    if (ehci_take_control(ehcip) != USB_SUCCESS) {

        /* read .conf file properties */
        abort_on_BIOS_take_over_failure =
            ddi_prop_get_int(DDI_DEV_T_ANY,
            ehcip->ehci_dip, DDI_PROP_DONTPASS,
            "abort-on-BIOS-take-over-failure", 0);

        if (abort_on_BIOS_take_over_failure) {

            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "Unable to take control from BIOS.");

            return (DDI_FAILURE);
        }

        /* Default policy: continue despite the failed handoff */
        USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "Unable to take control from BIOS. Failure is ignored.");
    }

    /* set Memory Master Enable */
    cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
    cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME);
    pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg);

    /* Reset the EHCI host controller */
    Set_OpReg(ehci_command,
        Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);

    /* Wait 10ms for reset to complete */
    drv_usecwait(EHCI_RESET_TIMEWAIT);

    ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED);

    /* Verify the version number */
    revision = Get_16Cap(ehci_version);

    USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_init_hardware: Revision 0x%x", revision);

    /*
     * EHCI driver supports EHCI host controllers compliant to
     * 0.95 and higher revisions of EHCI specifications.
     */
    if (revision < EHCI_REVISION_0_95) {

        USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "Revision 0x%x is not supported", revision);

        return (DDI_FAILURE);
    }

    if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) {

        /* Initialize the Frame list base address area */
        if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) {

            return (DDI_FAILURE);
        }

        /*
         * For performance reasons, do not insert anything into the
         * asynchronous list or activate the asynch list schedule until
         * there is a valid QH.
         */
        ehcip->ehci_head_of_async_sched_list = NULL;

        if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) &&
            (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) {
            /*
             * The driver is unable to reliably stop the asynch
             * list schedule on VIA VT6202 controllers, so we
             * always keep a dummy QH on the list.
             *
             * NOTE(review): the result of ehci_alloc_qh() is
             * dereferenced without a NULL check below -- confirm
             * it cannot fail (sleeping allocation) in this path.
             */
            ehci_qh_t *dummy_async_qh =
                ehci_alloc_qh(ehcip, NULL, NULL);

            Set_QH(dummy_async_qh->qh_link_ptr,
                ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) &
                EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH));

            /* Set this QH to be the "head" of the circular list */
            Set_QH(dummy_async_qh->qh_ctrl,
                Get_QH(dummy_async_qh->qh_ctrl) |
                EHCI_QH_CTRL_RECLAIM_HEAD);

            Set_QH(dummy_async_qh->qh_next_qtd,
                EHCI_QH_NEXT_QTD_PTR_VALID);
            Set_QH(dummy_async_qh->qh_alt_next_qtd,
                EHCI_QH_ALT_NEXT_QTD_PTR_VALID);

            ehcip->ehci_head_of_async_sched_list = dummy_async_qh;
            ehcip->ehci_open_async_count++;
            ehcip->ehci_async_req_count++;
        }
    }

    return (DDI_SUCCESS);
}


/*
 * ehci_init_workaround
 *
 * some workarounds during initializing ehci
 */
int
ehci_init_workaround(ehci_state_t *ehcip)
{
    /*
     * Acer Labs Inc. M5273 EHCI controller does not send
     * interrupts unless the Root hub ports are routed to the EHCI
     * host controller; so route the ports now, before we test for
     * the presence of SOFs interrupts.
     */
    if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
        /* Route all Root hub ports to EHCI host controller */
        Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
    }

    /*
     * VIA chips have some issues and may not work reliably.
     * Revisions >= 0x80 are part of a southbridge and appear
     * to be reliable with the workaround.
     * For revisions < 0x80, if we were bound using class
     * complain, else proceed. This will allow the user to
     * bind ehci specifically to this chip and not have the
     * warnings
     */
    if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) {

        if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) {

            USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "ehci_init_workaround: Applying VIA workarounds "
                "for the 6212 chip.");

        } else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name,
            "pciclass,0c0320") == 0) {

            /*
             * Bound by class on an old VIA revision: refuse to
             * drive the chip and fall back to USB 1.x.
             */
            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "Due to recently discovered incompatibilities");
            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "with this USB controller, USB2.x transfer");
            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "support has been disabled. This device will");
            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "continue to function as a USB1.x controller.");
            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "If you are interested in enabling USB2.x");
            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "support please, refer to the ehci(7D) man page.");
            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "Please also refer to www.sun.com/io for");
            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "Solaris Ready products and to");
            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "www.sun.com/bigadmin/hcl for additional");
            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "compatible USB products.");

            return (DDI_FAILURE);

        } else if (ehci_vt62x2_workaround) {

            USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "Applying VIA workarounds");
        }
    }

    return (DDI_SUCCESS);
}


/*
 * ehci_init_check_status
 *
 * Check if EHCI host controller is running
 */
int
ehci_init_check_status(ehci_state_t *ehcip)
{
    clock_t sof_time_wait;

    /*
     * Get the number of clock ticks to wait.
     * This is based on the maximum time it takes for a frame list rollover
     * and maximum time wait for SOFs to begin.
     */
    sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) +
        EHCI_SOF_TIMEWAIT);

    /* Tell the ISR to broadcast ehci_async_schedule_advance_cv */
    ehcip->ehci_flags |= EHCI_CV_INTR;

    /* We need to add a delay to allow the chip time to start running */
    (void) cv_reltimedwait(&ehcip->ehci_async_schedule_advance_cv,
        &ehcip->ehci_int_mutex, sof_time_wait, TR_CLOCK_TICK);

    /*
     * Check EHCI host controller is running, otherwise return failure.
     * EHCI_CV_INTR is cleared by the ISR when an SOF arrives, so if it
     * is still set here no SOF interrupt was seen.
     */
    if ((ehcip->ehci_flags & EHCI_CV_INTR) ||
        (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {

        /*
         * NOTE(review): the concatenated message below is missing a
         * space between "host" and "controller" in the emitted text.
         */
        USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "No SOF interrupts have been received, this USB EHCI host"
            "controller is unusable");

        /*
         * Route all Root hub ports to Classic host
         * controller, in case this is an unusable ALI M5273
         * EHCI controller.
         */
        if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
            Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
        }

        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}


/*
 * ehci_init_ctlr:
 *
 * Initialize the Host Controller (HC).
 */
int
ehci_init_ctlr(ehci_state_t *ehcip, int init_type)
{
    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:");

    if (init_type == EHCI_NORMAL_INITIALIZATION) {

        if (ehci_init_hardware(ehcip) != DDI_SUCCESS) {

            return (DDI_FAILURE);
        }
    }

    /*
     * Check for Asynchronous schedule park capability feature. If this
     * feature is supported, then, program ehci command register with
     * appropriate values..
     */
    if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) {

        USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "ehci_init_ctlr: Async park mode is supported");

        Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
            (EHCI_CMD_ASYNC_PARK_ENABLE |
            EHCI_CMD_ASYNC_PARK_COUNT_3)));
    }

    /*
     * Check for programmable periodic frame list feature. If this
     * feature is supported, then, program ehci command register with
     * 1024 frame list value.
     */
    if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) {

        USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "ehci_init_ctlr: Variable programmable periodic "
            "frame list is supported");

        Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
            EHCI_CMD_FRAME_1024_SIZE));
    }

    /*
     * Currently EHCI driver doesn't support 64 bit addressing.
     *
     * If we are using 64 bit addressing capability, then, program
     * ehci_ctrl_segment register with 4 Gigabyte segment where all
     * of the interface data structures are allocated.
     */
    if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) {

        USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "ehci_init_ctlr: EHCI driver doesn't support "
            "64 bit addressing");
    }

    /* 64-bit addressing is not supported; use the low 4GB segment */
    Set_OpReg(ehci_ctrl_segment, 0x00000000);

    /* Turn on/off the schedulers */
    ehci_toggle_scheduler(ehcip);

    /* Set host controller soft state to operational */
    ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;

    /*
     * Set the Periodic Frame List Base Address register with the
     * starting physical address of the Periodic Frame List.
     */
    Set_OpReg(ehci_periodic_list_base,
        (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
        EHCI_PERIODIC_LIST_BASE));

    /*
     * Set ehci_interrupt to enable all interrupts except Root
     * Hub Status change interrupt.
     */
    Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
        EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR |
        EHCI_INTR_USB);

    /*
     * Set the desired interrupt threshold and turn on EHCI host controller.
     */
    Set_OpReg(ehci_command,
        ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) |
        (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));

    ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN);

    if (init_type == EHCI_NORMAL_INITIALIZATION) {

        if (ehci_init_workaround(ehcip) != DDI_SUCCESS) {

            /* Set host controller soft state to error */
            ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;

            return (DDI_FAILURE);
        }

        if (ehci_init_check_status(ehcip) != DDI_SUCCESS) {

            /* Set host controller soft state to error */
            ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;

            return (DDI_FAILURE);
        }

        USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "ehci_init_ctlr: SOF's have started");
    }

    /* Route all Root hub ports to EHCI host controller */
    Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);

    return (DDI_SUCCESS);
}

/*
 * ehci_take_control:
 *
 * Handshake to take EHCI control from BIOS if necessary. Its only valid for
 * x86 machines, because sparc doesn't have a BIOS.
 * On x86 machine, the take control process includes
 * o get the base address of the extended capability list
 * o find out the capability for handoff synchronization in the list.
 * o check if BIOS has owned the host controller.
 * o set the OS Owned semaphore bit, ask the BIOS to release the ownership.
 * o wait for a constant time and check if BIOS has relinquished control.
 */
/* ARGSUSED */
static int
ehci_take_control(ehci_state_t *ehcip)
{
#if defined(__x86)
    uint32_t        extended_cap;
    uint32_t        extended_cap_offset;
    uint32_t        extended_cap_id;
    uint_t          retry;

    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_take_control:");

    /*
     * According EHCI Spec 2.2.4, get EECP base address from HCCPARAMS
     * register.
     */
    extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
        EHCI_HCC_EECP_SHIFT;

    /*
     * According EHCI Spec 2.2.4, if the extended capability offset is
     * less than 40h then its not valid. This means we don't need to
     * worry about BIOS handoff.
     */
    if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {

        USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "ehci_take_control: Hardware doesn't support legacy.");

        goto success;
    }

    /*
     * According EHCI Spec 2.1.7, A zero offset indicates the
     * end of the extended capability list.
     */
    while (extended_cap_offset) {

        /* Get the extended capability value. */
        extended_cap = pci_config_get32(ehcip->ehci_config_handle,
            extended_cap_offset);

        /*
         * It's possible that we'll receive an invalid PCI read here due
         * to something going wrong due to platform firmware. This has
         * been observed in the wild depending on the version of ACPI in
         * use. If this happens, we'll assume that the capability does
         * not exist and that we do not need to take control from the
         * BIOS.
         */
        if (extended_cap == PCI_EINVAL32) {
            extended_cap_id = EHCI_EX_CAP_ID_RESERVED;
            break;
        }

        /* Get the capability ID */
        extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
            EHCI_EX_CAP_ID_SHIFT;

        /* Check if the card support legacy */
        if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
            break;
        }

        /* Get the offset of the next capability */
        extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
            EHCI_EX_CAP_NEXT_PTR_SHIFT;

    }

    /*
     * Unable to find legacy support in hardware's extended capability list.
     * This means we don't need to worry about BIOS handoff.
     */
    if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {

        USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "ehci_take_control: Hardware doesn't support legacy");

        goto success;
    }

    /* Check if BIOS has owned it. */
    if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {

        USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "ehci_take_control: BIOS does not own EHCI");

        goto success;
    }

    /*
     * According EHCI Spec 5.1, The OS driver initiates an ownership
     * request by setting the OS Owned semaphore to a one. The OS
     * waits for the BIOS Owned bit to go to a zero before attempting
     * to use the EHCI controller. The time that OS must wait for BIOS
     * to respond to the request for ownership is beyond the scope of
     * this specification.
     * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms
     * for BIOS to release the ownership.
     */
    extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
    pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
        extended_cap);

    for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {

        /* wait a special interval */
#ifndef __lock_lint
        delay(drv_usectohz(EHCI_TAKEOVER_DELAY));
#endif
        /* Check to see if the BIOS has released the ownership */
        extended_cap = pci_config_get32(
            ehcip->ehci_config_handle, extended_cap_offset);

        if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {

            USB_DPRINTF_L3(PRINT_MASK_ATTA,
                ehcip->ehci_log_hdl,
                "ehci_take_control: BIOS has released "
                "the ownership. retry = %d", retry);

            goto success;
        }

    }

    USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_take_control: take control from BIOS failed.");

    return (USB_FAILURE);

success:

#endif  /* __x86 */
    return (USB_SUCCESS);
}


/*
 * ehci_init_periodic_frame_list_table :
 *
 * Allocate the system memory and initialize Host Controller
 * Periodic Frame List table area. The starting of the Periodic
 * Frame List Table area must be 4096 byte aligned.
 */
static int
ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
{
    ddi_device_acc_attr_t   dev_attr;
    size_t          real_length;
    uint_t          ccount;
    int         result;

    ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_init_periodic_frame_lst_table:");

    /* The host controller will be little endian */
    dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
    dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
    dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

    /* Force the required 4K restrictive alignment */
    ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;

    /* Create space for the Periodic Frame List */
    if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
        DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {

        goto failure;
    }

    /* Nonzero return (!= DDI_SUCCESS) means the allocation failed */
    if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
        sizeof (ehci_periodic_frame_list_t),
        &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
        0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
        &real_length, &ehcip->ehci_pflt_mem_handle)) {

        goto failure;
    }

    USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_init_periodic_frame_lst_table: "
        "Real length %lu", real_length);

    /* Map the whole Periodic Frame List into the I/O address space */
    result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
        NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
        real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
        DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);

    if (result == DDI_DMA_MAPPED) {
        /* The cookie count should be 1 */
        if (ccount != 1) {
            USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
                "ehci_init_periodic_frame_lst_table: "
                "More than 1 cookie");

            goto failure;
        }
    } else {
        ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);

        goto failure;
    }

    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
        (void *)ehcip->ehci_periodic_frame_list_tablep,
        ehcip->ehci_pflt_cookie.dmac_address);

    /*
     * DMA addresses for Periodic Frame List are bound.
     */
    ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;

    bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);

    /* Initialize the Periodic Frame List */
    ehci_build_interrupt_lattice(ehcip);

    /* Reset Byte Alignment to Default */
    ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

    return (DDI_SUCCESS);
failure:
    /* Byte alignment */
    ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

    return (DDI_FAILURE);
}


/*
 * ehci_build_interrupt_lattice:
 *
 * Construct the interrupt lattice tree using static Endpoint Descriptors
 * (QH). This interrupt lattice tree will have total of 32 interrupt QH
 * lists and the Host Controller (HC) processes one interrupt QH list in
 * every frame. The Host Controller traverses the periodic schedule by
 * constructing an array offset reference from the Periodic List Base Address
 * register and bits 12 to 3 of Frame Index register. It fetches the element
 * and begins traversing the graph of linked schedule data structures.
 */
static void
ehci_build_interrupt_lattice(ehci_state_t *ehcip)
{
    ehci_qh_t   *list_array = ehcip->ehci_qh_pool_addr;
    /* NOTE(review): shadows the file-scope static uchar_t ehci_index[] */
    ushort_t    ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS];
    ehci_periodic_frame_list_t *periodic_frame_list =
        ehcip->ehci_periodic_frame_list_tablep;
    ushort_t    *temp, num_of_nodes;
    uintptr_t   addr;
    int     i, j, k;

    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_build_interrupt_lattice:");

    /*
     * Reserve the first 63 Endpoint Descriptor (QH) structures
     * in the pool as static endpoints & these are required for
     * constructing interrupt lattice tree.
     */
    for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) {
        Set_QH(list_array[i].qh_state, EHCI_QH_STATIC);
        Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED);
        Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID);
        Set_QH(list_array[i].qh_alt_next_qtd,
            EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
    }

    /*
     * Make sure that last Endpoint on the periodic frame list terminates
     * periodic schedule.
     */
    Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID);

    /* Build the interrupt lattice tree */
    for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) {
        /*
         * The next pointer in the host controller endpoint
         * descriptor must contain an iommu address. Calculate
         * the offset into the cpu address and add this to the
         * starting iommu address.
         */
        addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]);

        Set_QH(list_array[2*i + 1].qh_link_ptr,
            addr | EHCI_QH_LINK_REF_QH);
        Set_QH(list_array[2*i + 2].qh_link_ptr,
            addr | EHCI_QH_LINK_REF_QH);
    }

    /* Build the tree bottom */
    temp = (unsigned short *)
        kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP);

    num_of_nodes = 1;

    /*
     * Initialize the values which are used for setting up head pointers
     * for the 32ms scheduling lists which starts from the Periodic Frame
     * List.  Each pass doubles the node count, interleaving new entries
     * (bit-reversal ordering); j advances twice per inner iteration.
     */
    for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) {
        for (j = 0, k = 0; k < num_of_nodes; k++, j++) {
            ehci_index[j++] = temp[k];
            ehci_index[j] = temp[k] + ehci_pow_2(i);
        }

        num_of_nodes *= 2;
        for (k = 0; k < num_of_nodes; k++)
            temp[k] = ehci_index[k];
    }

    kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2));

    /*
     * Initialize the interrupt list in the Periodic Frame List Table
     * so that it points to the bottom of the tree.
     */
    for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) {
        addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)
            (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1]));

        ASSERT(addr);

        for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) {
            Set_PFLT(periodic_frame_list->
                ehci_periodic_frame_list_table[ehci_index[j++]],
                (uint32_t)(addr | EHCI_QH_LINK_REF_QH));
        }
    }
}


/*
 * ehci_alloc_hcdi_ops:
 *
 * The HCDI interfaces or entry points are the software interfaces used by
 * the Universal Serial Bus Driver (USBA) to access the services of the
 * Host Controller Driver (HCD). During HCD initialization, inform USBA
 * about all available HCDI interfaces or entry points.
 */
usba_hcdi_ops_t *
ehci_alloc_hcdi_ops(ehci_state_t *ehcip)
{
    usba_hcdi_ops_t         *usba_hcdi_ops;

    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_alloc_hcdi_ops:");

    usba_hcdi_ops = usba_alloc_hcdi_ops();

    usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;

    usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support;
    usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
    usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;

    usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;
    usba_hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
        ehci_hcdi_pipe_reset_data_toggle;

    usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
    usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
    usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
    usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;

    usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
        ehci_hcdi_bulk_transfer_size;

    usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
        ehci_hcdi_pipe_stop_intr_polling;
    usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
        ehci_hcdi_pipe_stop_isoc_polling;

    usba_hcdi_ops->usba_hcdi_get_current_frame_number =
        ehci_hcdi_get_current_frame_number;
    usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
        ehci_hcdi_get_max_isoc_pkts;

    /* Polled-mode (kmdb/console) input entry points */
    usba_hcdi_ops->usba_hcdi_console_input_init =
        ehci_hcdi_polled_input_init;
    usba_hcdi_ops->usba_hcdi_console_input_enter =
        ehci_hcdi_polled_input_enter;
    usba_hcdi_ops->usba_hcdi_console_read =
        ehci_hcdi_polled_read;
    usba_hcdi_ops->usba_hcdi_console_input_exit =
        ehci_hcdi_polled_input_exit;
    usba_hcdi_ops->usba_hcdi_console_input_fini =
        ehci_hcdi_polled_input_fini;

    /* Polled-mode (kmdb/console) output entry points */
    usba_hcdi_ops->usba_hcdi_console_output_init =
        ehci_hcdi_polled_output_init;
    usba_hcdi_ops->usba_hcdi_console_output_enter =
        ehci_hcdi_polled_output_enter;
    usba_hcdi_ops->usba_hcdi_console_write =
        ehci_hcdi_polled_write;
    usba_hcdi_ops->usba_hcdi_console_output_exit =
        ehci_hcdi_polled_output_exit;
    usba_hcdi_ops->usba_hcdi_console_output_fini =
        ehci_hcdi_polled_output_fini;
    return (usba_hcdi_ops);
}


/*
 * Host Controller Driver (HCD) deinitialization functions
 */

/*
 * ehci_cleanup:
 *
 * Cleanup on attach failure or detach
 */
int
ehci_cleanup(ehci_state_t *ehcip)
{
    ehci_trans_wrapper_t    *tw;
    ehci_pipe_private_t *pp;
    ehci_qtd_t      *qtd;
    int         i, ctrl, rval;
    int         flags = ehcip->ehci_flags;

    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");

    if (flags & EHCI_RHREG) {
        /* Unload the root hub driver */
        if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {

            return (DDI_FAILURE);
        }
    }

    if (flags & EHCI_USBAREG) {
        /* Unregister this HCD instance with USBA */
        usba_hcdi_unregister(ehcip->ehci_dip);
    }

    if (flags & EHCI_INTR) {

        mutex_enter(&ehcip->ehci_int_mutex);

        /* Disable all EHCI QH list processing */
        Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
            ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
            EHCI_CMD_PERIODIC_SCHED_ENABLE)));

        /* Disable all EHCI interrupts */
        Set_OpReg(ehci_interrupt, 0);

        /* wait for the next SOF */
        (void) ehci_wait_for_sof(ehcip);

        /* Route all Root hub ports to Classic host controller */
        Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);

        /* Stop the EHCI host controller */
        Set_OpReg(ehci_command,
            Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);

        mutex_exit(&ehcip->ehci_int_mutex);

        /* Wait for sometime */
        delay(drv_usectohz(EHCI_TIMEWAIT));

        ehci_rem_intrs(ehcip);
    }

    /* Unmap the EHCI registers */
    if (ehcip->ehci_caps_handle) {
        ddi_regs_map_free(&ehcip->ehci_caps_handle);
    }

    if (ehcip->ehci_config_handle) {
        pci_config_teardown(&ehcip->ehci_config_handle);
    }

    /* Free all the buffers */
    if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
        for (i = 0; i < ehci_qtd_pool_size; i ++) {
            qtd = &ehcip->ehci_qtd_pool_addr[i];
            ctrl = Get_QTD(ehcip->
                ehci_qtd_pool_addr[i].qtd_state);

            if ((ctrl != EHCI_QTD_FREE) &&
                (ctrl != EHCI_QTD_DUMMY) &&
                (qtd->qtd_trans_wrapper)) {

                mutex_enter(&ehcip->ehci_int_mutex);

                tw = (ehci_trans_wrapper_t *)
                    EHCI_LOOKUP_ID((uint32_t)
                    Get_QTD(qtd->qtd_trans_wrapper));

                /* Obtain the pipe private structure */
                pp = tw->tw_pipe_private;

                /* Stop the transfer timer */
                ehci_stop_xfer_timer(ehcip, tw,
                    EHCI_REMOVE_XFER_ALWAYS);

                ehci_deallocate_tw(ehcip, pp, tw);

                mutex_exit(&ehcip->ehci_int_mutex);
            }
        }

        /*
         * If EHCI_QTD_POOL_BOUND flag is set, then unbind
         * the handle for QTD pools.
         */
        if ((ehcip->ehci_dma_addr_bind_flag &
            EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {

            rval = ddi_dma_unbind_handle(
                ehcip->ehci_qtd_pool_dma_handle);

            ASSERT(rval == DDI_SUCCESS);
        }
        ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
    }

    /* Free the QTD pool */
    if (ehcip->ehci_qtd_pool_dma_handle) {
        ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
    }

    if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
        /*
         * If EHCI_QH_POOL_BOUND flag is set, then unbind
         * the handle for QH pools.
         */
        if ((ehcip->ehci_dma_addr_bind_flag &
            EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {

            rval = ddi_dma_unbind_handle(
                ehcip->ehci_qh_pool_dma_handle);

            ASSERT(rval == DDI_SUCCESS);
        }

        ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
    }

    /* Free the QH pool */
    if (ehcip->ehci_qh_pool_dma_handle) {
        ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
    }

    /* Free the Periodic frame list table (PFLT) area */
    if (ehcip->ehci_periodic_frame_list_tablep &&
        ehcip->ehci_pflt_mem_handle) {
        /*
         * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
         * the handle for PFLT.
         */
        if ((ehcip->ehci_dma_addr_bind_flag &
            EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {

            rval = ddi_dma_unbind_handle(
                ehcip->ehci_pflt_dma_handle);

            ASSERT(rval == DDI_SUCCESS);
        }

        ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
    }

    (void) ehci_isoc_cleanup(ehcip);

    if (ehcip->ehci_pflt_dma_handle) {
        ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
    }

    if (flags & EHCI_INTR) {
        /* Destroy the mutex */
        mutex_destroy(&ehcip->ehci_int_mutex);

        /* Destroy the async schedule advance condition variable */
        cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
    }

    /* clean up kstat structs */
    ehci_destroy_stats(ehcip);

    /* Free ehci hcdi ops */
    if (ehcip->ehci_hcdi_ops) {
        usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
    }

    if (flags & EHCI_ZALLOC) {

        usb_free_log_hdl(ehcip->ehci_log_hdl);

        /* Remove all properties that might have been created */
        ddi_prop_remove_all(ehcip->ehci_dip);

        /* Free the soft state */
        ddi_soft_state_free(ehci_statep,
            ddi_get_instance(ehcip->ehci_dip));
    }

    return (DDI_SUCCESS);
}


/*
 * ehci_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
ehci_rem_intrs(ehci_state_t *ehcip)
{
    int i;

    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type);

    /* Disable all interrupts */
    if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
        (void) ddi_intr_block_disable(ehcip->ehci_htable,
            ehcip->ehci_intr_cnt);
    } else {
        for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
            (void) ddi_intr_disable(ehcip->ehci_htable[i]);
        }
    }

    /* Call ddi_intr_remove_handler() */
    for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
        (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
        (void) ddi_intr_free(ehcip->ehci_htable[i]);
    }

    kmem_free(ehcip->ehci_htable,
        ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t));
}


/*
 * ehci_cpr_suspend
 *
 * Quiesce the controller for checkpoint/resume (CPR) suspend.
 */
int
ehci_cpr_suspend(ehci_state_t *ehcip)
{
    int i;

    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_cpr_suspend:");

    /* Call into the root hub and suspend it */
    if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {

        USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "ehci_cpr_suspend: root hub fails to suspend");

        return (DDI_FAILURE);
    }

    /* Only root hub's intr pipe should be open at this time */
    mutex_enter(&ehcip->ehci_int_mutex);

    ASSERT(ehcip->ehci_open_pipe_count == 0);

    /* Just wait till all resources are reclaimed (at most 3 SOFs) */
    i = 0;
    while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
        ehci_handle_endpoint_reclaimation(ehcip);
        (void) ehci_wait_for_sof(ehcip);
    }
    ASSERT(ehcip->ehci_reclaim_list == NULL);

    USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_cpr_suspend: Disable HC QH list processing");

    /* Disable all EHCI QH list processing */
    Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
        ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));

    USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_cpr_suspend: Disable HC interrupts");

    /* Disable all EHCI interrupts */
    Set_OpReg(ehci_interrupt, 0);

    USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_cpr_suspend: Wait for the next SOF");

    /* Wait for the next SOF */
    if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {

        USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "ehci_cpr_suspend: ehci host controller suspend failed");

        mutex_exit(&ehcip->ehci_int_mutex);
        return (DDI_FAILURE);
    }

    /*
     * Stop the ehci host controller
     * if usb keyboard is not connected.
     */
    if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
        Set_OpReg(ehci_command,
            Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);

    }

    /* Set host controller soft state to suspend */
    ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;

    mutex_exit(&ehcip->ehci_int_mutex);

    return (DDI_SUCCESS);
}


/*
 * ehci_cpr_resume
 *
 * Re-initialize the controller and resume the root hub after CPR.
 */
int
ehci_cpr_resume(ehci_state_t *ehcip)
{
    mutex_enter(&ehcip->ehci_int_mutex);

    USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
        "ehci_cpr_resume: Restart the controller");

    /* Cleanup ehci specific information across cpr */
    ehci_cpr_cleanup(ehcip);

    /* Restart the controller */
    if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {

        USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
            "ehci_cpr_resume: ehci host controller resume failed ");

        mutex_exit(&ehcip->ehci_int_mutex);

        return (DDI_FAILURE);
    }

    mutex_exit(&ehcip->ehci_int_mutex);

    /* Now resume the root hub */
    if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {

        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}


/*
 * Bandwidth Allocation functions
 */

/*
 * ehci_allocate_bandwidth:
 *
 * Figure out whether or not this interval may be supported. Return the index
 * into the lattice if it can be supported. Return allocation failure if it
 * can not be supported.
 */
int
ehci_allocate_bandwidth(
    ehci_state_t        *ehcip,
    usba_pipe_handle_data_t *ph,
    uint_t          *pnode,
    uchar_t         *smask,
    uchar_t         *cmask)
{
    int         error = USB_SUCCESS;

    /* This routine is protected by the ehci_int_mutex */
    ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

    /* Reset the pnode to the last checked pnode */
    *pnode = 0;

    /* Allocate high speed bandwidth */
    if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
        ph, pnode, smask, cmask)) != USB_SUCCESS) {

        return (error);
    }

    /*
     * For low/full speed usb devices, allocate classic TT bandwidth
     * in additional to high speed bandwidth.
     */
    if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {

        /* Allocate classic TT bandwidth */
        if ((error = ehci_allocate_classic_tt_bandwidth(
            ehcip, ph, *pnode)) != USB_SUCCESS) {

            /* Deallocate high speed bandwidth */
            ehci_deallocate_high_speed_bandwidth(
                ehcip, ph, *pnode, *smask, *cmask);
        }
    }

    return (error);
}


/*
 * ehci_allocate_high_speed_bandwidth:
 *
 * Allocate high speed bandwidth for the low/full/high speed interrupt and
 * isochronous endpoints.
 */
static int
ehci_allocate_high_speed_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			*pnode,
	uchar_t			*smask,
	uchar_t			*cmask)
{
	uint_t			sbandwidth, cbandwidth;
	int			interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud;
	usb_port_status_t	port_status;
	int			error;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = ph->p_usba_device->usb_port_status;

	mutex_exit(&child_ud->usb_mutex);

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint. Return failure if maximum packet is
	 * zero.
	 */
	error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
	    port_status, &sbandwidth, &cbandwidth);
	if (error != USB_SUCCESS) {

		return (error);
	}

	/*
	 * Adjust polling interval to be a power of 2.
	 * If this interval can't be supported, return
	 * allocation failure.
	 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
	if (interval == USB_FAILURE) {

		return (USB_FAILURE);
	}

	/*
	 * Dispatch to the proper best-fit mask finder: high speed
	 * endpoints use a plain smask only; low/full speed endpoints
	 * behind a TT additionally need a complete split mask (cmask).
	 */
	if (port_status == USBA_HIGH_SPEED_DEV) {
		/* Allocate bandwidth for high speed devices */
		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
		    USB_EP_ATTR_ISOCH) {
			/* High speed isoch uses ITDs; no smask needed here */
			error = USB_SUCCESS;
		} else {

			error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
			    endpoint, sbandwidth, interval);
		}

		*cmask = 0x00;

	} else {
		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
		    USB_EP_ATTR_INTR) {

			/* Allocate bandwidth for low speed interrupt */
			error = ehci_find_bestfit_ls_intr_mask(ehcip,
			    smask, cmask, pnode, sbandwidth, cbandwidth,
			    interval);
		} else {
			if ((endpoint->bEndpointAddress &
			    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {

				/* Allocate bandwidth for sitd in */
				error = ehci_find_bestfit_sitd_in_mask(ehcip,
				    smask, cmask, pnode, sbandwidth, cbandwidth,
				    interval);
			} else {

				/* Allocate bandwidth for sitd out */
				error = ehci_find_bestfit_sitd_out_mask(ehcip,
				    smask, pnode, sbandwidth, interval);
				*cmask = 0x00;
			}
		}
	}

	if (error != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_high_speed_bandwidth: Reached maximum "
		    "bandwidth value and cannot allocate bandwidth for a "
		    "given high-speed periodic endpoint");

		return (USB_NO_BANDWIDTH);
	}

	return (error);
}


/*
 * ehci_allocate_classic_tt_speed_bandwidth:
 *
 * Allocate classic TT bandwidth for the low/full speed interrupt and
 * isochronous endpoints.
 */
static int
ehci_allocate_classic_tt_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode)
{
	uint_t			bandwidth, min;
	uint_t			height, leftmost, list;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud, *parent_ud;
	usb_port_status_t	port_status;
	int			i, interval;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = child_ud->usb_port_status;

	/* Get the parent high speed hub's usba device structure */
	parent_ud = child_ud->usb_hs_hub_usba_dev;

	mutex_exit(&child_ud->usb_mutex);

	USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_allocate_classic_tt_bandwidth: "
	    "child_ud 0x%p parent_ud 0x%p",
	    (void *)child_ud, (void *)parent_ud);

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint. Return failure if maximum packet is
	 * zero.
	 */
	if (ehci_compute_classic_bandwidth(endpoint,
	    port_status, &bandwidth) != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
		    "with zero endpoint maximum packet size is not supported");

		return (USB_NOT_SUPPORTED);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);

	mutex_enter(&parent_ud->usb_mutex);

	/*
	 * If the length in bytes plus the allocated bandwidth exceeds
	 * the maximum, return bandwidth allocation failure.
	 */
	if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
	    FS_PERIODIC_BANDWIDTH) {

		mutex_exit(&parent_ud->usb_mutex);

		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_classic_tt_bandwidth: Reached maximum "
		    "bandwidth value and cannot allocate bandwidth for a "
		    "given low/full speed periodic endpoint");

		return (USB_NO_BANDWIDTH);
	}

	mutex_exit(&parent_ud->usb_mutex);

	/*
	 * Adjust polling interval to be a power of 2.
	 *
	 * NOTE(review): the USB_FAILURE return of
	 * ehci_adjust_polling_interval() is not checked here; it appears
	 * callers always validate the interval first via
	 * ehci_allocate_high_speed_bandwidth() -- TODO confirm.
	 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/* Find the leftmost leaf in the subtree specified by the node. */
	leftmost = ehci_leftmost_leaf(pnode, height);

	mutex_enter(&parent_ud->usb_mutex);

	/* First verify that every affected leaf still has headroom */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];

		if ((parent_ud->usb_hs_hub_bandwidth[list] +
		    bandwidth) > FS_PERIODIC_BANDWIDTH) {

			mutex_exit(&parent_ud->usb_mutex);

			USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
			    "ehci_allocate_classic_tt_bandwidth: Reached "
			    "maximum bandwidth value and cannot allocate "
			    "bandwidth for low/full periodic endpoint");

			return (USB_NO_BANDWIDTH);
		}
	}

	/*
	 * All the leaves for this node must be updated with the bandwidth.
	 */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];
		parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min = parent_ud->usb_hs_hub_bandwidth[0];

	for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
		if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
			min = parent_ud->usb_hs_hub_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	parent_ud->usb_hs_hub_min_bandwidth = min;

	mutex_exit(&parent_ud->usb_mutex);

	return (USB_SUCCESS);
}


/*
 * ehci_deallocate_bandwidth:
 *
 * Deallocate bandwidth for the given node in the lattice and the length
 * of transfer.  Mirror image of ehci_allocate_bandwidth().
 */
void
ehci_deallocate_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode,
	uchar_t			smask,
	uchar_t			cmask)
{
	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);

	/*
	 * For low/full speed usb devices, deallocate classic TT bandwidth
	 * in additional to high speed bandwidth.
	 */
	if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {

		/* Deallocate classic TT bandwidth */
		ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
	}
}


/*
 * ehci_deallocate_high_speed_bandwidth:
 *
 * Deallocate high speed bandwidth of a interrupt or isochronous endpoint.
 */
static void
ehci_deallocate_high_speed_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode,
	uchar_t			smask,
	uchar_t			cmask)
{
	uint_t			height, leftmost;
	uint_t			list_count;
	uint_t			sbandwidth, cbandwidth;
	int			interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud;
	usb_port_status_t	port_status;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = ph->p_usba_device->usb_port_status;

	mutex_exit(&child_ud->usb_mutex);

	/* Recompute the same per-transaction byte counts used at alloc time */
	(void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
	    port_status, &sbandwidth, &cbandwidth);

	/* Adjust polling interval to be a power of 2 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/*
	 * Find the leftmost leaf in the subtree specified by the node
	 */
	leftmost = ehci_leftmost_leaf(pnode, height);

	list_count = EHCI_NUM_INTR_QH_LISTS/interval;

	/*
	 * Delete the bandwidth from the appropriate lists; negative
	 * amounts passed to ehci_update_bw_availability() give back
	 * what the matching allocation path took.
	 */
	if (port_status == USBA_HIGH_SPEED_DEV) {

		ehci_update_bw_availability(ehcip, -sbandwidth,
		    leftmost, list_count, smask);
	} else {
		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
		    USB_EP_ATTR_INTR) {

			/* Low/full speed interrupt: both split phases */
			ehci_update_bw_availability(ehcip, -sbandwidth,
			    leftmost, list_count, smask);
			ehci_update_bw_availability(ehcip, -cbandwidth,
			    leftmost, list_count, cmask);
		} else {
			if ((endpoint->bEndpointAddress &
			    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {

				/* sitd IN: complete split reserved a full
				 * uframe's worth per allocation path */
				ehci_update_bw_availability(ehcip, -sbandwidth,
				    leftmost, list_count, smask);
				ehci_update_bw_availability(ehcip,
				    -MAX_UFRAME_SITD_XFER, leftmost,
				    list_count, cmask);
			} else {

				/* sitd OUT: start split only */
				ehci_update_bw_availability(ehcip,
				    -MAX_UFRAME_SITD_XFER, leftmost,
				    list_count, smask);
			}
		}
	}
}

/*
 * ehci_deallocate_classic_tt_bandwidth:
 *
 * Deallocate classic TT bandwidth of a low/full speed interrupt or
 * isochronous endpoint from the parent high speed hub's accounting.
 */
static void
ehci_deallocate_classic_tt_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode)
{
	uint_t			bandwidth, height, leftmost, list, min;
	int			i, interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud, *parent_ud;
	usb_port_status_t	port_status;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = child_ud->usb_port_status;

	/* Get the parent high speed hub's usba device structure */
	parent_ud = child_ud->usb_hs_hub_usba_dev;

	mutex_exit(&child_ud->usb_mutex);

	/* Obtain the bandwidth */
	(void) ehci_compute_classic_bandwidth(endpoint,
	    port_status, &bandwidth);

	/* Adjust polling interval to be a power of 2 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/* Find the leftmost leaf in the subtree specified by the node */
	leftmost = ehci_leftmost_leaf(pnode, height);

	mutex_enter(&parent_ud->usb_mutex);

	/* Delete the bandwidth from the appropriate lists */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];

		parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min = parent_ud->usb_hs_hub_bandwidth[0];

	for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
		if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
			min = parent_ud->usb_hs_hub_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	parent_ud->usb_hs_hub_min_bandwidth = min;

	mutex_exit(&parent_ud->usb_mutex);
}


/*
 * ehci_compute_high_speed_bandwidth:
 *
 * Given a periodic endpoint (interrupt or isochronous) determine the total
 * bandwidth for one transaction. The EHCI host controller traverses the
 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
 * services an endpoint, only a single transaction attempt is made. The HC
 * moves to the next Endpoint Descriptor after the first transaction attempt
 * rather than finishing the entire Transfer Descriptor. Therefore, when a
 * Transfer Descriptor is inserted into the lattice, we will only count the
 * number of bytes for one transaction.
 *
 * The following are the formulas used for calculating bandwidth in terms
 * bytes and it is for the single USB high speed transaction. The protocol
 * overheads will be different for each of type of USB transfer & all these
 * formulas & protocol overheads are derived from the 5.11.3 section of the
 * USB 2.0 Specification.
 *
 * High-Speed:
 *		Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay
 *
 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub)
 *
 *		Protocol overhead + Split transaction overhead +
 *		((MaxPktSz * 7)/6) + Host_Delay;
 */
/* ARGSUSED */
static int
ehci_compute_high_speed_bandwidth(
	ehci_state_t		*ehcip,
	usb_ep_descr_t		*endpoint,
	usb_port_status_t	port_status,
	uint_t			*sbandwidth,
	uint_t			*cbandwidth)
{
	ushort_t	maxpacketsize = endpoint->wMaxPacketSize;

	/* Return failure if endpoint maximum packet is zero */
	if (maxpacketsize == 0) {
		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_high_speed_bandwidth: Periodic endpoint "
		    "with zero endpoint maximum packet size is not supported");

		return (USB_NOT_SUPPORTED);
	}

	/* Add bit-stuffing overhead */
	maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);

	/* Add Host Controller specific delay to required bandwidth */
	*sbandwidth = EHCI_HOST_CONTROLLER_DELAY;

	/* Add xfer specific protocol overheads */
	if ((endpoint->bmAttributes &
	    USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
		/* High speed interrupt transaction */
		*sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD;
	} else {
		/* Isochronous transaction */
		*sbandwidth += HS_ISOC_PROTO_OVERHEAD;
	}

	/*
	 * For low/full speed devices, add split transaction specific
	 * overheads.
	 */
	if (port_status != USBA_HIGH_SPEED_DEV) {
		/*
		 * Add start and complete split transaction
		 * tokens overheads.
		 */
		*cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD;
		*sbandwidth += START_SPLIT_OVERHEAD;

		/* Add data overhead depending on data direction */
		if ((endpoint->bEndpointAddress &
		    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
			*cbandwidth += maxpacketsize;
		} else {
			if ((endpoint->bmAttributes &
			    USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) {
				/* There is no compete splits for out */
				*cbandwidth = 0;
			}
			*sbandwidth += maxpacketsize;
		}
	} else {
		uint_t		xactions;

		/*
		 * Get the max transactions per microframe.
		 *
		 * NOTE(review): the mult bits are extracted from
		 * maxpacketsize AFTER the 7/6 bit-stuffing multiply
		 * above has scrambled them; it looks like the raw
		 * wMaxPacketSize was intended here -- TODO confirm
		 * before changing the bandwidth accounting.
		 */
		xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >>
		    USB_EP_MAX_XACTS_SHIFT) + 1;

		/* High speed transaction */
		*sbandwidth += maxpacketsize;

		/* Calculate bandwidth per micro-frame */
		*sbandwidth *= xactions;

		*cbandwidth = 0;
	}

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_allocate_high_speed_bandwidth: "
	    "Start split bandwidth %d Complete split bandwidth %d",
	    *sbandwidth, *cbandwidth);

	return (USB_SUCCESS);
}


/*
 * ehci_compute_classic_bandwidth:
 *
 * Given a periodic endpoint (interrupt or isochronous) determine the total
 * bandwidth for one transaction. The EHCI host controller traverses the
 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
 * services an endpoint, only a single transaction attempt is made. The HC
 * moves to the next Endpoint Descriptor after the first transaction attempt
 * rather than finishing the entire Transfer Descriptor. Therefore, when a
 * Transfer Descriptor is inserted into the lattice, we will only count the
 * number of bytes for one transaction.
 *
 * The following are the formulas used for calculating bandwidth in terms
 * bytes and it is for the single USB high speed transaction.
The protocol 2729 * overheads will be different for each of type of USB transfer & all these 2730 * formulas & protocol overheads are derived from the 5.11.3 section of the 2731 * USB 2.0 Specification. 2732 * 2733 * Low-Speed: 2734 * Protocol overhead + Hub LS overhead + 2735 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay 2736 * 2737 * Full-Speed: 2738 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay 2739 */ 2740 /* ARGSUSED */ 2741 static int 2742 ehci_compute_classic_bandwidth( 2743 usb_ep_descr_t *endpoint, 2744 usb_port_status_t port_status, 2745 uint_t *bandwidth) 2746 { 2747 ushort_t maxpacketsize = endpoint->wMaxPacketSize; 2748 2749 /* 2750 * If endpoint maximum packet is zero, then return immediately. 2751 */ 2752 if (maxpacketsize == 0) { 2753 2754 return (USB_NOT_SUPPORTED); 2755 } 2756 2757 /* Add TT delay to required bandwidth */ 2758 *bandwidth = TT_DELAY; 2759 2760 /* Add bit-stuffing overhead */ 2761 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6); 2762 2763 switch (port_status) { 2764 case USBA_LOW_SPEED_DEV: 2765 /* Low speed interrupt transaction */ 2766 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD + 2767 HUB_LOW_SPEED_PROTO_OVERHEAD + 2768 (LOW_SPEED_CLOCK * maxpacketsize)); 2769 break; 2770 case USBA_FULL_SPEED_DEV: 2771 /* Full speed transaction */ 2772 *bandwidth += maxpacketsize; 2773 2774 /* Add xfer specific protocol overheads */ 2775 if ((endpoint->bmAttributes & 2776 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) { 2777 /* Full speed interrupt transaction */ 2778 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD; 2779 } else { 2780 /* Isochronous and input transaction */ 2781 if ((endpoint->bEndpointAddress & 2782 USB_EP_DIR_MASK) == USB_EP_DIR_IN) { 2783 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD; 2784 } else { 2785 /* Isochronous and output transaction */ 2786 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD; 2787 } 2788 } 2789 break; 2790 } 2791 2792 return (USB_SUCCESS); 2793 } 2794 2795 2796 /* 2797 * ehci_adjust_polling_interval: 2798 * 2799 * 
Adjust bandwidth according usb device speed. 2800 */ 2801 /* ARGSUSED */ 2802 int 2803 ehci_adjust_polling_interval( 2804 ehci_state_t *ehcip, 2805 usb_ep_descr_t *endpoint, 2806 usb_port_status_t port_status) 2807 { 2808 uint_t interval; 2809 int i = 0; 2810 2811 /* Get the polling interval */ 2812 interval = endpoint->bInterval; 2813 2814 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2815 "ehci_adjust_polling_interval: Polling interval 0x%x", interval); 2816 2817 /* 2818 * According USB 2.0 Specifications, a high-speed endpoint's 2819 * polling intervals are specified interms of 125us or micro 2820 * frame, where as full/low endpoint's polling intervals are 2821 * specified in milliseconds. 2822 * 2823 * A high speed interrupt/isochronous endpoints can specify 2824 * desired polling interval between 1 to 16 micro-frames, 2825 * where as full/low endpoints can specify between 1 to 255 2826 * milliseconds. 2827 */ 2828 switch (port_status) { 2829 case USBA_LOW_SPEED_DEV: 2830 /* 2831 * Low speed endpoints are limited to specifying 2832 * only 8ms to 255ms in this driver. If a device 2833 * reports a polling interval that is less than 8ms, 2834 * it will use 8 ms instead. 2835 */ 2836 if (interval < LS_MIN_POLL_INTERVAL) { 2837 2838 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2839 "Low speed endpoint's poll interval of %d ms " 2840 "is below threshold. Rounding up to %d ms", 2841 interval, LS_MIN_POLL_INTERVAL); 2842 2843 interval = LS_MIN_POLL_INTERVAL; 2844 } 2845 2846 /* 2847 * Return an error if the polling interval is greater 2848 * than 255ms. 2849 */ 2850 if (interval > LS_MAX_POLL_INTERVAL) { 2851 2852 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2853 "Low speed endpoint's poll interval is " 2854 "greater than %d ms", LS_MAX_POLL_INTERVAL); 2855 2856 return (USB_FAILURE); 2857 } 2858 break; 2859 2860 case USBA_FULL_SPEED_DEV: 2861 /* 2862 * Return an error if the polling interval is less 2863 * than 1ms and greater than 255ms. 
2864 */ 2865 if ((interval < FS_MIN_POLL_INTERVAL) && 2866 (interval > FS_MAX_POLL_INTERVAL)) { 2867 2868 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2869 "Full speed endpoint's poll interval must " 2870 "be between %d and %d ms", FS_MIN_POLL_INTERVAL, 2871 FS_MAX_POLL_INTERVAL); 2872 2873 return (USB_FAILURE); 2874 } 2875 break; 2876 case USBA_HIGH_SPEED_DEV: 2877 /* 2878 * Return an error if the polling interval is less 1 2879 * and greater than 16. Convert this value to 125us 2880 * units using 2^(bInterval -1). refer usb 2.0 spec 2881 * page 51 for details. 2882 */ 2883 if ((interval < HS_MIN_POLL_INTERVAL) && 2884 (interval > HS_MAX_POLL_INTERVAL)) { 2885 2886 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2887 "High speed endpoint's poll interval " 2888 "must be between %d and %d units", 2889 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL); 2890 2891 return (USB_FAILURE); 2892 } 2893 2894 /* Adjust high speed device polling interval */ 2895 interval = 2896 ehci_adjust_high_speed_polling_interval(ehcip, endpoint); 2897 2898 break; 2899 } 2900 2901 /* 2902 * If polling interval is greater than 32ms, 2903 * adjust polling interval equal to 32ms. 2904 */ 2905 if (interval > EHCI_NUM_INTR_QH_LISTS) { 2906 interval = EHCI_NUM_INTR_QH_LISTS; 2907 } 2908 2909 /* 2910 * Find the nearest power of 2 that's less 2911 * than interval. 2912 */ 2913 while ((ehci_pow_2(i)) <= interval) { 2914 i++; 2915 } 2916 2917 return (ehci_pow_2((i - 1))); 2918 } 2919 2920 2921 /* 2922 * ehci_adjust_high_speed_polling_interval: 2923 */ 2924 /* ARGSUSED */ 2925 static int 2926 ehci_adjust_high_speed_polling_interval( 2927 ehci_state_t *ehcip, 2928 usb_ep_descr_t *endpoint) 2929 { 2930 uint_t interval; 2931 2932 /* Get the polling interval */ 2933 interval = ehci_pow_2(endpoint->bInterval - 1); 2934 2935 /* 2936 * Convert polling interval from micro seconds 2937 * to milli seconds. 
	 */
	if (interval <= EHCI_MAX_UFRAMES) {
		interval = 1;
	} else {
		interval = interval/EHCI_MAX_UFRAMES;
	}

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_adjust_high_speed_polling_interval: "
	    "High speed adjusted interval 0x%x", interval);

	return (interval);
}


/*
 * ehci_lattice_height:
 *
 * Given the requested bandwidth, find the height in the tree at which the
 * nodes for this bandwidth fall. The height is measured as the number of
 * nodes from the leaf to the level specified by bandwidth The root of the
 * tree is at height TREE_HEIGHT.
 */
static uint_t
ehci_lattice_height(uint_t interval)
{
	return (TREE_HEIGHT - (ehci_log_2(interval)));
}


/*
 * ehci_lattice_parent:
 *
 * Given a node in the lattice, find the index of the parent node
 * (nodes are numbered breadth-first starting at 0 for the root).
 */
static uint_t
ehci_lattice_parent(uint_t node)
{
	if ((node % 2) == 0) {

		return ((node/2) - 1);
	} else {

		return ((node + 1)/2 - 1);
	}
}


/*
 * ehci_find_periodic_node:
 *
 * Based on the "real" array leaf node and interval, get the periodic node.
 * Walks up the lattice ehci_lattice_height(interval) levels from the leaf.
 */
static uint_t
ehci_find_periodic_node(uint_t leaf, int interval)
{
	uint_t	lattice_leaf;
	uint_t	height = ehci_lattice_height(interval);
	uint_t	pnode;
	int	i;

	/* Get the leaf number in the lattice */
	lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1;

	/* Get the node in the lattice based on the height and leaf */
	pnode = lattice_leaf;
	for (i = 0; i < height; i++) {
		pnode = ehci_lattice_parent(pnode);
	}

	return (pnode);
}


/*
 * ehci_leftmost_leaf:
 *
 * Find the leftmost leaf in the subtree specified by the node. Height refers
 * to number of nodes from the bottom of the tree to the node, including the
 * node.
 *
 * The formula for a zero based tree is:
 *     2^H * Node + 2^H - 1
 * The leaf of the tree is an array, convert the number for the array.
 *     Subtract the size of nodes not in the array
 *     2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) =
 *     2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS =
 *     2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS
 *	   0
 *	 1   2
 *	0 1 2 3
 */
static uint_t
ehci_leftmost_leaf(
	uint_t	node,
	uint_t	height)
{
	return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS);
}


/*
 * ehci_pow_2:
 *
 * Compute 2 to the power x (x assumed small enough not to overflow uint_t).
 */
static uint_t
ehci_pow_2(uint_t x)
{
	if (x == 0) {

		return (1);
	} else {

		return (2 << (x - 1));
	}
}


/*
 * ehci_log_2:
 *
 * Compute log base 2 of x (floor; x assumed non-zero).
 */
static uint_t
ehci_log_2(uint_t x)
{
	int	i = 0;

	while (x != 1) {
		x = x >> 1;
		i++;
	}

	return (i);
}


/*
 * ehci_find_bestfit_hs_mask:
 *
 * Find the smask and cmask in the bandwidth allocation, and update the
 * bandwidth allocation.
3081 */ 3082 static int 3083 ehci_find_bestfit_hs_mask( 3084 ehci_state_t *ehcip, 3085 uchar_t *smask, 3086 uint_t *pnode, 3087 usb_ep_descr_t *endpoint, 3088 uint_t bandwidth, 3089 int interval) 3090 { 3091 int i; 3092 uint_t elements, index; 3093 int array_leaf, best_array_leaf; 3094 uint_t node_bandwidth, best_node_bandwidth; 3095 uint_t leaf_count; 3096 uchar_t bw_mask; 3097 uchar_t best_smask; 3098 3099 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3100 "ehci_find_bestfit_hs_mask: "); 3101 3102 /* Get all the valid smasks */ 3103 switch (ehci_pow_2(endpoint->bInterval - 1)) { 3104 case EHCI_INTR_1US_POLL: 3105 index = EHCI_1US_MASK_INDEX; 3106 elements = EHCI_INTR_1US_POLL; 3107 break; 3108 case EHCI_INTR_2US_POLL: 3109 index = EHCI_2US_MASK_INDEX; 3110 elements = EHCI_INTR_2US_POLL; 3111 break; 3112 case EHCI_INTR_4US_POLL: 3113 index = EHCI_4US_MASK_INDEX; 3114 elements = EHCI_INTR_4US_POLL; 3115 break; 3116 case EHCI_INTR_XUS_POLL: 3117 default: 3118 index = EHCI_XUS_MASK_INDEX; 3119 elements = EHCI_INTR_XUS_POLL; 3120 break; 3121 } 3122 3123 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 3124 3125 /* 3126 * Because of the way the leaves are setup, we will automatically 3127 * hit the leftmost leaf of every possible node with this interval. 3128 */ 3129 best_smask = 0x00; 3130 best_node_bandwidth = 0; 3131 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 3132 /* Find the bandwidth mask */ 3133 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip, 3134 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask); 3135 3136 /* 3137 * If this node cannot support our requirements skip to the 3138 * next leaf. 3139 */ 3140 if (bw_mask == 0x00) { 3141 continue; 3142 } 3143 3144 /* 3145 * Now make sure our bandwidth requirements can be 3146 * satisfied with one of smasks in this node. 
3147 */ 3148 *smask = 0x00; 3149 for (i = index; i < (index + elements); i++) { 3150 /* Check the start split mask value */ 3151 if (ehci_start_split_mask[index] & bw_mask) { 3152 *smask = ehci_start_split_mask[index]; 3153 break; 3154 } 3155 } 3156 3157 /* 3158 * If an appropriate smask is found save the information if: 3159 * o best_smask has not been found yet. 3160 * - or - 3161 * o This is the node with the least amount of bandwidth 3162 */ 3163 if ((*smask != 0x00) && 3164 ((best_smask == 0x00) || 3165 (best_node_bandwidth > node_bandwidth))) { 3166 3167 best_node_bandwidth = node_bandwidth; 3168 best_array_leaf = array_leaf; 3169 best_smask = *smask; 3170 } 3171 } 3172 3173 /* 3174 * If we find node that can handle the bandwidth populate the 3175 * appropriate variables and return success. 3176 */ 3177 if (best_smask) { 3178 *smask = best_smask; 3179 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 3180 interval); 3181 ehci_update_bw_availability(ehcip, bandwidth, 3182 ehci_index[best_array_leaf], leaf_count, best_smask); 3183 3184 return (USB_SUCCESS); 3185 } 3186 3187 return (USB_FAILURE); 3188 } 3189 3190 3191 /* 3192 * ehci_find_bestfit_ls_intr_mask: 3193 * 3194 * Find the smask and cmask in the bandwidth allocation. 
3195 */ 3196 static int 3197 ehci_find_bestfit_ls_intr_mask( 3198 ehci_state_t *ehcip, 3199 uchar_t *smask, 3200 uchar_t *cmask, 3201 uint_t *pnode, 3202 uint_t sbandwidth, 3203 uint_t cbandwidth, 3204 int interval) 3205 { 3206 int i; 3207 uint_t elements, index; 3208 int array_leaf, best_array_leaf; 3209 uint_t node_sbandwidth, node_cbandwidth; 3210 uint_t best_node_bandwidth; 3211 uint_t leaf_count; 3212 uchar_t bw_smask, bw_cmask; 3213 uchar_t best_smask, best_cmask; 3214 3215 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3216 "ehci_find_bestfit_ls_intr_mask: "); 3217 3218 /* For low and full speed devices */ 3219 index = EHCI_XUS_MASK_INDEX; 3220 elements = EHCI_INTR_4MS_POLL; 3221 3222 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 3223 3224 /* 3225 * Because of the way the leaves are setup, we will automatically 3226 * hit the leftmost leaf of every possible node with this interval. 3227 */ 3228 best_smask = 0x00; 3229 best_node_bandwidth = 0; 3230 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 3231 /* Find the bandwidth mask */ 3232 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3233 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask); 3234 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3235 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask); 3236 3237 /* 3238 * If this node cannot support our requirements skip to the 3239 * next leaf. 3240 */ 3241 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) { 3242 continue; 3243 } 3244 3245 /* 3246 * Now make sure our bandwidth requirements can be 3247 * satisfied with one of smasks in this node. 
3248 */ 3249 *smask = 0x00; 3250 *cmask = 0x00; 3251 for (i = index; i < (index + elements); i++) { 3252 /* Check the start split mask value */ 3253 if ((ehci_start_split_mask[index] & bw_smask) && 3254 (ehci_intr_complete_split_mask[index] & bw_cmask)) { 3255 *smask = ehci_start_split_mask[index]; 3256 *cmask = ehci_intr_complete_split_mask[index]; 3257 break; 3258 } 3259 } 3260 3261 /* 3262 * If an appropriate smask is found save the information if: 3263 * o best_smask has not been found yet. 3264 * - or - 3265 * o This is the node with the least amount of bandwidth 3266 */ 3267 if ((*smask != 0x00) && 3268 ((best_smask == 0x00) || 3269 (best_node_bandwidth > 3270 (node_sbandwidth + node_cbandwidth)))) { 3271 best_node_bandwidth = node_sbandwidth + node_cbandwidth; 3272 best_array_leaf = array_leaf; 3273 best_smask = *smask; 3274 best_cmask = *cmask; 3275 } 3276 } 3277 3278 /* 3279 * If we find node that can handle the bandwidth populate the 3280 * appropriate variables and return success. 3281 */ 3282 if (best_smask) { 3283 *smask = best_smask; 3284 *cmask = best_cmask; 3285 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 3286 interval); 3287 ehci_update_bw_availability(ehcip, sbandwidth, 3288 ehci_index[best_array_leaf], leaf_count, best_smask); 3289 ehci_update_bw_availability(ehcip, cbandwidth, 3290 ehci_index[best_array_leaf], leaf_count, best_cmask); 3291 3292 return (USB_SUCCESS); 3293 } 3294 3295 return (USB_FAILURE); 3296 } 3297 3298 3299 /* 3300 * ehci_find_bestfit_sitd_in_mask: 3301 * 3302 * Find the smask and cmask in the bandwidth allocation. 
 */
static int
ehci_find_bestfit_sitd_in_mask(
	ehci_state_t		*ehcip,
	uchar_t			*smask,
	uchar_t			*cmask,
	uint_t			*pnode,
	uint_t			sbandwidth,
	uint_t			cbandwidth,
	int			interval)
{
	int			i, uFrames, found;
	int			array_leaf, best_array_leaf;
	uint_t			node_sbandwidth, node_cbandwidth;
	uint_t			best_node_bandwidth;
	uint_t			leaf_count;
	uchar_t			bw_smask, bw_cmask;
	uchar_t			best_smask, best_cmask;

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_find_bestfit_sitd_in_mask: ");

	/* Number of periodic leaves this interval spans */
	leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;

	/*
	 * Because of the way the leaves are setup, we will automatically
	 * hit the leftmost leaf of every possible node with this interval.
	 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
	 */
	/*
	 * Need to add an additional 2 uFrames, if the "L"ast
	 * complete split is before uFrame 6.  See section
	 * 11.8.4 in USB 2.0 Spec.  Currently we do not support
	 * the "Back Ptr" which means we support an IN of
	 * ~4*MAX_UFRAME_SITD_XFER bandwidth.
	 */
	uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
	if (cbandwidth % MAX_UFRAME_SITD_XFER) {
		uFrames++;
	}
	if (uFrames > 6) {

		return (USB_FAILURE);
	}
	/*
	 * Seed the masks: one start-split in uFrame 0, and a run of
	 * 'uFrames' complete-splits beginning two uFrames later.
	 */
	*smask = 0x1;
	*cmask = 0x00;
	for (i = 0; i < uFrames; i++) {
		*cmask = *cmask << 1;
		*cmask |= 0x1;
	}
	/* cmask must start 2 frames after the smask */
	*cmask = *cmask << 2;

	found = 0;
	best_smask = 0x00;
	best_node_bandwidth = 0;
	for (array_leaf = 0; array_leaf < interval; array_leaf++) {
		/* Availability masks for start- and complete-split phases */
		node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
		    sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
		node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
		    MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
		    &bw_cmask);

		/*
		 * If this node cannot support our requirements skip to the
		 * next leaf.
		 */
		if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
			continue;
		}

		/*
		 * Slide the smask/cmask pair left until both land on
		 * available uFrames, keeping the complete splits inside
		 * the frame.
		 *
		 * NOTE(review): *smask and *cmask are not re-seeded for
		 * each array_leaf, so the search resumes from the
		 * previous leaf's shifted position rather than uFrame 0 —
		 * presumably intentional, but verify.
		 */
		for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
			if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
				found = 1;
				break;
			}
			*smask = *smask << 1;
			*cmask = *cmask << 1;
		}

		/*
		 * If an appropriate smask is found save the information if:
		 * o best_smask has not been found yet.
		 * - or -
		 * o This is the node with the least amount of bandwidth
		 */
		if (found &&
		    ((best_smask == 0x00) ||
		    (best_node_bandwidth >
		    (node_sbandwidth + node_cbandwidth)))) {
			best_node_bandwidth = node_sbandwidth + node_cbandwidth;
			best_array_leaf = array_leaf;
			best_smask = *smask;
			best_cmask = *cmask;
		}
	}

	/*
	 * If we find node that can handle the bandwidth populate the
	 * appropriate variables and return success.
	 */
	if (best_smask) {
		*smask = best_smask;
		*cmask = best_cmask;
		*pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
		    interval);
		ehci_update_bw_availability(ehcip, sbandwidth,
		    ehci_index[best_array_leaf], leaf_count, best_smask);
		ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
		    ehci_index[best_array_leaf], leaf_count, best_cmask);

		return (USB_SUCCESS);
	}

	return (USB_FAILURE);
}


/*
 * ehci_find_bestfit_sitd_out_mask:
 *
 * Find the smask in the bandwidth allocation.
 */
static int
ehci_find_bestfit_sitd_out_mask(
	ehci_state_t		*ehcip,
	uchar_t			*smask,
	uint_t			*pnode,
	uint_t			sbandwidth,
	int			interval)
{
	int			i, uFrames, found;
	int			array_leaf, best_array_leaf;
	uint_t			node_sbandwidth;
	uint_t			best_node_bandwidth;
	uint_t			leaf_count;
	uchar_t			bw_smask;
	uchar_t			best_smask;

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_find_bestfit_sitd_out_mask: ");

	/* Number of periodic leaves this interval spans */
	leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;

	/*
	 * Because of the way the leaves are setup, we will automatically
	 * hit the leftmost leaf of every possible node with this interval.
	 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
	 *
	 * OUT split transactions need no complete-split phase, so only a
	 * run of start-splits wide enough for sbandwidth is required.
	 */
	*smask = 0x00;
	uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
	if (sbandwidth % MAX_UFRAME_SITD_XFER) {
		uFrames++;
	}
	for (i = 0; i < uFrames; i++) {
		*smask = *smask << 1;
		*smask |= 0x1;
	}

	found = 0;
	best_smask = 0x00;
	best_node_bandwidth = 0;
	for (array_leaf = 0; array_leaf < interval; array_leaf++) {
		node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
		    MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
		    &bw_smask);

		/*
		 * If this node cannot support our requirements skip to the
		 * next leaf.
		 */
		if (bw_smask == 0x00) {
			continue;
		}

		/*
		 * You cannot have a start split on the 8th uFrame.
		 *
		 * NOTE(review): as above, *smask is not re-seeded per
		 * array_leaf; if bit 7 is already set from a previous leaf
		 * this loop body is skipped entirely — verify intended.
		 */
		for (i = 0; (*smask & 0x80) == 0; i++) {
			if (*smask & bw_smask) {
				found = 1;
				break;
			}
			*smask = *smask << 1;
		}

		/*
		 * If an appropriate smask is found save the information if:
		 * o best_smask has not been found yet.
		 * - or -
		 * o This is the node with the least amount of bandwidth
		 */
		if (found &&
		    ((best_smask == 0x00) ||
		    (best_node_bandwidth > node_sbandwidth))) {
			best_node_bandwidth = node_sbandwidth;
			best_array_leaf = array_leaf;
			best_smask = *smask;
		}
	}

	/*
	 * If we find node that can handle the bandwidth populate the
	 * appropriate variables and return success.
	 */
	if (best_smask) {
		*smask = best_smask;
		*pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
		    interval);
		ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
		    ehci_index[best_array_leaf], leaf_count, best_smask);

		return (USB_SUCCESS);
	}

	return (USB_FAILURE);
}


/*
 * ehci_calculate_bw_availability_mask:
 *
 * Returns the "total bandwidth used" in this node.
 * Populates bw_mask with the uFrames that can support the bandwidth.
 *
 * If all the Frames cannot support this bandwidth, then bw_mask
 * will return 0x00 and the "total bandwidth used" will be invalid.
3528 */ 3529 static uint_t 3530 ehci_calculate_bw_availability_mask( 3531 ehci_state_t *ehcip, 3532 uint_t bandwidth, 3533 int leaf, 3534 int leaf_count, 3535 uchar_t *bw_mask) 3536 { 3537 int i, j; 3538 uchar_t bw_uframe; 3539 int uframe_total; 3540 ehci_frame_bandwidth_t *fbp; 3541 uint_t total_bandwidth = 0; 3542 3543 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3544 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d", 3545 leaf, leaf_count); 3546 3547 /* Start by saying all uFrames are available */ 3548 *bw_mask = 0xFF; 3549 3550 for (i = 0; (i < leaf_count) || (*bw_mask == 0x00); i++) { 3551 fbp = &ehcip->ehci_frame_bandwidth[leaf + i]; 3552 3553 total_bandwidth += fbp->ehci_allocated_frame_bandwidth; 3554 3555 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3556 /* 3557 * If the uFrame in bw_mask is available check to see if 3558 * it can support the additional bandwidth. 3559 */ 3560 bw_uframe = (*bw_mask & (0x1 << j)); 3561 uframe_total = 3562 fbp->ehci_micro_frame_bandwidth[j] + 3563 bandwidth; 3564 if ((bw_uframe) && 3565 (uframe_total > HS_PERIODIC_BANDWIDTH)) { 3566 *bw_mask = *bw_mask & ~bw_uframe; 3567 } 3568 } 3569 } 3570 3571 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3572 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x", 3573 *bw_mask); 3574 3575 return (total_bandwidth); 3576 } 3577 3578 3579 /* 3580 * ehci_update_bw_availability: 3581 * 3582 * The leftmost leaf needs to be in terms of array position and 3583 * not the actual lattice position. 
3584 */ 3585 static void 3586 ehci_update_bw_availability( 3587 ehci_state_t *ehcip, 3588 int bandwidth, 3589 int leftmost_leaf, 3590 int leaf_count, 3591 uchar_t mask) 3592 { 3593 int i, j; 3594 ehci_frame_bandwidth_t *fbp; 3595 int uFrame_bandwidth[8]; 3596 3597 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3598 "ehci_update_bw_availability: " 3599 "leaf %d count %d bandwidth 0x%x mask 0x%x", 3600 leftmost_leaf, leaf_count, bandwidth, mask); 3601 3602 ASSERT(leftmost_leaf < 32); 3603 ASSERT(leftmost_leaf >= 0); 3604 3605 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3606 if (mask & 0x1) { 3607 uFrame_bandwidth[j] = bandwidth; 3608 } else { 3609 uFrame_bandwidth[j] = 0; 3610 } 3611 3612 mask = mask >> 1; 3613 } 3614 3615 /* Updated all the effected leafs with the bandwidth */ 3616 for (i = 0; i < leaf_count; i++) { 3617 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i]; 3618 3619 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3620 fbp->ehci_micro_frame_bandwidth[j] += 3621 uFrame_bandwidth[j]; 3622 fbp->ehci_allocated_frame_bandwidth += 3623 uFrame_bandwidth[j]; 3624 } 3625 } 3626 } 3627 3628 /* 3629 * Miscellaneous functions 3630 */ 3631 3632 /* 3633 * ehci_obtain_state: 3634 * 3635 * NOTE: This function is also called from POLLED MODE. 3636 */ 3637 ehci_state_t * 3638 ehci_obtain_state(dev_info_t *dip) 3639 { 3640 int instance = ddi_get_instance(dip); 3641 3642 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance); 3643 3644 ASSERT(state != NULL); 3645 3646 return (state); 3647 } 3648 3649 3650 /* 3651 * ehci_state_is_operational: 3652 * 3653 * Check the Host controller state and return proper values. 
3654 */ 3655 int 3656 ehci_state_is_operational(ehci_state_t *ehcip) 3657 { 3658 int val; 3659 3660 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3661 3662 switch (ehcip->ehci_hc_soft_state) { 3663 case EHCI_CTLR_INIT_STATE: 3664 case EHCI_CTLR_SUSPEND_STATE: 3665 val = USB_FAILURE; 3666 break; 3667 case EHCI_CTLR_OPERATIONAL_STATE: 3668 val = USB_SUCCESS; 3669 break; 3670 case EHCI_CTLR_ERROR_STATE: 3671 val = USB_HC_HARDWARE_ERROR; 3672 break; 3673 default: 3674 val = USB_FAILURE; 3675 break; 3676 } 3677 3678 return (val); 3679 } 3680 3681 3682 /* 3683 * ehci_do_soft_reset 3684 * 3685 * Do soft reset of ehci host controller. 3686 */ 3687 int 3688 ehci_do_soft_reset(ehci_state_t *ehcip) 3689 { 3690 usb_frame_number_t before_frame_number, after_frame_number; 3691 ehci_regs_t *ehci_save_regs; 3692 3693 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3694 3695 /* Increment host controller error count */ 3696 ehcip->ehci_hc_error++; 3697 3698 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3699 "ehci_do_soft_reset:" 3700 "Reset ehci host controller 0x%x", ehcip->ehci_hc_error); 3701 3702 /* 3703 * Allocate space for saving current Host Controller 3704 * registers. Don't do any recovery if allocation 3705 * fails. 
3706 */ 3707 ehci_save_regs = (ehci_regs_t *) 3708 kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP); 3709 3710 if (ehci_save_regs == NULL) { 3711 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3712 "ehci_do_soft_reset: kmem_zalloc failed"); 3713 3714 return (USB_FAILURE); 3715 } 3716 3717 /* Save current ehci registers */ 3718 ehci_save_regs->ehci_command = Get_OpReg(ehci_command); 3719 ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt); 3720 ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment); 3721 ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr); 3722 ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag); 3723 ehci_save_regs->ehci_periodic_list_base = 3724 Get_OpReg(ehci_periodic_list_base); 3725 3726 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3727 "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs); 3728 3729 /* Disable all list processing and interrupts */ 3730 Set_OpReg(ehci_command, Get_OpReg(ehci_command) & 3731 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)); 3732 3733 /* Disable all EHCI interrupts */ 3734 Set_OpReg(ehci_interrupt, 0); 3735 3736 /* Wait for few milliseconds */ 3737 drv_usecwait(EHCI_SOF_TIMEWAIT); 3738 3739 /* Do light soft reset of ehci host controller */ 3740 Set_OpReg(ehci_command, 3741 Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET); 3742 3743 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3744 "ehci_do_soft_reset: Reset in progress"); 3745 3746 /* Wait for reset to complete */ 3747 drv_usecwait(EHCI_RESET_TIMEWAIT); 3748 3749 /* 3750 * Restore previous saved EHCI register value 3751 * into the current EHCI registers. 
3752 */ 3753 Set_OpReg(ehci_ctrl_segment, (uint32_t) 3754 ehci_save_regs->ehci_ctrl_segment); 3755 3756 Set_OpReg(ehci_periodic_list_base, (uint32_t) 3757 ehci_save_regs->ehci_periodic_list_base); 3758 3759 Set_OpReg(ehci_async_list_addr, (uint32_t) 3760 ehci_save_regs->ehci_async_list_addr); 3761 3762 /* 3763 * For some reason this register might get nulled out by 3764 * the Uli M1575 South Bridge. To workaround the hardware 3765 * problem, check the value after write and retry if the 3766 * last write fails. 3767 */ 3768 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) && 3769 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) && 3770 (ehci_save_regs->ehci_async_list_addr != 3771 Get_OpReg(ehci_async_list_addr))) { 3772 int retry = 0; 3773 3774 Set_OpRegRetry(ehci_async_list_addr, (uint32_t) 3775 ehci_save_regs->ehci_async_list_addr, retry); 3776 if (retry >= EHCI_MAX_RETRY) { 3777 USB_DPRINTF_L2(PRINT_MASK_ATTA, 3778 ehcip->ehci_log_hdl, "ehci_do_soft_reset:" 3779 " ASYNCLISTADDR write failed."); 3780 3781 return (USB_FAILURE); 3782 } 3783 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3784 "ehci_do_soft_reset: ASYNCLISTADDR " 3785 "write failed, retry=%d", retry); 3786 } 3787 3788 Set_OpReg(ehci_config_flag, (uint32_t) 3789 ehci_save_regs->ehci_config_flag); 3790 3791 /* Enable both Asynchronous and Periodic Schedule if necessary */ 3792 ehci_toggle_scheduler(ehcip); 3793 3794 /* 3795 * Set ehci_interrupt to enable all interrupts except Root 3796 * Hub Status change and frame list rollover interrupts. 3797 */ 3798 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR | 3799 EHCI_INTR_FRAME_LIST_ROLLOVER | 3800 EHCI_INTR_USB_ERROR | 3801 EHCI_INTR_USB); 3802 3803 /* 3804 * Deallocate the space that allocated for saving 3805 * HC registers. 3806 */ 3807 kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t)); 3808 3809 /* 3810 * Set the desired interrupt threshold, frame list size (if 3811 * applicable) and turn EHCI host controller. 
3812 */ 3813 Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) & 3814 ~EHCI_CMD_INTR_THRESHOLD) | 3815 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN))); 3816 3817 /* Wait 10ms for EHCI to start sending SOF */ 3818 drv_usecwait(EHCI_RESET_TIMEWAIT); 3819 3820 /* 3821 * Get the current usb frame number before waiting for 3822 * few milliseconds. 3823 */ 3824 before_frame_number = ehci_get_current_frame_number(ehcip); 3825 3826 /* Wait for few milliseconds */ 3827 drv_usecwait(EHCI_SOF_TIMEWAIT); 3828 3829 /* 3830 * Get the current usb frame number after waiting for 3831 * few milliseconds. 3832 */ 3833 after_frame_number = ehci_get_current_frame_number(ehcip); 3834 3835 USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3836 "ehci_do_soft_reset: Before Frame Number 0x%llx " 3837 "After Frame Number 0x%llx", 3838 (unsigned long long)before_frame_number, 3839 (unsigned long long)after_frame_number); 3840 3841 if ((after_frame_number <= before_frame_number) && 3842 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) { 3843 3844 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3845 "ehci_do_soft_reset: Soft reset failed"); 3846 3847 return (USB_FAILURE); 3848 } 3849 3850 return (USB_SUCCESS); 3851 } 3852 3853 3854 /* 3855 * ehci_get_xfer_attrs: 3856 * 3857 * Get the attributes of a particular xfer. 3858 * 3859 * NOTE: This function is also called from POLLED MODE. 
 */
usb_req_attrs_t
ehci_get_xfer_attrs(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
	usb_req_attrs_t		attrs = USB_ATTRS_NONE;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_get_xfer_attrs:");

	/*
	 * The attribute field lives in a different request structure for
	 * each endpoint type, so dispatch on the endpoint attribute bits.
	 * Isochronous requests fall through and return USB_ATTRS_NONE.
	 */
	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
	case USB_EP_ATTR_CONTROL:
		attrs = ((usb_ctrl_req_t *)
		    tw->tw_curr_xfer_reqp)->ctrl_attributes;
		break;
	case USB_EP_ATTR_BULK:
		attrs = ((usb_bulk_req_t *)
		    tw->tw_curr_xfer_reqp)->bulk_attributes;
		break;
	case USB_EP_ATTR_INTR:
		attrs = ((usb_intr_req_t *)
		    tw->tw_curr_xfer_reqp)->intr_attributes;
		break;
	}

	return (attrs);
}


/*
 * ehci_get_current_frame_number:
 *
 * Get the current software based usb frame number.
 */
usb_frame_number_t
ehci_get_current_frame_number(ehci_state_t *ehcip)
{
	usb_frame_number_t	usb_frame_number;
	usb_frame_number_t	ehci_fno, micro_frame_number;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* ehci_fno holds the software-maintained high-order frame bits */
	ehci_fno = ehcip->ehci_fno;
	micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;

	/*
	 * Calculate current software based usb frame number.
	 *
	 * This code accounts for the fact that frame number is
	 * updated by the Host Controller before the ehci driver
	 * gets an FrameListRollover interrupt that will adjust
	 * Frame higher part.
	 *
	 * Refer ehci specification 1.0, section 2.3.2, page 21.
	 */
	micro_frame_number = ((micro_frame_number & 0x1FFF) |
	    ehci_fno) + (((micro_frame_number & 0x3FFF) ^
	    ehci_fno) & 0x2000);

	/*
	 * Micro Frame number is equivalent to 125 usec. Eight
	 * Micro Frame numbers are equivalent to one millsecond
	 * or one usb frame number.
	 */
	usb_frame_number = micro_frame_number >>
	    EHCI_uFRAMES_PER_USB_FRAME_SHIFT;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_get_current_frame_number: "
	    "Current usb uframe number = 0x%llx "
	    "Current usb frame number = 0x%llx",
	    (unsigned long long)micro_frame_number,
	    (unsigned long long)usb_frame_number);

	return (usb_frame_number);
}


/*
 * ehci_cpr_cleanup:
 *
 * Cleanup ehci state and other ehci specific informations across
 * Check Point Resume (CPR).
 */
static void
ehci_cpr_cleanup(ehci_state_t *ehcip)
{
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Reset software part of usb frame number */
	ehcip->ehci_fno = 0;
}


/*
 * ehci_wait_for_sof:
 *
 * Wait for couple of SOF interrupts
 *
 * Verifies the controller is generating SOFs by watching the frame
 * number advance across a short delay; attempts a soft reset if it
 * does not.  ehci_int_mutex is dropped across the delay.
 */
int
ehci_wait_for_sof(ehci_state_t *ehcip)
{
	usb_frame_number_t	before_frame_number, after_frame_number;
	int			error = USB_SUCCESS;

	USB_DPRINTF_L4(PRINT_MASK_LISTS,
	    ehcip->ehci_log_hdl, "ehci_wait_for_sof");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	error = ehci_state_is_operational(ehcip);

	if (error != USB_SUCCESS) {

		return (error);
	}

	/* Get the current usb frame number before waiting for two SOFs */
	before_frame_number = ehci_get_current_frame_number(ehcip);

	mutex_exit(&ehcip->ehci_int_mutex);

	/* Wait for few milliseconds */
	delay(drv_usectohz(EHCI_SOF_TIMEWAIT));

	mutex_enter(&ehcip->ehci_int_mutex);

	/* Get the current usb frame number after woken up */
	after_frame_number = ehci_get_current_frame_number(ehcip);

	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_wait_for_sof: framenumber: before 0x%llx "
	    "after 0x%llx",
	    (unsigned long long)before_frame_number,
	    (unsigned long long)after_frame_number);

	/* Return failure, if usb frame number has not been changed */
	if (after_frame_number <= before_frame_number) {

		if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) {

			USB_DPRINTF_L0(PRINT_MASK_LISTS,
			    ehcip->ehci_log_hdl, "No SOF interrupts");

			/* Set host controller soft state to error */
			ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;

			return (USB_FAILURE);
		}

	}

	return (USB_SUCCESS);
}

/*
 * Toggle the async/periodic schedule based on opened pipe count.
 * During pipe cleanup(in pipe reset case), the pipe's QH is temporarily
 * disabled. But the TW on the pipe is not freed. In this case, we need
 * to disable async/periodic schedule for some non-compatible hardware.
 * Otherwise, the hardware will overwrite software's configuration of
 * the QH.
 */
void
ehci_toggle_scheduler_on_pipe(ehci_state_t *ehcip)
{
	uint_t	temp_reg, cmd_reg;

	cmd_reg = Get_OpReg(ehci_command);
	temp_reg = cmd_reg;

	/*
	 * Enable/Disable asynchronous scheduler, and
	 * turn on/off async list door bell
	 */
	if (ehcip->ehci_open_async_count) {
		if ((ehcip->ehci_async_req_count > 0) &&
		    ((cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE) == 0)) {
			/*
			 * For some reason this address might get nulled out by
			 * the ehci chip. Set it here just in case it is null.
			 */
			Set_OpReg(ehci_async_list_addr,
			    ehci_qh_cpu_to_iommu(ehcip,
			    ehcip->ehci_head_of_async_sched_list));

			/*
			 * For some reason this register might get nulled out by
			 * the Uli M1575 Southbridge. To workaround the HW
			 * problem, check the value after write and retry if the
			 * last write fails.
			 *
			 * If the ASYNCLISTADDR remains "stuck" after
			 * EHCI_MAX_RETRY retries, then the M1575 is broken
			 * and is stuck in an inconsistent state and is about
			 * to crash the machine with a trn_oor panic when it
			 * does a DMA read from 0x0. It is better to panic
			 * now rather than wait for the trn_oor crash; this
			 * way Customer Service will have a clean signature
			 * that indicts the M1575 chip rather than a
			 * mysterious and hard-to-diagnose trn_oor panic.
			 */
			if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
			    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
			    (ehci_qh_cpu_to_iommu(ehcip,
			    ehcip->ehci_head_of_async_sched_list) !=
			    Get_OpReg(ehci_async_list_addr))) {
				int retry = 0;

				Set_OpRegRetry(ehci_async_list_addr,
				    ehci_qh_cpu_to_iommu(ehcip,
				    ehcip->ehci_head_of_async_sched_list),
				    retry);
				if (retry >= EHCI_MAX_RETRY)
					cmn_err(CE_PANIC,
					    "ehci_toggle_scheduler_on_pipe: "
					    "ASYNCLISTADDR write failed.");

				USB_DPRINTF_L2(PRINT_MASK_ATTA,
				    ehcip->ehci_log_hdl,
				    "ehci_toggle_scheduler_on_pipe:"
				    " ASYNCLISTADDR write failed, retry=%d",
				    retry);
			}

			cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
		}
	} else {
		/* No open async pipes: stop the async schedule */
		cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
	}

	/* Same dance for the periodic schedule */
	if (ehcip->ehci_open_periodic_count) {
		if ((ehcip->ehci_periodic_req_count > 0) &&
		    ((cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE) == 0)) {
			/*
			 * For some reason this address get's nulled out by
			 * the ehci chip. Set it here just in case it is null.
			 */
			Set_OpReg(ehci_periodic_list_base,
			    (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
			    0xFFFFF000));
			cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
		}
	} else {
		cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
	}

	/* Just an optimization: only touch the register if bits changed */
	if (temp_reg != cmd_reg) {
		Set_OpReg(ehci_command, cmd_reg);
	}
}


/*
 * ehci_toggle_scheduler:
 *
 * Turn scheduler based on pipe open count.
 */
void
ehci_toggle_scheduler(ehci_state_t *ehcip)
{
	uint_t	temp_reg, cmd_reg;

	/*
	 * For performance optimization, we need to change the bits
	 * if (async == 1||async == 0) OR (periodic == 1||periodic == 0)
	 *
	 * Related bits already enabled if
	 *	async and periodic req counts are > 1
	 *	OR async req count > 1 & no periodic pipe
	 *	OR periodic req count > 1 & no async pipe
	 */
	if (((ehcip->ehci_async_req_count > 1) &&
	    (ehcip->ehci_periodic_req_count > 1)) ||
	    ((ehcip->ehci_async_req_count > 1) &&
	    (ehcip->ehci_open_periodic_count == 0)) ||
	    ((ehcip->ehci_periodic_req_count > 1) &&
	    (ehcip->ehci_open_async_count == 0))) {
		USB_DPRINTF_L4(PRINT_MASK_ATTA,
		    ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
		    "async/periodic bits no need to change");

		return;
	}

	cmd_reg = Get_OpReg(ehci_command);
	temp_reg = cmd_reg;

	/*
	 * Enable/Disable asynchronous scheduler, and
	 * turn on/off async list door bell
	 */
	if (ehcip->ehci_async_req_count > 1) {
		/* we already enable the async bit */
		USB_DPRINTF_L4(PRINT_MASK_ATTA,
		    ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
		    "async bit already enabled: cmd_reg=0x%x", cmd_reg);
	} else if (ehcip->ehci_async_req_count == 1) {
		if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) {
			/*
			 * For some reason this address might get nulled out by
			 * the ehci chip. Set it here just in case it is null.
			 * If it's not null, we should not reset the
			 * ASYNCLISTADDR, because it's updated by hardware to
			 * point to the next queue head to be executed.
			 */
			if (!Get_OpReg(ehci_async_list_addr)) {
				Set_OpReg(ehci_async_list_addr,
				    ehci_qh_cpu_to_iommu(ehcip,
				    ehcip->ehci_head_of_async_sched_list));
			}

			/*
			 * For some reason this register might get nulled out by
			 * the Uli M1575 Southbridge. To workaround the HW
			 * problem, check the value after write and retry if the
			 * last write fails.
			 *
			 * If the ASYNCLISTADDR remains "stuck" after
			 * EHCI_MAX_RETRY retries, then the M1575 is broken
			 * and is stuck in an inconsistent state and is about
			 * to crash the machine with a trn_oor panic when it
			 * does a DMA read from 0x0. It is better to panic
			 * now rather than wait for the trn_oor crash; this
			 * way Customer Service will have a clean signature
			 * that indicts the M1575 chip rather than a
			 * mysterious and hard-to-diagnose trn_oor panic.
			 */
			if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
			    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
			    (ehci_qh_cpu_to_iommu(ehcip,
			    ehcip->ehci_head_of_async_sched_list) !=
			    Get_OpReg(ehci_async_list_addr))) {
				int retry = 0;

				Set_OpRegRetry(ehci_async_list_addr,
				    ehci_qh_cpu_to_iommu(ehcip,
				    ehcip->ehci_head_of_async_sched_list),
				    retry);
				if (retry >= EHCI_MAX_RETRY)
					cmn_err(CE_PANIC,
					    "ehci_toggle_scheduler: "
					    "ASYNCLISTADDR write failed.");

				USB_DPRINTF_L3(PRINT_MASK_ATTA,
				    ehcip->ehci_log_hdl,
				    "ehci_toggle_scheduler: ASYNCLISTADDR "
				    "write failed, retry=%d", retry);
			}
		}
		cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
	} else {
		cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
	}

	if (ehcip->ehci_periodic_req_count > 1) {
		/* we already enable the periodic bit. */
		USB_DPRINTF_L4(PRINT_MASK_ATTA,
		    ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
		    "periodic bit already enabled: cmd_reg=0x%x", cmd_reg);
	} else if (ehcip->ehci_periodic_req_count == 1) {
		if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) {
			/*
			 * For some reason this address get's nulled out by
			 * the ehci chip. Set it here just in case it is null.
			 */
			Set_OpReg(ehci_periodic_list_base,
			    (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
			    0xFFFFF000));
		}
		cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
	} else {
		cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
	}

	/* Just an optimization */
	if (temp_reg != cmd_reg) {
		Set_OpReg(ehci_command, cmd_reg);

		/* To make sure the command register is updated correctly */
		if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
		    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
			int retry = 0;

			Set_OpRegRetry(ehci_command, cmd_reg, retry);
			USB_DPRINTF_L3(PRINT_MASK_ATTA,
			    ehcip->ehci_log_hdl,
			    "ehci_toggle_scheduler: CMD write failed, retry=%d",
			    retry);
		}

	}
}

/*
 * ehci print functions
 */

/*
 * ehci_print_caps:
 *
 * Dump the capability registers for debugging.
 */
void
ehci_print_caps(ehci_state_t *ehcip)
{
	uint_t			i;

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "\n\tUSB 2.0 Host Controller Characteristics\n");

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "Caps Length: 0x%x Version: 0x%x\n",
	    Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version));

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "Structural Parameters\n");
	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "Port indicators: %s", (Get_Cap(ehci_hcs_params) &
	    EHCI_HCS_PORT_INDICATOR) ?
"Yes" : "No"); 4282 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4283 "No of Classic host controllers: 0x%x", 4284 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS) 4285 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT); 4286 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4287 "No of ports per Classic host controller: 0x%x", 4288 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC) 4289 >> EHCI_HCS_NUM_PORTS_CC_SHIFT); 4290 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4291 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) & 4292 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No"); 4293 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4294 "Port power control: %s", (Get_Cap(ehci_hcs_params) & 4295 EHCI_HCS_PORT_POWER_CONTROL) ? "Yes" : "No"); 4296 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4297 "No of root hub ports: 0x%x\n", 4298 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); 4299 4300 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4301 "Capability Parameters\n"); 4302 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4303 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) & 4304 EHCI_HCC_EECP) ? "Yes" : "No"); 4305 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4306 "Isoch schedule threshold: 0x%x", 4307 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD); 4308 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4309 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) & 4310 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No"); 4311 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4312 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) & 4313 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024"); 4314 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4315 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) & 4316 EHCI_HCC_64BIT_ADDR_CAP) ? 
"Yes" : "No"); 4317 4318 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4319 "Classic Port Route Description"); 4320 4321 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) { 4322 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4323 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i])); 4324 } 4325 } 4326 4327 4328 /* 4329 * ehci_print_regs: 4330 */ 4331 void 4332 ehci_print_regs(ehci_state_t *ehcip) 4333 { 4334 uint_t i; 4335 4336 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4337 "\n\tEHCI%d Operational Registers\n", 4338 ddi_get_instance(ehcip->ehci_dip)); 4339 4340 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4341 "Command: 0x%x Status: 0x%x", 4342 Get_OpReg(ehci_command), Get_OpReg(ehci_status)); 4343 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4344 "Interrupt: 0x%x Frame Index: 0x%x", 4345 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index)); 4346 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4347 "Control Segment: 0x%x Periodic List Base: 0x%x", 4348 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base)); 4349 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4350 "Async List Addr: 0x%x Config Flag: 0x%x", 4351 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag)); 4352 4353 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4354 "Root Hub Port Status"); 4355 4356 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) { 4357 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4358 "\tPort Status 0x%x: 0x%x ", i, 4359 Get_OpReg(ehci_rh_port_status[i])); 4360 } 4361 } 4362 4363 4364 /* 4365 * ehci_print_qh: 4366 */ 4367 void 4368 ehci_print_qh( 4369 ehci_state_t *ehcip, 4370 ehci_qh_t *qh) 4371 { 4372 uint_t i; 4373 4374 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4375 "ehci_print_qh: qh = 0x%p", (void *)qh); 4376 4377 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4378 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr)); 4379 
USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4380 "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl)); 4381 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4382 "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl)); 4383 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4384 "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd)); 4385 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4386 "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd)); 4387 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4388 "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd)); 4389 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4390 "\tqh_status: 0x%x ", Get_QH(qh->qh_status)); 4391 4392 for (i = 0; i < 5; i++) { 4393 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4394 "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i])); 4395 } 4396 4397 for (i = 0; i < 5; i++) { 4398 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4399 "\tqh_buf_high[%d]: 0x%x ", 4400 i, Get_QH(qh->qh_buf_high[i])); 4401 } 4402 4403 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4404 "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd)); 4405 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4406 "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev)); 4407 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4408 "\tqh_state: 0x%x ", Get_QH(qh->qh_state)); 4409 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4410 "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next)); 4411 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4412 "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame)); 4413 } 4414 4415 4416 /* 4417 * ehci_print_qtd: 4418 */ 4419 void 4420 ehci_print_qtd( 4421 ehci_state_t *ehcip, 4422 ehci_qtd_t *qtd) 4423 { 4424 uint_t i; 4425 4426 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4427 "ehci_print_qtd: qtd = 0x%p", (void *)qtd); 4428 4429 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4430 "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd)); 4431 
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl));

	/* Five buffer pointer words plus their qtd_buf_high[] counterparts */
	for (i = 0; i < 5; i++) {
		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i]));
	}

	for (i = 0; i < 5; i++) {
		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "\tqtd_buf_high[%d]: 0x%x ",
		    i, Get_QTD(qtd->qtd_buf_high[i]));
	}

	/* Driver-private bookkeeping fields follow */
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_xfer_offs: 0x%x ", Get_QTD(qtd->qtd_xfer_offs));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len));
}

/*
 * ehci kstat functions
 */

/*
 * ehci_create_stats:
 *
 * Allocate and initialize the ehci kstat structures.  Three groups are
 * created under the "usba" module, keyed by driver name and instance:
 *
 *	<drv><inst>,intrs	named counters, one per interrupt-status
 *				cause (see ehci_do_intrs_stats())
 *	<drv><inst>,total	one KSTAT_TYPE_IO record aggregating all
 *				transfer bytes
 *	<drv><inst>,<type>	one KSTAT_TYPE_IO record per transfer type
 *				(ctrl/isoch/bulk/intr)
 *
 * Each kstat is created only if not already present, so the routine is
 * safe to call more than once.  kstat_create() failures are tolerated:
 * the corresponding pointer simply stays NULL and the stat-update
 * routines skip it.
 */
void
ehci_create_stats(ehci_state_t *ehcip)
{
	char			kstatname[KSTAT_STRLEN];
	const char		*dname = ddi_driver_name(ehcip->ehci_dip);
	char			*usbtypes[USB_N_COUNT_KSTATS] =
	    {"ctrl", "isoch", "bulk", "intr"};
	uint_t			instance = ehcip->ehci_instance;
	ehci_intrs_stats_t	*isp;
	int			i;

	if (EHCI_INTRS_STATS(ehcip) == NULL) {
		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
		    dname, instance);
		EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance,
		    kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
		    sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t),
		    KSTAT_FLAG_PERSISTENT);

		if (EHCI_INTRS_STATS(ehcip)) {
			isp = EHCI_INTRS_STATS_DATA(ehcip);
			kstat_named_init(&isp->ehci_sts_total,
			    "Interrupts Total", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_not_claimed,
			    "Not Claimed", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_async_sched_status,
			    "Async schedule status", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_periodic_sched_status,
			    "Periodic sched status", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_empty_async_schedule,
			    "Empty async schedule", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_host_ctrl_halted,
			    "Host controller Halted", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_async_advance_intr,
			    "Intr on async advance", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_host_system_error_intr,
			    "Host system error", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr,
			    "Frame list rollover", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_rh_port_change_intr,
			    "Port change detect", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_usb_error_intr,
			    "USB error interrupt", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_usb_intr,
			    "USB interrupt", KSTAT_DATA_UINT64);

			/*
			 * Counters are bumped in place by the interrupt
			 * path; no ks_update callback is needed (nulldev
			 * is a no-op that returns 0).
			 */
			EHCI_INTRS_STATS(ehcip)->ks_private = ehcip;
			EHCI_INTRS_STATS(ehcip)->ks_update = nulldev;
			kstat_install(EHCI_INTRS_STATS(ehcip));
		}
	}

	if (EHCI_TOTAL_STATS(ehcip) == NULL) {
		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
		    dname, instance);
		EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance,
		    kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
		    KSTAT_FLAG_PERSISTENT);

		if (EHCI_TOTAL_STATS(ehcip)) {
			kstat_install(EHCI_TOTAL_STATS(ehcip));
		}
	}

	/* One io-kstat per transfer type, named after usbtypes[] */
	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
		if (ehcip->ehci_count_stats[i] == NULL) {
			(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
			    dname, instance, usbtypes[i]);
			ehcip->ehci_count_stats[i] = kstat_create("usba",
			    instance, kstatname, "usb_byte_count",
			    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);

			if (ehcip->ehci_count_stats[i]) {
				kstat_install(ehcip->ehci_count_stats[i]);
			}
		}
	}
}


/*
 * ehci_destroy_stats:
 *
 * Clean up ehci kstat structures.  Deletes every kstat that
 * ehci_create_stats() managed to create and NULLs the pointers so the
 * pair may be called repeatedly (e.g. across attach/detach).
 */
void
ehci_destroy_stats(ehci_state_t *ehcip)
{
	int	i;

	if (EHCI_INTRS_STATS(ehcip)) {
		kstat_delete(EHCI_INTRS_STATS(ehcip));
		EHCI_INTRS_STATS(ehcip) = NULL;
	}

	if (EHCI_TOTAL_STATS(ehcip)) {
		kstat_delete(EHCI_TOTAL_STATS(ehcip));
		EHCI_TOTAL_STATS(ehcip) = NULL;
	}

	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
		if (ehcip->ehci_count_stats[i]) {
			kstat_delete(ehcip->ehci_count_stats[i]);
			ehcip->ehci_count_stats[i] = NULL;
		}
	}
}


/*
 * ehci_do_intrs_stats:
 *
 * ehci status information.  Bump the total-interrupt counter and the
 * named counter matching the given status cause (val).  Silently does
 * nothing if the intrs kstat was never created.
 */
void
ehci_do_intrs_stats(
	ehci_state_t	*ehcip,
	int		val)
{
	if (EHCI_INTRS_STATS(ehcip)) {
		EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++;
		switch (val) {
		case EHCI_STS_ASYNC_SCHED_STATUS:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_async_sched_status.value.ui64++;
			break;
		case EHCI_STS_PERIODIC_SCHED_STATUS:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_periodic_sched_status.value.ui64++;
			break;
		case EHCI_STS_EMPTY_ASYNC_SCHEDULE:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_empty_async_schedule.value.ui64++;
			break;
		case EHCI_STS_HOST_CTRL_HALTED:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_host_ctrl_halted.value.ui64++;
			break;
		case EHCI_STS_ASYNC_ADVANCE_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_async_advance_intr.value.ui64++;
			break;
		case EHCI_STS_HOST_SYSTEM_ERROR_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_host_system_error_intr.value.ui64++;
			break;
		case EHCI_STS_FRM_LIST_ROLLOVER_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_frm_list_rollover_intr.value.ui64++;
			break;
		case EHCI_STS_RH_PORT_CHANGE_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_rh_port_change_intr.value.ui64++;
			break;
		case EHCI_STS_USB_ERROR_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_usb_error_intr.value.ui64++;
			break;
		case EHCI_STS_USB_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_usb_intr.value.ui64++;
			break;
		default:
			/* Status cause with no dedicated counter */
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_not_claimed.value.ui64++;
			break;
		}
	}
}


/*
 * ehci_do_byte_stats:
 *
 * ehci data xfer information.  Account 'len' bytes against both the
 * aggregate io-kstat and the per-transfer-type io-kstat, in the read or
 * write bucket depending on the endpoint direction.
 *
 *	len	number of bytes transferred
 *	attr	endpoint bmAttributes; masked with USB_EP_ATTR_MASK to
 *		yield the transfer type
 *	addr	endpoint bEndpointAddress; masked with USB_EP_DIR_MASK to
 *		yield the direction
 *
 * NOTE(review): neither switch has a default arm; the masked attribute
 * should always be one of the four transfer types — confirm the mask
 * definitions if a fifth value is ever possible.
 */
void
ehci_do_byte_stats(
	ehci_state_t	*ehcip,
	size_t		len,
	uint8_t		attr,
	uint8_t		addr)
{
	uint8_t type = attr & USB_EP_ATTR_MASK;
	uint8_t dir = addr & USB_EP_DIR_MASK;

	if (dir == USB_EP_DIR_IN) {
		/* IN transfers count as reads */
		EHCI_TOTAL_STATS_DATA(ehcip)->reads++;
		EHCI_TOTAL_STATS_DATA(ehcip)->nread += len;
		switch (type) {
		case USB_EP_ATTR_CONTROL:
			EHCI_CTRL_STATS(ehcip)->reads++;
			EHCI_CTRL_STATS(ehcip)->nread += len;
			break;
		case USB_EP_ATTR_BULK:
			EHCI_BULK_STATS(ehcip)->reads++;
			EHCI_BULK_STATS(ehcip)->nread += len;
			break;
		case USB_EP_ATTR_INTR:
			EHCI_INTR_STATS(ehcip)->reads++;
			EHCI_INTR_STATS(ehcip)->nread += len;
			break;
		case USB_EP_ATTR_ISOCH:
			EHCI_ISOC_STATS(ehcip)->reads++;
			EHCI_ISOC_STATS(ehcip)->nread += len;
			break;
		}
	} else if (dir == USB_EP_DIR_OUT) {
		/* OUT transfers count as writes */
		EHCI_TOTAL_STATS_DATA(ehcip)->writes++;
		EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len;
		switch (type) {
		case USB_EP_ATTR_CONTROL:
			EHCI_CTRL_STATS(ehcip)->writes++;
			EHCI_CTRL_STATS(ehcip)->nwritten += len;
			break;
		case USB_EP_ATTR_BULK:
			EHCI_BULK_STATS(ehcip)->writes++;
			EHCI_BULK_STATS(ehcip)->nwritten += len;
			break;
		case USB_EP_ATTR_INTR:
			EHCI_INTR_STATS(ehcip)->writes++;
			EHCI_INTR_STATS(ehcip)->nwritten += len;
			break;
		case USB_EP_ATTR_ISOCH:
			EHCI_ISOC_STATS(ehcip)->writes++;
			EHCI_ISOC_STATS(ehcip)->nwritten += len;
			break;
		}
	}
}