1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 24 */ 25 26 27 /* 28 * EHCI Host Controller Driver (EHCI) 29 * 30 * The EHCI driver is a software driver which interfaces to the Universal 31 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to 32 * the Host Controller is defined by the EHCI Host Controller Interface. 33 * 34 * This module contains the main EHCI driver code which handles all USB 35 * transfers, bandwidth allocations and other general functionalities. 36 */ 37 38 #include <sys/usb/hcd/ehci/ehcid.h> 39 #include <sys/usb/hcd/ehci/ehci_isoch.h> 40 #include <sys/usb/hcd/ehci/ehci_xfer.h> 41 42 /* 43 * EHCI MSI tunable: 44 * 45 * By default MSI is enabled on all supported platforms except for the 46 * EHCI controller of ULI1575 South bridge. 
 */
boolean_t ehci_enable_msi = B_TRUE;

/* Pointer to the state structure */
extern void *ehci_statep;

extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);

extern uint_t ehci_vt62x2_workaround;
extern int force_ehci_off;

/* Adjustable variables for the size of the pools */
int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE;

/*
 * Initialize the values which the order of 32ms intr qh are executed
 * by the host controller in the lattice tree.
 *
 * Each entry is the 5-bit bit-reversal of its index (e.g. index 1 ->
 * 0x10, index 3 -> 0x18), which spreads the 32 interrupt QH lists
 * evenly across the periodic frame list.
 */
static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] =
	{0x00, 0x10, 0x08, 0x18,
	0x04, 0x14, 0x0c, 0x1c,
	0x02, 0x12, 0x0a, 0x1a,
	0x06, 0x16, 0x0e, 0x1e,
	0x01, 0x11, 0x09, 0x19,
	0x05, 0x15, 0x0d, 0x1d,
	0x03, 0x13, 0x0b, 0x1b,
	0x07, 0x17, 0x0f, 0x1f};

/*
 * Initialize the values which are used to calculate start split mask
 * for the low/full/high speed interrupt and isochronous endpoints.
 */
static uint_t ehci_start_split_mask[15] = {
	/*
	 * For high/full/low speed usb devices. For high speed
	 * device with polling interval greater than or equal
	 * to 8us (125us).
	 */
	0x01,	/* 00000001 */
	0x02,	/* 00000010 */
	0x04,	/* 00000100 */
	0x08,	/* 00001000 */
	0x10,	/* 00010000 */
	0x20,	/* 00100000 */
	0x40,	/* 01000000 */
	0x80,	/* 10000000 */

	/* Only for high speed devices with polling interval 4us */
	0x11,	/* 00010001 */
	0x22,	/* 00100010 */
	0x44,	/* 01000100 */
	0x88,	/* 10001000 */

	/* Only for high speed devices with polling interval 2us */
	0x55,	/* 01010101 */
	0xaa,	/* 10101010 */

	/* Only for high speed devices with polling interval 1us */
	0xff	/* 11111111 */
};

/*
 * Initialize the values which are used to calculate complete split mask
 * for the low/full speed interrupt and isochronous endpoints.
 */
static uint_t ehci_intr_complete_split_mask[7] = {
	/* Only full/low speed devices */
	0x1c,	/* 00011100 */
	0x38,	/* 00111000 */
	0x70,	/* 01110000 */
	0xe0,	/* 11100000 */
	/*
	 * The remaining entries would require the FSTN (Frame Span
	 * Traversal Node) feature, which is not implemented here.
	 */
	0x00,	/* Need FSTN feature */
	0x00,	/* Need FSTN feature */
	0x00	/* Need FSTN feature */
};


/*
 * EHCI Internal Function Prototypes
 */

/* Host Controller Driver (HCD) initialization functions */
void		ehci_set_dma_attributes(ehci_state_t	*ehcip);
int		ehci_allocate_pools(ehci_state_t	*ehcip);
void		ehci_decode_ddi_dma_addr_bind_handle_result(
				ehci_state_t		*ehcip,
				int			result);
int		ehci_map_regs(ehci_state_t	*ehcip);
int		ehci_register_intrs_and_init_mutex(
				ehci_state_t		*ehcip);
static int	ehci_add_intrs(ehci_state_t	*ehcip,
				int		intr_type);
int		ehci_init_ctlr(ehci_state_t	*ehcip,
				int		init_type);
static int	ehci_take_control(ehci_state_t	*ehcip);
static int	ehci_init_periodic_frame_lst_table(
				ehci_state_t		*ehcip);
static void	ehci_build_interrupt_lattice(
				ehci_state_t		*ehcip);
usba_hcdi_ops_t	*ehci_alloc_hcdi_ops(ehci_state_t	*ehcip);

/* Host Controller Driver (HCD) deinitialization functions */
int		ehci_cleanup(ehci_state_t	*ehcip);
static void	ehci_rem_intrs(ehci_state_t	*ehcip);
int		ehci_cpr_suspend(ehci_state_t	*ehcip);
int		ehci_cpr_resume(ehci_state_t	*ehcip);

/* Bandwidth Allocation functions */
int		ehci_allocate_bandwidth(ehci_state_t	*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			*pnode,
				uchar_t			*smask,
				uchar_t			*cmask);
static int	ehci_allocate_high_speed_bandwidth(
				ehci_state_t		*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			*hnode,
				uchar_t			*smask,
				uchar_t			*cmask);
static int	ehci_allocate_classic_tt_bandwidth(
				ehci_state_t		*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			pnode);
void		ehci_deallocate_bandwidth(ehci_state_t	*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			pnode,
				uchar_t			smask,
				uchar_t			cmask);
static void	ehci_deallocate_high_speed_bandwidth(
				ehci_state_t		*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			hnode,
				uchar_t			smask,
				uchar_t			cmask);
static void	ehci_deallocate_classic_tt_bandwidth(
				ehci_state_t		*ehcip,
				usba_pipe_handle_data_t	*ph,
				uint_t			pnode);
static int	ehci_compute_high_speed_bandwidth(
				ehci_state_t		*ehcip,
				usb_ep_descr_t		*endpoint,
				usb_port_status_t	port_status,
				uint_t			*sbandwidth,
				uint_t			*cbandwidth);
static int	ehci_compute_classic_bandwidth(
				usb_ep_descr_t		*endpoint,
				usb_port_status_t	port_status,
				uint_t			*bandwidth);
int		ehci_adjust_polling_interval(
				ehci_state_t		*ehcip,
				usb_ep_descr_t		*endpoint,
				usb_port_status_t	port_status);
static int	ehci_adjust_high_speed_polling_interval(
				ehci_state_t		*ehcip,
				usb_ep_descr_t		*endpoint);
static uint_t	ehci_lattice_height(uint_t	interval);
static uint_t	ehci_lattice_parent(uint_t	node);
static uint_t	ehci_find_periodic_node(
				uint_t		leaf,
				int		interval);
static uint_t	ehci_leftmost_leaf(uint_t	node,
				uint_t		height);
static uint_t	ehci_pow_2(uint_t	x);
static uint_t	ehci_log_2(uint_t	x);
static int	ehci_find_bestfit_hs_mask(
				ehci_state_t	*ehcip,
				uchar_t		*smask,
				uint_t		*pnode,
				usb_ep_descr_t	*endpoint,
				uint_t		bandwidth,
				int		interval);
static int	ehci_find_bestfit_ls_intr_mask(
				ehci_state_t	*ehcip,
				uchar_t		*smask,
				uchar_t		*cmask,
				uint_t		*pnode,
				uint_t		sbandwidth,
				uint_t		cbandwidth,
				int		interval);
static int	ehci_find_bestfit_sitd_in_mask(
				ehci_state_t	*ehcip,
				uchar_t		*smask,
				uchar_t		*cmask,
				uint_t		*pnode,
				uint_t		sbandwidth,
				uint_t		cbandwidth,
				int		interval);
static int	ehci_find_bestfit_sitd_out_mask(
				ehci_state_t	*ehcip,
				uchar_t		*smask,
				uint_t		*pnode,
				uint_t		sbandwidth,
				int		interval);
static uint_t
ehci_calculate_bw_availability_mask(
				ehci_state_t	*ehcip,
				uint_t		bandwidth,
				int		leaf,
				int		leaf_count,
				uchar_t		*bw_mask);
static void	ehci_update_bw_availability(
				ehci_state_t	*ehcip,
				int		bandwidth,
				int		leftmost_leaf,
				int		leaf_count,
				uchar_t		mask);

/* Miscellaneous functions */
ehci_state_t	*ehci_obtain_state(
				dev_info_t	*dip);
int		ehci_state_is_operational(
				ehci_state_t	*ehcip);
int		ehci_do_soft_reset(
				ehci_state_t	*ehcip);
usb_req_attrs_t	ehci_get_xfer_attrs(ehci_state_t *ehcip,
				ehci_pipe_private_t	*pp,
				ehci_trans_wrapper_t	*tw);
usb_frame_number_t ehci_get_current_frame_number(
				ehci_state_t	*ehcip);
static void	ehci_cpr_cleanup(
				ehci_state_t	*ehcip);
int		ehci_wait_for_sof(
				ehci_state_t	*ehcip);
void		ehci_toggle_scheduler(
				ehci_state_t	*ehcip);
void		ehci_print_caps(ehci_state_t	*ehcip);
void		ehci_print_regs(ehci_state_t	*ehcip);
void		ehci_print_qh(ehci_state_t	*ehcip,
				ehci_qh_t	*qh);
void		ehci_print_qtd(ehci_state_t	*ehcip,
				ehci_qtd_t	*qtd);
void		ehci_create_stats(ehci_state_t	*ehcip);
void		ehci_destroy_stats(ehci_state_t	*ehcip);
void		ehci_do_intrs_stats(ehci_state_t	*ehcip,
				int		val);
void		ehci_do_byte_stats(ehci_state_t	*ehcip,
				size_t		len,
				uint8_t		attr,
				uint8_t		addr);

/*
 * check if this ehci controller can support PM
 *
 * Returns USB_SUCCESS only for the specific vendor/device combinations
 * known to handle power management (NEC combo, ULi M1575, and all VIA
 * parts); USB_FAILURE for everything else.
 */
int
ehci_hcdi_pm_support(dev_info_t *dip)
{
	ehci_state_t	*ehcip = ddi_get_soft_state(ehci_statep,
	    ddi_get_instance(dip));

	if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) &&
	    (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) ||

	    ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
	    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) ||

	    (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) {

		return (USB_SUCCESS);
	}

	return (USB_FAILURE);
}

void
ehci_dma_attr_workaround(ehci_state_t *ehcip)
{
	/*
	 * Some Nvidia chips can not handle qh dma address above 2G.
	 * The bit 31 of the dma address might be omitted and it will
	 * cause system crash or other unpredicable result. So force
	 * the dma address allocated below 2G to make ehci work.
	 */
	if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) {
		switch (ehcip->ehci_device_id) {
		case PCI_DEVICE_NVIDIA_CK804:
		case PCI_DEVICE_NVIDIA_MCP04:
			USB_DPRINTF_L2(PRINT_MASK_ATTA,
			    ehcip->ehci_log_hdl,
			    "ehci_dma_attr_workaround: NVIDIA dma "
			    "workaround enabled, force dma address "
			    "to be allocated below 2G");
			/* Cap the address range at 2G - 1 */
			ehcip->ehci_dma_attr.dma_attr_addr_hi =
			    0x7fffffffull;
			break;
		default:
			break;

		}
	}
}

/*
 * Host Controller Driver (HCD) initialization functions
 */

/*
 * ehci_set_dma_attributes:
 *
 * Set the limits in the DMA attributes structure. Most of the values used
 * in the DMA limit structures are the default values as specified by the
 * Writing PCI device drivers document.
 */
void
ehci_set_dma_attributes(ehci_state_t *ehcip)
{
	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_set_dma_attributes:");

	/* Initialize the DMA attributes */
	ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0;
	ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
	ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull;

	/* 32 bit addressing */
	ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX;

	/* Byte alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	/*
	 * Since PCI specification is byte alignment, the
	 * burst size field should be set to 1 for PCI devices.
	 */
	ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1;

	ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1;
	ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER;
	ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull;
	ehcip->ehci_dma_attr.dma_attr_sgllen = 1;
	ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR;
	ehcip->ehci_dma_attr.dma_attr_flags = 0;
	/* Apply chip-specific address-range restrictions last */
	ehci_dma_attr_workaround(ehcip);
}


/*
 * ehci_allocate_pools:
 *
 * Allocate the system memory for the Endpoint Descriptor (QH) and for the
 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned
 * to a 16 byte boundary.
 *
 * The QH/QTD alignment is imposed by temporarily tightening
 * ehci_dma_attr.dma_attr_align to EHCI_DMA_ATTR_TD_QH_ALIGNMENT; the
 * default alignment is restored on both the success and failure paths.
 *
 * Returns DDI_SUCCESS when both pools are allocated, bound (exactly one
 * DMA cookie each) and initialized to the FREE state; DDI_FAILURE
 * otherwise.  On failure, already-allocated resources are NOT released
 * here -- presumably the caller runs ehci_cleanup(); verify at call site.
 */
int
ehci_allocate_pools(ehci_state_t *ehcip)
{
	ddi_device_acc_attr_t		dev_attr;
	size_t				real_length;
	int				result;
	uint_t				ccount;
	int				i;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_allocate_pools:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Byte alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT;

	/*
	 * Allocate the QTD pool DMA handle.
	 * NOTE(review): unlike the QH-pool path below, this failure path
	 * logs no diagnostic message.
	 */
	if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
	    DDI_DMA_SLEEP, 0,
	    &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) {

		goto failure;
	}

	/* Allocate the memory for the QTD pool */
	if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle,
	    ehci_qtd_pool_size * sizeof (ehci_qtd_t),
	    &dev_attr,
	    DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    0,
	    (caddr_t *)&ehcip->ehci_qtd_pool_addr,
	    &real_length,
	    &ehcip->ehci_qtd_pool_mem_handle)) {

		goto failure;
	}

	/* Map the QTD pool into the I/O address space */
	result = ddi_dma_addr_bind_handle(
	    ehcip->ehci_qtd_pool_dma_handle,
	    NULL,
	    (caddr_t)ehcip->ehci_qtd_pool_addr,
	    real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    &ehcip->ehci_qtd_pool_cookie,
	    &ccount);

	bzero((void *)ehcip->ehci_qtd_pool_addr,
	    ehci_qtd_pool_size * sizeof (ehci_qtd_t));

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_allocate_pools: More than 1 cookie");

			goto failure;
		}
	} else {
		USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_allocate_pools: Result = %d", result);

		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);

		goto failure;
	}

	/*
	 * DMA addresses for QTD pools are bound
	 */
	ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND;

	/* Initialize the QTD pool */
	for (i = 0; i < ehci_qtd_pool_size; i ++) {
		Set_QTD(ehcip->ehci_qtd_pool_addr[i].
		    qtd_state, EHCI_QTD_FREE);
	}

	/* Allocate the QH pool DMA handle */
	if (ddi_dma_alloc_handle(ehcip->ehci_dip,
	    &ehcip->ehci_dma_attr,
	    DDI_DMA_SLEEP,
	    0,
	    &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_allocate_pools: ddi_dma_alloc_handle failed");

		goto failure;
	}

	/* Allocate the memory for the QH pool */
	if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle,
	    ehci_qh_pool_size * sizeof (ehci_qh_t),
	    &dev_attr,
	    DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    0,
	    (caddr_t *)&ehcip->ehci_qh_pool_addr,
	    &real_length,
	    &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_allocate_pools: ddi_dma_mem_alloc failed");

		goto failure;
	}

	/* Map the QH pool into the I/O address space */
	result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle,
	    NULL,
	    (caddr_t)ehcip->ehci_qh_pool_addr,
	    real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    &ehcip->ehci_qh_pool_cookie,
	    &ccount);

	bzero((void *)ehcip->ehci_qh_pool_addr,
	    ehci_qh_pool_size * sizeof (ehci_qh_t));

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_allocate_pools: More than 1 cookie");

			goto failure;
		}
	} else {
		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);

		goto failure;
	}

	/*
	 * DMA addresses for QH pools are bound
	 */
	ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND;

	/* Initialize the QH pool */
	for (i = 0; i < ehci_qh_pool_size; i ++) {
		Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE);
	}

	/* Restore the default byte alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_SUCCESS);

failure:
	/* Restore the default byte alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_FAILURE);
}


/*
 * ehci_decode_ddi_dma_addr_bind_handle_result:
 *
 * Process the return values of ddi_dma_addr_bind_handle()
 * (diagnostic logging only; no state is changed).
 */
void
ehci_decode_ddi_dma_addr_bind_handle_result(
	ehci_state_t	*ehcip,
	int		result)
{
	USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
	    "ehci_decode_ddi_dma_addr_bind_handle_result:");

	switch (result) {
	case DDI_DMA_PARTIAL_MAP:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "Partial transfers not allowed");
		break;
	case DDI_DMA_INUSE:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "Handle is in use");
		break;
	case DDI_DMA_NORESOURCES:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "No resources");
		break;
	case DDI_DMA_NOMAPPING:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "No mapping");
		break;
	case
	DDI_DMA_TOOBIG:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "Object is too big");
		break;
	default:
		USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
		    "Unknown dma error");
	}
}


/*
 * ehci_map_regs:
 *
 * The Host Controller (HC) contains a set of on-chip operational registers
 * and which should be mapped into a non-cacheable portion of the system
 * addressable space.
 *
 * The capability registers are mapped twice: first just large enough to
 * read the capability length, then re-mapped to cover both the capability
 * and the operational register sets.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
int
ehci_map_regs(ehci_state_t *ehcip)
{
	ddi_device_acc_attr_t	attr;
	uint16_t		cmd_reg;
	uint_t			length;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:");

	/* Check to make sure we have memory access */
	if (pci_config_setup(ehcip->ehci_dip,
	    &ehcip->ehci_config_handle) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_map_regs: Config error");

		return (DDI_FAILURE);
	}

	/* Make sure Memory Access Enable is set */
	cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);

	if (!(cmd_reg & PCI_COMM_MAE)) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_map_regs: Memory base address access disabled");

		return (DDI_FAILURE);
	}

	/* The host controller will be little endian */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Map in EHCI Capability registers */
	if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
	    (caddr_t *)&ehcip->ehci_capsp, 0,
	    sizeof (ehci_caps_t), &attr,
	    &ehcip->ehci_caps_handle) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_map_regs: Map setup error");

		return (DDI_FAILURE);
	}

	/* CAPLENGTH: offset from the caps base to the operational regs */
	length = ddi_get8(ehcip->ehci_caps_handle,
	    (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length);

	/* Free the original mapping */
	ddi_regs_map_free(&ehcip->ehci_caps_handle);

	/* Re-map in EHCI Capability and Operational registers */
	if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
	    (caddr_t *)&ehcip->ehci_capsp, 0,
	    length + sizeof (ehci_regs_t), &attr,
	    &ehcip->ehci_caps_handle) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_map_regs: Map setup error");

		return (DDI_FAILURE);
	}

	/* Get the pointer to EHCI Operational Register */
	ehcip->ehci_regsp = (ehci_regs_t *)
	    ((uintptr_t)ehcip->ehci_capsp + length);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n",
	    (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp);

	return (DDI_SUCCESS);
}

/*
 * The following simulated polling is for debugging purposes only.
 * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf.
 */
static int
ehci_is_polled(dev_info_t *dip)
{
	int		ret;
	char		*propval;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
	    "usb-polling", &propval) != DDI_SUCCESS)

		return (0);

	ret = (strcmp(propval, "true") == 0);
	ddi_prop_free(propval);

	return (ret);
}

/*
 * Simulated-polling thread body: invoke the interrupt handler every
 * millisecond forever (never returns).
 */
static void
ehci_poll_intr(void *arg)
{
	/* poll every msec */
	for (;;) {
		(void) ehci_intr(arg, NULL);
		delay(drv_usectohz(1000));
	}
}

/*
 * ehci_register_intrs_and_init_mutex:
 *
 * Register interrupts and initialize each mutex and condition variables
 */
int
ehci_register_intrs_and_init_mutex(ehci_state_t	*ehcip)
{
	int	intr_types;

#if defined(__x86)
	uint8_t iline;
#endif

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_register_intrs_and_init_mutex:");

	/*
	 * There is a known MSI hardware bug with the EHCI controller
	 * of ULI1575 southbridge. Hence MSI is disabled for this chip.
	 */
	if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
	    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
		ehcip->ehci_msi_enabled = B_FALSE;
	} else {
		/* Set the MSI enable flag from the global EHCI MSI tunable */
		ehcip->ehci_msi_enabled = ehci_enable_msi;
	}

	/* launch polling thread instead of enabling pci interrupt */
	if (ehci_is_polled(ehcip->ehci_dip)) {
		extern pri_t maxclsyspri;

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_register_intrs_and_init_mutex: "
		    "running in simulated polled mode");

		(void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0,
		    TS_RUN, maxclsyspri);

		goto skip_intr;
	}

#if defined(__x86)
	/*
	 * Make sure that the interrupt pin is connected to the
	 * interrupt controller on x86.	 Interrupt line 255 means
	 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43).
	 * If we would return failure when interrupt line equals 255, then
	 * high speed devices will be routed to companion host controllers.
	 * However, it is not necessary to return failure here, and
	 * o/uhci codes don't check the interrupt line either.
	 * But it's good to log a message here for debug purposes.
	 */
	iline = pci_config_get8(ehcip->ehci_config_handle,
	    PCI_CONF_ILINE);

	if (iline == 255) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_register_intrs_and_init_mutex: "
		    "interrupt line value out of range (%d)",
		    iline);
	}
#endif	/* __x86 */

	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(ehcip->ehci_dip,
	    &intr_types) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_register_intrs_and_init_mutex: "
		    "ddi_intr_get_supported_types failed");

		return (DDI_FAILURE);
	}

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_register_intrs_and_init_mutex: "
	    "supported interrupt types 0x%x", intr_types);

	/* Try MSI first; on failure fall through to FIXED below */
	if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) {
		if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI)
		    != DDI_SUCCESS) {
			USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_register_intrs_and_init_mutex: MSI "
			    "registration failed, trying FIXED interrupt \n");
		} else {
			USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_register_intrs_and_init_mutex: "
			    "Using MSI interrupt type\n");

			ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI;
			ehcip->ehci_flags |= EHCI_INTR;
		}
	}

	if ((!(ehcip->ehci_flags & EHCI_INTR)) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED)
		    != DDI_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_register_intrs_and_init_mutex: "
			    "FIXED interrupt registration failed\n");

			return (DDI_FAILURE);
		}

		USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_register_intrs_and_init_mutex: "
		    "Using FIXED interrupt type\n");

		ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED;
		ehcip->ehci_flags |= EHCI_INTR;
	}

skip_intr:
	/* Create prototype for advance on async schedule */
	cv_init(&ehcip->ehci_async_schedule_advance_cv,
	    NULL, CV_DRIVER, NULL);

	return (DDI_SUCCESS);
}


/*
 * ehci_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 *
 * Allocates the interrupt handle array (ehci_htable), adds the handler
 * for each vector, initializes ehci_int_mutex at the interrupt priority,
 * and enables the interrupts.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
ehci_add_intrs(ehci_state_t	*ehcip,
		int		intr_type)
{
	int	actual, avail, intr_size, count = 0;
	int	i, flag, ret;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_add_intrs: interrupt type 0x%x", intr_type);

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: ddi_intr_get_nintrs() failure, "
		    "ret: %d, count: %d", ret, count);

		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: ddi_intr_get_navail() failure, "
		    "ret: %d, count: %d", ret, count);

		return (DDI_FAILURE);
	}

	if (avail < count) {
		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: ehci_add_intrs: nintrs () "
		    "returned %d, navail returned %d\n", count, avail);
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP);

	flag = (intr_type == DDI_INTR_TYPE_MSI) ?
	    DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;

	/* call ddi_intr_alloc() */
	ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable,
	    intr_type, 0, count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: ddi_intr_alloc() failed %d", ret);

		kmem_free(ehcip->ehci_htable, intr_size);

		return (DDI_FAILURE);
	}

	if (actual < count) {
		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: Requested: %d, Received: %d\n",
		    count, actual);

		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(ehcip->ehci_htable[i]);

		kmem_free(ehcip->ehci_htable, intr_size);

		return (DDI_FAILURE);
	}

	ehcip->ehci_intr_cnt = actual;

	if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0],
	    &ehcip->ehci_intr_pri)) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret);

		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(ehcip->ehci_htable[i]);

		kmem_free(ehcip->ehci_htable, intr_size);

		return (DDI_FAILURE);
	}

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_add_intrs: Supported Interrupt priority 0x%x",
	    ehcip->ehci_intr_pri);

	/* Test for high level mutex */
	if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: Hi level interrupt not supported");

		for (i = 0; i < actual; i++)
			(void) ddi_intr_free(ehcip->ehci_htable[i]);

		kmem_free(ehcip->ehci_htable, intr_size);

		return (DDI_FAILURE);
	}

	/* Initialize the mutex */
	mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(ehcip->ehci_intr_pri));

	/*
	 * Call ddi_intr_add_handler().
	 * NOTE(review): on failure part-way through, the cleanup loop
	 * below frees all vectors but does not call
	 * ddi_intr_remove_handler() for the handlers already added in
	 * earlier iterations; it also reuses the outer loop variable
	 * `i` (harmless only because the function returns immediately).
	 */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ehcip->ehci_htable[i],
		    ehci_intr, (caddr_t)ehcip,
		    (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_add_intrs:ddi_intr_add_handler() "
			    "failed %d", ret);

			for (i = 0; i < actual; i++)
				(void) ddi_intr_free(ehcip->ehci_htable[i]);

			mutex_destroy(&ehcip->ehci_int_mutex);
			kmem_free(ehcip->ehci_htable, intr_size);

			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0],
	    &ehcip->ehci_intr_cap)) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret);

		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
			(void) ddi_intr_free(ehcip->ehci_htable[i]);
		}

		mutex_destroy(&ehcip->ehci_int_mutex);
		kmem_free(ehcip->ehci_htable, intr_size);

		return (DDI_FAILURE);
	}

	/* Enable all interrupts */
	if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(ehcip->ehci_htable,
		    ehcip->ehci_intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < ehcip->ehci_intr_cnt; i++)
			(void) ddi_intr_enable(ehcip->ehci_htable[i]);
	}

	return (DDI_SUCCESS);
}


/*
 * ehci_init_hardware
 *
 * take control from BIOS, reset EHCI host controller, and check version, etc.
 */
int
ehci_init_hardware(ehci_state_t	*ehcip)
{
	int			revision;
	uint16_t		cmd_reg;
	int			abort_on_BIOS_take_over_failure;

	/* Take control from the BIOS */
	if (ehci_take_control(ehcip) != USB_SUCCESS) {

		/* read .conf file properties */
		abort_on_BIOS_take_over_failure =
		    ddi_prop_get_int(DDI_DEV_T_ANY,
		    ehcip->ehci_dip, DDI_PROP_DONTPASS,
		    "abort-on-BIOS-take-over-failure", 0);

		if (abort_on_BIOS_take_over_failure) {

			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "Unable to take control from BIOS.");

			return (DDI_FAILURE);
		}

		USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "Unable to take control from BIOS. Failure is ignored.");
	}

	/* set Memory Master Enable */
	cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
	cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME);
	pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg);

	/* Reset the EHCI host controller */
	Set_OpReg(ehci_command,
	    Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);

	/* Wait 10ms for reset to complete */
	drv_usecwait(EHCI_RESET_TIMEWAIT);

	ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED);

	/* Verify the version number */
	revision = Get_16Cap(ehci_version);

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_hardware: Revision 0x%x", revision);

	/*
	 * EHCI driver supports EHCI host controllers compliant to
	 * 0.95 and higher revisions of EHCI specifications.
	 */
	if (revision < EHCI_REVISION_0_95) {

		USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "Revision 0x%x is not supported", revision);

		return (DDI_FAILURE);
	}

	if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) {

		/* Initialize the Frame list base address area */
		if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) {

			return (DDI_FAILURE);
		}

		/*
		 * For performance reasons, do not insert anything into the
		 * asynchronous list or activate the asynch list schedule until
		 * there is a valid QH.
		 */
		ehcip->ehci_head_of_async_sched_list = NULL;

		if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) &&
		    (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) {
			/*
			 * The driver is unable to reliably stop the asynch
			 * list schedule on VIA VT6202 controllers, so we
			 * always keep a dummy QH on the list.
			 */
			ehci_qh_t *dummy_async_qh =
			    ehci_alloc_qh(ehcip, NULL, NULL);

			/* Link the QH circularly to itself */
			Set_QH(dummy_async_qh->qh_link_ptr,
			    ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) &
			    EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH));

			/* Set this QH to be the "head" of the circular list */
			Set_QH(dummy_async_qh->qh_ctrl,
			    Get_QH(dummy_async_qh->qh_ctrl) |
			    EHCI_QH_CTRL_RECLAIM_HEAD);

			Set_QH(dummy_async_qh->qh_next_qtd,
			    EHCI_QH_NEXT_QTD_PTR_VALID);
			Set_QH(dummy_async_qh->qh_alt_next_qtd,
			    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);

			ehcip->ehci_head_of_async_sched_list = dummy_async_qh;
			ehcip->ehci_open_async_count++;
			ehcip->ehci_async_req_count++;
		}
	}

	return (DDI_SUCCESS);
}


/*
 * ehci_init_workaround
 *
 * some workarounds during initializing ehci
 */
int
ehci_init_workaround(ehci_state_t	*ehcip)
{
	/*
	 * Acer Labs Inc.
	 * M5273 EHCI controller does not send
	 * interrupts unless the Root hub ports are routed to the EHCI
	 * host controller; so route the ports now, before we test for
	 * the presence of SOFs interrupts.
	 */
	if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
		/* Route all Root hub ports to EHCI host controller */
		Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
	}

	/*
	 * VIA chips have some issues and may not work reliably.
	 * Revisions >= 0x80 are part of a southbridge and appear
	 * to be reliable with the workaround.
	 * For revisions < 0x80, if we were bound using class
	 * complain, else proceed. This will allow the user to
	 * bind ehci specifically to this chip and not have the
	 * warnings
	 */
	if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) {

		if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) {

			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_init_workaround: Applying VIA workarounds "
			    "for the 6212 chip.");

		} else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name,
		    "pciclass,0c0320") == 0) {

			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "Due to recently discovered incompatibilities");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "with this USB controller, USB2.x transfer");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "support has been disabled. This device will");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "continue to function as a USB1.x controller.");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "If you are interested in enabling USB2.x");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "support please, refer to the ehci(7D) man page.");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "Please also refer to www.sun.com/io for");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "Solaris Ready products and to");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "www.sun.com/bigadmin/hcl for additional");
			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "compatible USB products.");

			return (DDI_FAILURE);

		} else if (ehci_vt62x2_workaround) {

			USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "Applying VIA workarounds");
		}
	}

	return (DDI_SUCCESS);
}


/*
 * ehci_init_check_status
 *
 * Check if EHCI host controller is running
 */
int
ehci_init_check_status(ehci_state_t	*ehcip)
{
	clock_t			sof_time_wait;

	/*
	 * Get the number of clock ticks to wait.
	 * This is based on the maximum time it takes for a frame list rollover
	 * and maximum time wait for SOFs to begin.
	 */
	sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) +
	    EHCI_SOF_TIMEWAIT);

	/* Tell the ISR to broadcast ehci_async_schedule_advance_cv */
	ehcip->ehci_flags |= EHCI_CV_INTR;

	/* We need to add a delay to allow the chip time to start running */
	(void) cv_reltimedwait(&ehcip->ehci_async_schedule_advance_cv,
	    &ehcip->ehci_int_mutex, sof_time_wait, TR_CLOCK_TICK);

	/*
	 * Check EHCI host controller is running, otherwise return failure.
1206 */ 1207 if ((ehcip->ehci_flags & EHCI_CV_INTR) || 1208 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) { 1209 1210 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1211 "No SOF interrupts have been received, this USB EHCI host" 1212 "controller is unusable"); 1213 1214 /* 1215 * Route all Root hub ports to Classic host 1216 * controller, in case this is an unusable ALI M5273 1217 * EHCI controller. 1218 */ 1219 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) { 1220 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC); 1221 } 1222 1223 return (DDI_FAILURE); 1224 } 1225 1226 return (DDI_SUCCESS); 1227 } 1228 1229 1230 /* 1231 * ehci_init_ctlr: 1232 * 1233 * Initialize the Host Controller (HC). 1234 */ 1235 int 1236 ehci_init_ctlr(ehci_state_t *ehcip, 1237 int init_type) 1238 { 1239 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:"); 1240 1241 if (init_type == EHCI_NORMAL_INITIALIZATION) { 1242 1243 if (ehci_init_hardware(ehcip) != DDI_SUCCESS) { 1244 1245 return (DDI_FAILURE); 1246 } 1247 } 1248 1249 /* 1250 * Check for Asynchronous schedule park capability feature. If this 1251 * feature is supported, then, program ehci command register with 1252 * appropriate values.. 1253 */ 1254 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) { 1255 1256 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1257 "ehci_init_ctlr: Async park mode is supported"); 1258 1259 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) | 1260 (EHCI_CMD_ASYNC_PARK_ENABLE | 1261 EHCI_CMD_ASYNC_PARK_COUNT_3))); 1262 } 1263 1264 /* 1265 * Check for programmable periodic frame list feature. If this 1266 * feature is supported, then, program ehci command register with 1267 * 1024 frame list value. 
1268 */ 1269 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) { 1270 1271 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1272 "ehci_init_ctlr: Variable programmable periodic " 1273 "frame list is supported"); 1274 1275 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) | 1276 EHCI_CMD_FRAME_1024_SIZE)); 1277 } 1278 1279 /* 1280 * Currently EHCI driver doesn't support 64 bit addressing. 1281 * 1282 * If we are using 64 bit addressing capability, then, program 1283 * ehci_ctrl_segment register with 4 Gigabyte segment where all 1284 * of the interface data structures are allocated. 1285 */ 1286 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) { 1287 1288 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1289 "ehci_init_ctlr: EHCI driver doesn't support " 1290 "64 bit addressing"); 1291 } 1292 1293 /* 64 bit addressing is not support */ 1294 Set_OpReg(ehci_ctrl_segment, 0x00000000); 1295 1296 /* Turn on/off the schedulers */ 1297 ehci_toggle_scheduler(ehcip); 1298 1299 /* Set host controller soft state to operational */ 1300 ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE; 1301 1302 /* 1303 * Set the Periodic Frame List Base Address register with the 1304 * starting physical address of the Periodic Frame List. 1305 */ 1306 Set_OpReg(ehci_periodic_list_base, 1307 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address & 1308 EHCI_PERIODIC_LIST_BASE)); 1309 1310 /* 1311 * Set ehci_interrupt to enable all interrupts except Root 1312 * Hub Status change interrupt. 1313 */ 1314 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR | 1315 EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR | 1316 EHCI_INTR_USB); 1317 1318 /* 1319 * Set the desired interrupt threshold and turn on EHCI host controller. 
	 */
	Set_OpReg(ehci_command,
	    ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) |
	    (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));

	ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN);

	if (init_type == EHCI_NORMAL_INITIALIZATION) {

		if (ehci_init_workaround(ehcip) != DDI_SUCCESS) {

			/* Set host controller soft state to error */
			ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;

			return (DDI_FAILURE);
		}

		if (ehci_init_check_status(ehcip) != DDI_SUCCESS) {

			/* Set host controller soft state to error */
			ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;

			return (DDI_FAILURE);
		}

		USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_init_ctlr: SOF's have started");
	}

	/* Route all Root hub ports to EHCI host controller */
	Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);

	return (DDI_SUCCESS);
}

/*
 * ehci_take_control:
 *
 * Handshake to take EHCI control from BIOS if necessary. Its only valid for
 * x86 machines, because sparc doesn't have a BIOS.
 * On x86 machine, the take control process includes
 * o get the base address of the extended capability list
 * o find out the capability for handoff synchronization in the list.
 * o check if BIOS has owned the host controller.
 * o set the OS Owned semaphore bit, ask the BIOS to release the ownership.
 * o wait for a constant time and check if BIOS has relinquished control.
 *
 * Returns USB_SUCCESS (always, on non-x86), or USB_FAILURE when the BIOS
 * does not release ownership within the retry window.
 */
/* ARGSUSED */
static int
ehci_take_control(ehci_state_t *ehcip)
{
#if defined(__x86)
	uint32_t		extended_cap;
	uint32_t		extended_cap_offset;
	uint32_t		extended_cap_id;
	uint_t			retry;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_take_control:");

	/*
	 * According EHCI Spec 2.2.4, get EECP base address from HCCPARAMS
	 * register.
	 */
	extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
	    EHCI_HCC_EECP_SHIFT;

	/*
	 * According EHCI Spec 2.2.4, if the extended capability offset is
	 * less than 40h then its not valid. This means we don't need to
	 * worry about BIOS handoff.
	 */
	if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: Hardware doesn't support legacy.");

		goto success;
	}

	/*
	 * According EHCI Spec 2.1.7, A zero offset indicates the
	 * end of the extended capability list.
	 */
	while (extended_cap_offset) {

		/* Get the extended capability value. */
		extended_cap = pci_config_get32(ehcip->ehci_config_handle,
		    extended_cap_offset);

		/* Get the capability ID */
		extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
		    EHCI_EX_CAP_ID_SHIFT;

		/* Check if the card support legacy */
		if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
			break;
		}

		/* Get the offset of the next capability */
		extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
		    EHCI_EX_CAP_NEXT_PTR_SHIFT;
	}

	/*
	 * Unable to find legacy support in hardware's extended capability list.
	 * This means we don't need to worry about BIOS handoff.
	 */
	if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: Hardware doesn't support legacy");

		goto success;
	}

	/* Check if BIOS has owned it. */
	if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: BIOS does not own EHCI");

		goto success;
	}

	/*
	 * According EHCI Spec 5.1, The OS driver initiates an ownership
	 * request by setting the OS Owned semaphore to a one. The OS
	 * waits for the BIOS Owned bit to go to a zero before attempting
	 * to use the EHCI controller. The time that OS must wait for BIOS
	 * to respond to the request for ownership is beyond the scope of
	 * this specification.
	 * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms
	 * for BIOS to release the ownership.
	 */
	extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
	pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
	    extended_cap);

	for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {

		/* wait a special interval */
#ifndef __lock_lint
		delay(drv_usectohz(EHCI_TAKEOVER_DELAY));
#endif
		/* Check to see if the BIOS has released the ownership */
		extended_cap = pci_config_get32(
		    ehcip->ehci_config_handle, extended_cap_offset);

		if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {

			USB_DPRINTF_L3(PRINT_MASK_ATTA,
			    ehcip->ehci_log_hdl,
			    "ehci_take_control: BIOS has released "
			    "the ownership. retry = %d", retry);

			goto success;
		}

	}

	USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_take_control: take control from BIOS failed.");

	return (USB_FAILURE);

success:

#endif	/* __x86 */
	return (USB_SUCCESS);
}


/*
 * ehci_init_periodic_frame_list_table :
 *
 * Allocate the system memory and initialize Host Controller
 * Periodic Frame List table area. The starting of the Periodic
 * Frame List Table area must be 4096 byte aligned.
 */
static int
ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
{
	ddi_device_acc_attr_t	dev_attr;
	size_t			real_length;
	uint_t			ccount;
	int			result;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Force the required 4K restrictive alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;

	/* Create space for the Periodic Frame List */
	if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
	    DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {

		goto failure;
	}

	if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
	    sizeof (ehci_periodic_frame_list_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
	    &real_length, &ehcip->ehci_pflt_mem_handle)) {

		goto failure;
	}

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table: "
	    "Real length %lu", real_length);

	/* Map the whole Periodic Frame List into the I/O address space */
	result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
	    NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
	    real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);

	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_init_periodic_frame_lst_table: "
			    "More than 1 cookie");

			goto failure;
		}
	} else {
		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);

		goto failure;
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
	    (void *)ehcip->ehci_periodic_frame_list_tablep,
	    ehcip->ehci_pflt_cookie.dmac_address);

	/*
	 * DMA addresses for Periodic Frame List are bound.
	 */
	ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;

	bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);

	/* Initialize the Periodic Frame List */
	ehci_build_interrupt_lattice(ehcip);

	/* Reset Byte Alignment to Default */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_SUCCESS);
failure:
	/*
	 * Byte alignment.  Any handles/memory allocated before the failing
	 * step are left in the soft state; ehci_cleanup() frees every
	 * non-NULL pflt handle on the attach-failure path.
	 */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_FAILURE);
}


/*
 * ehci_build_interrupt_lattice:
 *
 * Construct the interrupt lattice tree using static Endpoint Descriptors
 * (QH). This interrupt lattice tree will have total of 32 interrupt QH
 * lists and the Host Controller (HC) processes one interrupt QH list in
 * every frame. The Host Controller traverses the periodic schedule by
 * constructing an array offset reference from the Periodic List Base Address
 * register and bits 12 to 3 of Frame Index register. It fetches the element
 * and begins traversing the graph of linked schedule data structures.
1599 */ 1600 static void 1601 ehci_build_interrupt_lattice(ehci_state_t *ehcip) 1602 { 1603 ehci_qh_t *list_array = ehcip->ehci_qh_pool_addr; 1604 ushort_t ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS]; 1605 ehci_periodic_frame_list_t *periodic_frame_list = 1606 ehcip->ehci_periodic_frame_list_tablep; 1607 ushort_t *temp, num_of_nodes; 1608 uintptr_t addr; 1609 int i, j, k; 1610 1611 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1612 "ehci_build_interrupt_lattice:"); 1613 1614 /* 1615 * Reserve the first 63 Endpoint Descriptor (QH) structures 1616 * in the pool as static endpoints & these are required for 1617 * constructing interrupt lattice tree. 1618 */ 1619 for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) { 1620 Set_QH(list_array[i].qh_state, EHCI_QH_STATIC); 1621 Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED); 1622 Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID); 1623 Set_QH(list_array[i].qh_alt_next_qtd, 1624 EHCI_QH_ALT_NEXT_QTD_PTR_VALID); 1625 } 1626 1627 /* 1628 * Make sure that last Endpoint on the periodic frame list terminates 1629 * periodic schedule. 1630 */ 1631 Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID); 1632 1633 /* Build the interrupt lattice tree */ 1634 for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) { 1635 /* 1636 * The next pointer in the host controller endpoint 1637 * descriptor must contain an iommu address. Calculate 1638 * the offset into the cpu address and add this to the 1639 * starting iommu address. 
1640 */ 1641 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]); 1642 1643 Set_QH(list_array[2*i + 1].qh_link_ptr, 1644 addr | EHCI_QH_LINK_REF_QH); 1645 Set_QH(list_array[2*i + 2].qh_link_ptr, 1646 addr | EHCI_QH_LINK_REF_QH); 1647 } 1648 1649 /* Build the tree bottom */ 1650 temp = (unsigned short *) 1651 kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP); 1652 1653 num_of_nodes = 1; 1654 1655 /* 1656 * Initialize the values which are used for setting up head pointers 1657 * for the 32ms scheduling lists which starts from the Periodic Frame 1658 * List. 1659 */ 1660 for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) { 1661 for (j = 0, k = 0; k < num_of_nodes; k++, j++) { 1662 ehci_index[j++] = temp[k]; 1663 ehci_index[j] = temp[k] + ehci_pow_2(i); 1664 } 1665 1666 num_of_nodes *= 2; 1667 for (k = 0; k < num_of_nodes; k++) 1668 temp[k] = ehci_index[k]; 1669 } 1670 1671 kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2)); 1672 1673 /* 1674 * Initialize the interrupt list in the Periodic Frame List Table 1675 * so that it points to the bottom of the tree. 1676 */ 1677 for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) { 1678 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *) 1679 (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1])); 1680 1681 ASSERT(addr); 1682 1683 for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) { 1684 Set_PFLT(periodic_frame_list-> 1685 ehci_periodic_frame_list_table[ehci_index[j++]], 1686 (uint32_t)(addr | EHCI_QH_LINK_REF_QH)); 1687 } 1688 } 1689 } 1690 1691 1692 /* 1693 * ehci_alloc_hcdi_ops: 1694 * 1695 * The HCDI interfaces or entry points are the software interfaces used by 1696 * the Universal Serial Bus Driver (USBA) to access the services of the 1697 * Host Controller Driver (HCD). During HCD initialization, inform USBA 1698 * about all available HCDI interfaces or entry points. 
 */
usba_hcdi_ops_t *
ehci_alloc_hcdi_ops(ehci_state_t	*ehcip)
{
	usba_hcdi_ops_t			*usba_hcdi_ops;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_alloc_hcdi_ops:");

	usba_hcdi_ops = usba_alloc_hcdi_ops();

	usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;

	usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support;
	usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
	usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;

	usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;
	usba_hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
	    ehci_hcdi_pipe_reset_data_toggle;

	usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;

	usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
	    ehci_hcdi_bulk_transfer_size;

	usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
	    ehci_hcdi_pipe_stop_intr_polling;
	usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
	    ehci_hcdi_pipe_stop_isoc_polling;

	usba_hcdi_ops->usba_hcdi_get_current_frame_number =
	    ehci_hcdi_get_current_frame_number;
	usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
	    ehci_hcdi_get_max_isoc_pkts;

	/* Polled-mode (console/kmdb) input entry points */
	usba_hcdi_ops->usba_hcdi_console_input_init =
	    ehci_hcdi_polled_input_init;
	usba_hcdi_ops->usba_hcdi_console_input_enter =
	    ehci_hcdi_polled_input_enter;
	usba_hcdi_ops->usba_hcdi_console_read =
	    ehci_hcdi_polled_read;
	usba_hcdi_ops->usba_hcdi_console_input_exit =
	    ehci_hcdi_polled_input_exit;
	usba_hcdi_ops->usba_hcdi_console_input_fini =
	    ehci_hcdi_polled_input_fini;

	/* Polled-mode (console/kmdb) output entry points */
	usba_hcdi_ops->usba_hcdi_console_output_init =
	    ehci_hcdi_polled_output_init;
	usba_hcdi_ops->usba_hcdi_console_output_enter =
	    ehci_hcdi_polled_output_enter;
	usba_hcdi_ops->usba_hcdi_console_write =
	    ehci_hcdi_polled_write;
	usba_hcdi_ops->usba_hcdi_console_output_exit =
	    ehci_hcdi_polled_output_exit;
	usba_hcdi_ops->usba_hcdi_console_output_fini =
	    ehci_hcdi_polled_output_fini;
	return (usba_hcdi_ops);
}


/*
 * Host Controller Driver (HCD) deinitialization functions
 */

/*
 * ehci_cleanup:
 *
 * Cleanup on attach failure or detach
 */
int
ehci_cleanup(ehci_state_t	*ehcip)
{
	ehci_trans_wrapper_t	*tw;
	ehci_pipe_private_t	*pp;
	ehci_qtd_t		*qtd;
	int			i, ctrl, rval;
	int			flags = ehcip->ehci_flags;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");

	if (flags & EHCI_RHREG) {
		/* Unload the root hub driver */
		if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {

			return (DDI_FAILURE);
		}
	}

	if (flags & EHCI_USBAREG) {
		/* Unregister this HCD instance with USBA */
		usba_hcdi_unregister(ehcip->ehci_dip);
	}

	if (flags & EHCI_INTR) {

		mutex_enter(&ehcip->ehci_int_mutex);

		/* Disable all EHCI QH list processing */
		Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
		    ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
		    EHCI_CMD_PERIODIC_SCHED_ENABLE)));

		/* Disable all EHCI interrupts */
		Set_OpReg(ehci_interrupt, 0);

		/* wait for the next SOF */
		(void) ehci_wait_for_sof(ehcip);

		/* Route all Root hub ports to Classic host controller */
		Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);

		/* Stop the EHCI host controller */
		Set_OpReg(ehci_command,
		    Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);

		mutex_exit(&ehcip->ehci_int_mutex);

		/* Wait for sometime */
		delay(drv_usectohz(EHCI_TIMEWAIT));

		ehci_rem_intrs(ehcip);
	}

	/* Unmap the EHCI registers */
	if (ehcip->ehci_caps_handle) {
		ddi_regs_map_free(&ehcip->ehci_caps_handle);
	}

	if (ehcip->ehci_config_handle) {
		pci_config_teardown(&ehcip->ehci_config_handle);
	}

	/* Free all the buffers */
	if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
		for (i = 0; i < ehci_qtd_pool_size; i++) {
			qtd = &ehcip->ehci_qtd_pool_addr[i];
			ctrl = Get_QTD(ehcip->
			    ehci_qtd_pool_addr[i].qtd_state);

			/*
			 * Reclaim the transfer wrapper of any QTD that is
			 * still attached to an in-flight transfer.
			 */
			if ((ctrl != EHCI_QTD_FREE) &&
			    (ctrl != EHCI_QTD_DUMMY) &&
			    (qtd->qtd_trans_wrapper)) {

				mutex_enter(&ehcip->ehci_int_mutex);

				tw = (ehci_trans_wrapper_t *)
				    EHCI_LOOKUP_ID((uint32_t)
				    Get_QTD(qtd->qtd_trans_wrapper));

				/* Obtain the pipe private structure */
				pp = tw->tw_pipe_private;

				/* Stop the the transfer timer */
				ehci_stop_xfer_timer(ehcip, tw,
				    EHCI_REMOVE_XFER_ALWAYS);

				ehci_deallocate_tw(ehcip, pp, tw);

				mutex_exit(&ehcip->ehci_int_mutex);
			}
		}

		/*
		 * If EHCI_QTD_POOL_BOUND flag is set, then unbind
		 * the handle for QTD pools.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_qtd_pool_dma_handle);

			/* rval is only consumed by this DEBUG-only check */
			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
	}

	/* Free the QTD pool */
	if (ehcip->ehci_qtd_pool_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
	}

	if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
		/*
		 * If EHCI_QH_POOL_BOUND flag is set, then unbind
		 * the handle for QH pools.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_qh_pool_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
	}

	/* Free the QH pool */
	if (ehcip->ehci_qh_pool_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
	}

	/* Free the Periodic frame list table (PFLT) area */
	if (ehcip->ehci_periodic_frame_list_tablep &&
	    ehcip->ehci_pflt_mem_handle) {
		/*
		 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
		 * the handle for PFLT.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_pflt_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
	}

	(void) ehci_isoc_cleanup(ehcip);

	if (ehcip->ehci_pflt_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
	}

	if (flags & EHCI_INTR) {
		/* Destroy the mutex */
		mutex_destroy(&ehcip->ehci_int_mutex);

		/* Destroy the async schedule advance condition variable */
		cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
	}

	/* clean up kstat structs */
	ehci_destroy_stats(ehcip);

	/* Free ehci hcdi ops */
	if (ehcip->ehci_hcdi_ops) {
		usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
	}

	if (flags & EHCI_ZALLOC) {

		usb_free_log_hdl(ehcip->ehci_log_hdl);

		/* Remove all properties that might have been created */
		ddi_prop_remove_all(ehcip->ehci_dip);

		/* Free the soft state */
		ddi_soft_state_free(ehci_statep,
		    ddi_get_instance(ehcip->ehci_dip));
	}

	return (DDI_SUCCESS);
}


/*
 * ehci_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
ehci_rem_intrs(ehci_state_t	*ehcip)
{
	int	i;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type);

	/* Disable all interrupts */
	if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(ehcip->ehci_htable,
		    ehcip->ehci_intr_cnt);
	} else {
		for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
			(void) ddi_intr_disable(ehcip->ehci_htable[i]);
		}
	}

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
		(void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
		(void) ddi_intr_free(ehcip->ehci_htable[i]);
	}

	kmem_free(ehcip->ehci_htable,
	    ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t));
}


/*
 * ehci_cpr_suspend
 *
 * Suspend the host controller for CPR (checkpoint/resume): suspend the
 * root hub, drain reclaim work, disable schedules and interrupts, then
 * stop and reset the controller.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ehci_cpr_suspend(ehci_state_t	*ehcip)
{
	int	i;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend:");

	/* Call into the root hub and suspend it */
	if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_cpr_suspend: root hub fails to suspend");

		return (DDI_FAILURE);
	}

	/* Only root hub's intr pipe should be open at this time */
	mutex_enter(&ehcip->ehci_int_mutex);

	ASSERT(ehcip->ehci_open_pipe_count == 0);

	/* Just wait till all resources are reclaimed */
	i = 0;
	while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
		ehci_handle_endpoint_reclaimation(ehcip);
		(void) ehci_wait_for_sof(ehcip);
	}
	ASSERT(ehcip->ehci_reclaim_list == NULL);

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend: Disable HC QH list processing");

	/* Disable all EHCI QH list processing */
	Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
	    ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend: Disable HC interrupts");

	/* Disable all EHCI interrupts */
	Set_OpReg(ehci_interrupt, 0);

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend: Wait for the next SOF");

	/* Wait for the next SOF */
	if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_cpr_suspend: ehci host controller suspend failed");

		mutex_exit(&ehcip->ehci_int_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * Stop the ehci host controller
	 * if usb keyboard is not connected.
	 */
	if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
		Set_OpReg(ehci_command,
		    Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);

		drv_usecwait(EHCI_RESET_TIMEWAIT);
	}

	/* Set host controller soft state to suspend */
	ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;

	/* Reset the host controller. This can poweroff downstream ports */
	Set_OpReg(ehci_command,
	    Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);

	mutex_exit(&ehcip->ehci_int_mutex);

	return (DDI_SUCCESS);
}


/*
 * ehci_cpr_resume
 *
 * Re-initialize the controller after CPR and resume the root hub.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ehci_cpr_resume(ehci_state_t	*ehcip)
{
	mutex_enter(&ehcip->ehci_int_mutex);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_resume: Restart the controller");

	/* Cleanup ehci specific information across cpr */
	ehci_cpr_cleanup(ehcip);

	/* Restart the controller */
	if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_cpr_resume: ehci host controller resume failed ");

		mutex_exit(&ehcip->ehci_int_mutex);

		return (DDI_FAILURE);
	}

	mutex_exit(&ehcip->ehci_int_mutex);

	/* Now resume the root hub */
	if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}


/*
 * Bandwidth Allocation functions
 */

/*
 * ehci_allocate_bandwidth:
 *
 * Figure out whether or not this interval may be supported. Return the index
 * into the lattice if it can be supported. Return allocation failure if it
 * can not be supported.
 */
int
ehci_allocate_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			*pnode,
	uchar_t			*smask,
	uchar_t			*cmask)
{
	int			error = USB_SUCCESS;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Reset the pnode to the last checked pnode */
	*pnode = 0;

	/* Allocate high speed bandwidth */
	if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
	    ph, pnode, smask, cmask)) != USB_SUCCESS) {

		return (error);
	}

	/*
	 * For low/full speed usb devices, allocate classic TT bandwidth
	 * in additional to high speed bandwidth.
	 */
	if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {

		/* Allocate classic TT bandwidth */
		if ((error = ehci_allocate_classic_tt_bandwidth(
		    ehcip, ph, *pnode)) != USB_SUCCESS) {

			/* Deallocate high speed bandwidth */
			ehci_deallocate_high_speed_bandwidth(
			    ehcip, ph, *pnode, *smask, *cmask);
		}
	}

	return (error);
}


/*
 * ehci_allocate_high_speed_bandwidth:
 *
 * Allocate high speed bandwidth for the low/full/high speed interrupt and
 * isochronous endpoints.
 */
static int
ehci_allocate_high_speed_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			*pnode,
	uchar_t			*smask,
	uchar_t			*cmask)
{
	uint_t			sbandwidth, cbandwidth;
	int			interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud;
	usb_port_status_t	port_status;
	int			error;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = ph->p_usba_device->usb_port_status;

	mutex_exit(&child_ud->usb_mutex);

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint. Return failure if maximum packet is
	 * zero.
	 */
	error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
	    port_status, &sbandwidth, &cbandwidth);
	if (error != USB_SUCCESS) {

		return (error);
	}

	/*
	 * Adjust polling interval to be a power of 2.
	 * If this interval can't be supported, return
	 * allocation failure.
	 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
	if (interval == USB_FAILURE) {

		return (USB_FAILURE);
	}

	if (port_status == USBA_HIGH_SPEED_DEV) {
		/* Allocate bandwidth for high speed devices */
		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
		    USB_EP_ATTR_ISOCH) {
			error = USB_SUCCESS;
		} else {

			error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
			    endpoint, sbandwidth, interval);
		}

		*cmask = 0x00;

	} else {
		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
		    USB_EP_ATTR_INTR) {

			/* Allocate bandwidth for low speed interrupt */
			error = ehci_find_bestfit_ls_intr_mask(ehcip,
			    smask, cmask, pnode, sbandwidth, cbandwidth,
			    interval);
		} else {
			if ((endpoint->bEndpointAddress &
			    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {

				/* Allocate bandwidth for sitd in */
				error = ehci_find_bestfit_sitd_in_mask(ehcip,
				    smask, cmask, pnode, sbandwidth, cbandwidth,
				    interval);
			} else {

				/* Allocate bandwidth for sitd out */
				error = ehci_find_bestfit_sitd_out_mask(ehcip,
				    smask, pnode, sbandwidth, interval);
				*cmask = 0x00;
			}
		}
	}

	if (error != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_high_speed_bandwidth: Reached maximum "
		    "bandwidth value and cannot allocate bandwidth for a "
		    "given high-speed periodic endpoint");

		return (USB_NO_BANDWIDTH);
	}

	return (error);
}


/*
 * ehci_allocate_classic_tt_speed_bandwidth:
 *
 * Allocate classic TT bandwidth for the low/full speed interrupt and
 * isochronous endpoints.
 */
static int
ehci_allocate_classic_tt_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode)
{
	uint_t			bandwidth, min;
	uint_t			height, leftmost, list;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud, *parent_ud;
	usb_port_status_t	port_status;
	int			i, interval;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = child_ud->usb_port_status;

	/* Get the parent high speed hub's usba device structure */
	parent_ud = child_ud->usb_hs_hub_usba_dev;

	mutex_exit(&child_ud->usb_mutex);

	USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_allocate_classic_tt_bandwidth: "
	    "child_ud 0x%p parent_ud 0x%p",
	    (void *)child_ud, (void *)parent_ud);

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint. Return failure if maximum packet is
	 * zero.
	 */
	if (ehci_compute_classic_bandwidth(endpoint,
	    port_status, &bandwidth) != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
		    "with zero endpoint maximum packet size is not supported");

		return (USB_NOT_SUPPORTED);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);

	mutex_enter(&parent_ud->usb_mutex);

	/*
	 * If the length in bytes plus the allocated bandwidth exceeds
	 * the maximum, return bandwidth allocation failure.
	 *
	 * usb_hs_hub_min_bandwidth is the least-loaded leaf of this TT;
	 * if even that leaf cannot take the new load, no leaf can.
	 */
	if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
	    FS_PERIODIC_BANDWIDTH) {

		mutex_exit(&parent_ud->usb_mutex);

		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_classic_tt_bandwidth: Reached maximum "
		    "bandwidth value and cannot allocate bandwidth for a "
		    "given low/full speed periodic endpoint");

		return (USB_NO_BANDWIDTH);
	}

	mutex_exit(&parent_ud->usb_mutex);

	/*
	 * Adjust polling interval to be a power of 2.
	 * NOTE(review): the return value is not checked for USB_FAILURE
	 * here; presumably the earlier high speed allocation has already
	 * validated the interval — confirm against the callers.
	 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/* Find the leftmost leaf in the subtree specified by the node. */
	leftmost = ehci_leftmost_leaf(pnode, height);

	mutex_enter(&parent_ud->usb_mutex);

	/*
	 * First pass: verify every leaf touched by this interval can
	 * absorb the additional bandwidth before committing anything.
	 */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];

		if ((parent_ud->usb_hs_hub_bandwidth[list] +
		    bandwidth) > FS_PERIODIC_BANDWIDTH) {

			mutex_exit(&parent_ud->usb_mutex);

			USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
			    "ehci_allocate_classic_tt_bandwidth: Reached "
			    "maximum bandwidth value and cannot allocate "
			    "bandwidth for low/full periodic endpoint");

			return (USB_NO_BANDWIDTH);
		}
	}

	/*
	 * All the leaves for this node must be updated with the bandwidth.
	 */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];
		parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min = parent_ud->usb_hs_hub_bandwidth[0];

	for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
		if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
			min = parent_ud->usb_hs_hub_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	parent_ud->usb_hs_hub_min_bandwidth = min;

	mutex_exit(&parent_ud->usb_mutex);

	return (USB_SUCCESS);
}


/*
 * ehci_deallocate_bandwidth:
 *
 * Deallocate bandwidth for the given node in the lattice and the length
 * of transfer.
 */
void
ehci_deallocate_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode,
	uchar_t			smask,
	uchar_t			cmask)
{
	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* High speed bandwidth is released for every device speed */
	ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);

	/*
	 * For low/full speed usb devices, deallocate classic TT bandwidth
	 * in additional to high speed bandwidth.
	 */
	if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {

		/* Deallocate classic TT bandwidth */
		ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
	}
}


/*
 * ehci_deallocate_high_speed_bandwidth:
 *
 * Deallocate high speed bandwidth of a interrupt or isochronous endpoint.
 */
static void
ehci_deallocate_high_speed_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode,
	uchar_t			smask,
	uchar_t			cmask)
{
	uint_t			height, leftmost;
	uint_t			list_count;
	uint_t			sbandwidth, cbandwidth;
	int			interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud;
	usb_port_status_t	port_status;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = ph->p_usba_device->usb_port_status;

	mutex_exit(&child_ud->usb_mutex);

	/*
	 * Recompute the same per-transaction costs that were used at
	 * allocation time; failure is ignored because the endpoint was
	 * accepted when the bandwidth was first allocated.
	 */
	(void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
	    port_status, &sbandwidth, &cbandwidth);

	/* Adjust polling interval to be a power of 2 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/*
	 * Find the leftmost leaf in the subtree specified by the node
	 */
	leftmost = ehci_leftmost_leaf(pnode, height);

	list_count = EHCI_NUM_INTR_QH_LISTS/interval;

	/*
	 * Delete the bandwidth from the appropriate lists; negative
	 * amounts passed to ehci_update_bw_availability() subtract.
	 */
	if (port_status == USBA_HIGH_SPEED_DEV) {

		ehci_update_bw_availability(ehcip, -sbandwidth,
		    leftmost, list_count, smask);
	} else {
		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
		    USB_EP_ATTR_INTR) {

			/* Split interrupt: release both start and complete */
			ehci_update_bw_availability(ehcip, -sbandwidth,
			    leftmost, list_count, smask);
			ehci_update_bw_availability(ehcip, -cbandwidth,
			    leftmost, list_count, cmask);
		} else {
			if ((endpoint->bEndpointAddress &
			    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {

				/* Isoch IN: complete splits carry the data */
				ehci_update_bw_availability(ehcip, -sbandwidth,
				    leftmost,
				    list_count, smask);
				ehci_update_bw_availability(ehcip,
				    -MAX_UFRAME_SITD_XFER, leftmost,
				    list_count, cmask);
			} else {

				/* Isoch OUT: start splits carry the data */
				ehci_update_bw_availability(ehcip,
				    -MAX_UFRAME_SITD_XFER, leftmost,
				    list_count, smask);
			}
		}
	}
}

/*
 * ehci_deallocate_classic_tt_bandwidth:
 *
 * Deallocate classic TT bandwidth of a interrupt or isochronous endpoint.
 */
static void
ehci_deallocate_classic_tt_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode)
{
	uint_t			bandwidth, height, leftmost, list, min;
	int			i, interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud, *parent_ud;
	usb_port_status_t	port_status;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = child_ud->usb_port_status;

	/* Get the parent high speed hub's usba device structure */
	parent_ud = child_ud->usb_hs_hub_usba_dev;

	mutex_exit(&child_ud->usb_mutex);

	/* Obtain the bandwidth */
	(void) ehci_compute_classic_bandwidth(endpoint,
	    port_status, &bandwidth);

	/* Adjust polling interval to be a power of 2 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/* Find the leftmost leaf in the subtree specified by the node */
	leftmost = ehci_leftmost_leaf(pnode, height);

	mutex_enter(&parent_ud->usb_mutex);

	/* Delete the bandwidth from the appropriate lists */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];

		parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min = parent_ud->usb_hs_hub_bandwidth[0];

	for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
		if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
			min = parent_ud->usb_hs_hub_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	parent_ud->usb_hs_hub_min_bandwidth = min;

	mutex_exit(&parent_ud->usb_mutex);
}


/*
 * ehci_compute_high_speed_bandwidth:
 *
 * Given a periodic endpoint (interrupt or isochronous) determine the total
 * bandwidth for one transaction. The EHCI host controller traverses the
 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
 * services an endpoint, only a single transaction attempt is made. The HC
 * moves to the next Endpoint Descriptor after the first transaction attempt
 * rather than finishing the entire Transfer Descriptor. Therefore, when a
 * Transfer Descriptor is inserted into the lattice, we will only count the
 * number of bytes for one transaction.
 *
 * The following are the formulas used for calculating bandwidth in terms
 * bytes and it is for the single USB high speed transaction. The protocol
 * overheads will be different for each of type of USB transfer & all these
 * formulas & protocol overheads are derived from the 5.11.3 section of the
 * USB 2.0 Specification.
2614 * 2615 * High-Speed: 2616 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay 2617 * 2618 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub) 2619 * 2620 * Protocol overhead + Split transaction overhead + 2621 * ((MaxPktSz * 7)/6) + Host_Delay; 2622 */ 2623 /* ARGSUSED */ 2624 static int 2625 ehci_compute_high_speed_bandwidth( 2626 ehci_state_t *ehcip, 2627 usb_ep_descr_t *endpoint, 2628 usb_port_status_t port_status, 2629 uint_t *sbandwidth, 2630 uint_t *cbandwidth) 2631 { 2632 ushort_t maxpacketsize = endpoint->wMaxPacketSize; 2633 2634 /* Return failure if endpoint maximum packet is zero */ 2635 if (maxpacketsize == 0) { 2636 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2637 "ehci_allocate_high_speed_bandwidth: Periodic endpoint " 2638 "with zero endpoint maximum packet size is not supported"); 2639 2640 return (USB_NOT_SUPPORTED); 2641 } 2642 2643 /* Add bit-stuffing overhead */ 2644 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6); 2645 2646 /* Add Host Controller specific delay to required bandwidth */ 2647 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY; 2648 2649 /* Add xfer specific protocol overheads */ 2650 if ((endpoint->bmAttributes & 2651 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) { 2652 /* High speed interrupt transaction */ 2653 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD; 2654 } else { 2655 /* Isochronous transaction */ 2656 *sbandwidth += HS_ISOC_PROTO_OVERHEAD; 2657 } 2658 2659 /* 2660 * For low/full speed devices, add split transaction specific 2661 * overheads. 2662 */ 2663 if (port_status != USBA_HIGH_SPEED_DEV) { 2664 /* 2665 * Add start and complete split transaction 2666 * tokens overheads. 
2667 */ 2668 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD; 2669 *sbandwidth += START_SPLIT_OVERHEAD; 2670 2671 /* Add data overhead depending on data direction */ 2672 if ((endpoint->bEndpointAddress & 2673 USB_EP_DIR_MASK) == USB_EP_DIR_IN) { 2674 *cbandwidth += maxpacketsize; 2675 } else { 2676 if ((endpoint->bmAttributes & 2677 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) { 2678 /* There is no compete splits for out */ 2679 *cbandwidth = 0; 2680 } 2681 *sbandwidth += maxpacketsize; 2682 } 2683 } else { 2684 uint_t xactions; 2685 2686 /* Get the max transactions per microframe */ 2687 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >> 2688 USB_EP_MAX_XACTS_SHIFT) + 1; 2689 2690 /* High speed transaction */ 2691 *sbandwidth += maxpacketsize; 2692 2693 /* Calculate bandwidth per micro-frame */ 2694 *sbandwidth *= xactions; 2695 2696 *cbandwidth = 0; 2697 } 2698 2699 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2700 "ehci_allocate_high_speed_bandwidth: " 2701 "Start split bandwidth %d Complete split bandwidth %d", 2702 *sbandwidth, *cbandwidth); 2703 2704 return (USB_SUCCESS); 2705 } 2706 2707 2708 /* 2709 * ehci_compute_classic_bandwidth: 2710 * 2711 * Given a periodic endpoint (interrupt or isochronous) determine the total 2712 * bandwidth for one transaction. The EHCI host controller traverses the 2713 * endpoint descriptor lists on a first-come-first-serve basis. When the HC 2714 * services an endpoint, only a single transaction attempt is made. The HC 2715 * moves to the next Endpoint Descriptor after the first transaction attempt 2716 * rather than finishing the entire Transfer Descriptor. Therefore, when a 2717 * Transfer Descriptor is inserted into the lattice, we will only count the 2718 * number of bytes for one transaction. 2719 * 2720 * The following are the formulas used for calculating bandwidth in terms 2721 * bytes and it is for the single USB high speed transaction. 
The protocol 2722 * overheads will be different for each of type of USB transfer & all these 2723 * formulas & protocol overheads are derived from the 5.11.3 section of the 2724 * USB 2.0 Specification. 2725 * 2726 * Low-Speed: 2727 * Protocol overhead + Hub LS overhead + 2728 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay 2729 * 2730 * Full-Speed: 2731 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay 2732 */ 2733 /* ARGSUSED */ 2734 static int 2735 ehci_compute_classic_bandwidth( 2736 usb_ep_descr_t *endpoint, 2737 usb_port_status_t port_status, 2738 uint_t *bandwidth) 2739 { 2740 ushort_t maxpacketsize = endpoint->wMaxPacketSize; 2741 2742 /* 2743 * If endpoint maximum packet is zero, then return immediately. 2744 */ 2745 if (maxpacketsize == 0) { 2746 2747 return (USB_NOT_SUPPORTED); 2748 } 2749 2750 /* Add TT delay to required bandwidth */ 2751 *bandwidth = TT_DELAY; 2752 2753 /* Add bit-stuffing overhead */ 2754 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6); 2755 2756 switch (port_status) { 2757 case USBA_LOW_SPEED_DEV: 2758 /* Low speed interrupt transaction */ 2759 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD + 2760 HUB_LOW_SPEED_PROTO_OVERHEAD + 2761 (LOW_SPEED_CLOCK * maxpacketsize)); 2762 break; 2763 case USBA_FULL_SPEED_DEV: 2764 /* Full speed transaction */ 2765 *bandwidth += maxpacketsize; 2766 2767 /* Add xfer specific protocol overheads */ 2768 if ((endpoint->bmAttributes & 2769 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) { 2770 /* Full speed interrupt transaction */ 2771 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD; 2772 } else { 2773 /* Isochronous and input transaction */ 2774 if ((endpoint->bEndpointAddress & 2775 USB_EP_DIR_MASK) == USB_EP_DIR_IN) { 2776 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD; 2777 } else { 2778 /* Isochronous and output transaction */ 2779 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD; 2780 } 2781 } 2782 break; 2783 } 2784 2785 return (USB_SUCCESS); 2786 } 2787 2788 2789 /* 2790 * ehci_adjust_polling_interval: 2791 * 2792 * 
Adjust bandwidth according usb device speed. 2793 */ 2794 /* ARGSUSED */ 2795 int 2796 ehci_adjust_polling_interval( 2797 ehci_state_t *ehcip, 2798 usb_ep_descr_t *endpoint, 2799 usb_port_status_t port_status) 2800 { 2801 uint_t interval; 2802 int i = 0; 2803 2804 /* Get the polling interval */ 2805 interval = endpoint->bInterval; 2806 2807 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2808 "ehci_adjust_polling_interval: Polling interval 0x%x", interval); 2809 2810 /* 2811 * According USB 2.0 Specifications, a high-speed endpoint's 2812 * polling intervals are specified interms of 125us or micro 2813 * frame, where as full/low endpoint's polling intervals are 2814 * specified in milliseconds. 2815 * 2816 * A high speed interrupt/isochronous endpoints can specify 2817 * desired polling interval between 1 to 16 micro-frames, 2818 * where as full/low endpoints can specify between 1 to 255 2819 * milliseconds. 2820 */ 2821 switch (port_status) { 2822 case USBA_LOW_SPEED_DEV: 2823 /* 2824 * Low speed endpoints are limited to specifying 2825 * only 8ms to 255ms in this driver. If a device 2826 * reports a polling interval that is less than 8ms, 2827 * it will use 8 ms instead. 2828 */ 2829 if (interval < LS_MIN_POLL_INTERVAL) { 2830 2831 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2832 "Low speed endpoint's poll interval of %d ms " 2833 "is below threshold. Rounding up to %d ms", 2834 interval, LS_MIN_POLL_INTERVAL); 2835 2836 interval = LS_MIN_POLL_INTERVAL; 2837 } 2838 2839 /* 2840 * Return an error if the polling interval is greater 2841 * than 255ms. 2842 */ 2843 if (interval > LS_MAX_POLL_INTERVAL) { 2844 2845 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2846 "Low speed endpoint's poll interval is " 2847 "greater than %d ms", LS_MAX_POLL_INTERVAL); 2848 2849 return (USB_FAILURE); 2850 } 2851 break; 2852 2853 case USBA_FULL_SPEED_DEV: 2854 /* 2855 * Return an error if the polling interval is less 2856 * than 1ms and greater than 255ms. 
2857 */ 2858 if ((interval < FS_MIN_POLL_INTERVAL) && 2859 (interval > FS_MAX_POLL_INTERVAL)) { 2860 2861 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2862 "Full speed endpoint's poll interval must " 2863 "be between %d and %d ms", FS_MIN_POLL_INTERVAL, 2864 FS_MAX_POLL_INTERVAL); 2865 2866 return (USB_FAILURE); 2867 } 2868 break; 2869 case USBA_HIGH_SPEED_DEV: 2870 /* 2871 * Return an error if the polling interval is less 1 2872 * and greater than 16. Convert this value to 125us 2873 * units using 2^(bInterval -1). refer usb 2.0 spec 2874 * page 51 for details. 2875 */ 2876 if ((interval < HS_MIN_POLL_INTERVAL) && 2877 (interval > HS_MAX_POLL_INTERVAL)) { 2878 2879 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2880 "High speed endpoint's poll interval " 2881 "must be between %d and %d units", 2882 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL); 2883 2884 return (USB_FAILURE); 2885 } 2886 2887 /* Adjust high speed device polling interval */ 2888 interval = 2889 ehci_adjust_high_speed_polling_interval(ehcip, endpoint); 2890 2891 break; 2892 } 2893 2894 /* 2895 * If polling interval is greater than 32ms, 2896 * adjust polling interval equal to 32ms. 2897 */ 2898 if (interval > EHCI_NUM_INTR_QH_LISTS) { 2899 interval = EHCI_NUM_INTR_QH_LISTS; 2900 } 2901 2902 /* 2903 * Find the nearest power of 2 that's less 2904 * than interval. 2905 */ 2906 while ((ehci_pow_2(i)) <= interval) { 2907 i++; 2908 } 2909 2910 return (ehci_pow_2((i - 1))); 2911 } 2912 2913 2914 /* 2915 * ehci_adjust_high_speed_polling_interval: 2916 */ 2917 /* ARGSUSED */ 2918 static int 2919 ehci_adjust_high_speed_polling_interval( 2920 ehci_state_t *ehcip, 2921 usb_ep_descr_t *endpoint) 2922 { 2923 uint_t interval; 2924 2925 /* Get the polling interval */ 2926 interval = ehci_pow_2(endpoint->bInterval - 1); 2927 2928 /* 2929 * Convert polling interval from micro seconds 2930 * to milli seconds. 
2931 */ 2932 if (interval <= EHCI_MAX_UFRAMES) { 2933 interval = 1; 2934 } else { 2935 interval = interval/EHCI_MAX_UFRAMES; 2936 } 2937 2938 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2939 "ehci_adjust_high_speed_polling_interval: " 2940 "High speed adjusted interval 0x%x", interval); 2941 2942 return (interval); 2943 } 2944 2945 2946 /* 2947 * ehci_lattice_height: 2948 * 2949 * Given the requested bandwidth, find the height in the tree at which the 2950 * nodes for this bandwidth fall. The height is measured as the number of 2951 * nodes from the leaf to the level specified by bandwidth The root of the 2952 * tree is at height TREE_HEIGHT. 2953 */ 2954 static uint_t 2955 ehci_lattice_height(uint_t interval) 2956 { 2957 return (TREE_HEIGHT - (ehci_log_2(interval))); 2958 } 2959 2960 2961 /* 2962 * ehci_lattice_parent: 2963 * 2964 * Given a node in the lattice, find the index of the parent node 2965 */ 2966 static uint_t 2967 ehci_lattice_parent(uint_t node) 2968 { 2969 if ((node % 2) == 0) { 2970 2971 return ((node/2) - 1); 2972 } else { 2973 2974 return ((node + 1)/2 - 1); 2975 } 2976 } 2977 2978 2979 /* 2980 * ehci_find_periodic_node: 2981 * 2982 * Based on the "real" array leaf node and interval, get the periodic node. 2983 */ 2984 static uint_t 2985 ehci_find_periodic_node(uint_t leaf, int interval) { 2986 uint_t lattice_leaf; 2987 uint_t height = ehci_lattice_height(interval); 2988 uint_t pnode; 2989 int i; 2990 2991 /* Get the leaf number in the lattice */ 2992 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1; 2993 2994 /* Get the node in the lattice based on the height and leaf */ 2995 pnode = lattice_leaf; 2996 for (i = 0; i < height; i++) { 2997 pnode = ehci_lattice_parent(pnode); 2998 } 2999 3000 return (pnode); 3001 } 3002 3003 3004 /* 3005 * ehci_leftmost_leaf: 3006 * 3007 * Find the leftmost leaf in the subtree specified by the node. Height refers 3008 * to number of nodes from the bottom of the tree to the node, including the 3009 * node. 
3010 * 3011 * The formula for a zero based tree is: 3012 * 2^H * Node + 2^H - 1 3013 * The leaf of the tree is an array, convert the number for the array. 3014 * Subtract the size of nodes not in the array 3015 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) = 3016 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS = 3017 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS 3018 * 0 3019 * 1 2 3020 * 0 1 2 3 3021 */ 3022 static uint_t 3023 ehci_leftmost_leaf( 3024 uint_t node, 3025 uint_t height) 3026 { 3027 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS); 3028 } 3029 3030 3031 /* 3032 * ehci_pow_2: 3033 * 3034 * Compute 2 to the power 3035 */ 3036 static uint_t 3037 ehci_pow_2(uint_t x) 3038 { 3039 if (x == 0) { 3040 3041 return (1); 3042 } else { 3043 3044 return (2 << (x - 1)); 3045 } 3046 } 3047 3048 3049 /* 3050 * ehci_log_2: 3051 * 3052 * Compute log base 2 of x 3053 */ 3054 static uint_t 3055 ehci_log_2(uint_t x) 3056 { 3057 int i = 0; 3058 3059 while (x != 1) { 3060 x = x >> 1; 3061 i++; 3062 } 3063 3064 return (i); 3065 } 3066 3067 3068 /* 3069 * ehci_find_bestfit_hs_mask: 3070 * 3071 * Find the smask and cmask in the bandwidth allocation, and update the 3072 * bandwidth allocation. 
3073 */ 3074 static int 3075 ehci_find_bestfit_hs_mask( 3076 ehci_state_t *ehcip, 3077 uchar_t *smask, 3078 uint_t *pnode, 3079 usb_ep_descr_t *endpoint, 3080 uint_t bandwidth, 3081 int interval) 3082 { 3083 int i; 3084 uint_t elements, index; 3085 int array_leaf, best_array_leaf; 3086 uint_t node_bandwidth, best_node_bandwidth; 3087 uint_t leaf_count; 3088 uchar_t bw_mask; 3089 uchar_t best_smask; 3090 3091 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3092 "ehci_find_bestfit_hs_mask: "); 3093 3094 /* Get all the valid smasks */ 3095 switch (ehci_pow_2(endpoint->bInterval - 1)) { 3096 case EHCI_INTR_1US_POLL: 3097 index = EHCI_1US_MASK_INDEX; 3098 elements = EHCI_INTR_1US_POLL; 3099 break; 3100 case EHCI_INTR_2US_POLL: 3101 index = EHCI_2US_MASK_INDEX; 3102 elements = EHCI_INTR_2US_POLL; 3103 break; 3104 case EHCI_INTR_4US_POLL: 3105 index = EHCI_4US_MASK_INDEX; 3106 elements = EHCI_INTR_4US_POLL; 3107 break; 3108 case EHCI_INTR_XUS_POLL: 3109 default: 3110 index = EHCI_XUS_MASK_INDEX; 3111 elements = EHCI_INTR_XUS_POLL; 3112 break; 3113 } 3114 3115 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 3116 3117 /* 3118 * Because of the way the leaves are setup, we will automatically 3119 * hit the leftmost leaf of every possible node with this interval. 3120 */ 3121 best_smask = 0x00; 3122 best_node_bandwidth = 0; 3123 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 3124 /* Find the bandwidth mask */ 3125 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip, 3126 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask); 3127 3128 /* 3129 * If this node cannot support our requirements skip to the 3130 * next leaf. 3131 */ 3132 if (bw_mask == 0x00) { 3133 continue; 3134 } 3135 3136 /* 3137 * Now make sure our bandwidth requirements can be 3138 * satisfied with one of smasks in this node. 
3139 */ 3140 *smask = 0x00; 3141 for (i = index; i < (index + elements); i++) { 3142 /* Check the start split mask value */ 3143 if (ehci_start_split_mask[index] & bw_mask) { 3144 *smask = ehci_start_split_mask[index]; 3145 break; 3146 } 3147 } 3148 3149 /* 3150 * If an appropriate smask is found save the information if: 3151 * o best_smask has not been found yet. 3152 * - or - 3153 * o This is the node with the least amount of bandwidth 3154 */ 3155 if ((*smask != 0x00) && 3156 ((best_smask == 0x00) || 3157 (best_node_bandwidth > node_bandwidth))) { 3158 3159 best_node_bandwidth = node_bandwidth; 3160 best_array_leaf = array_leaf; 3161 best_smask = *smask; 3162 } 3163 } 3164 3165 /* 3166 * If we find node that can handle the bandwidth populate the 3167 * appropriate variables and return success. 3168 */ 3169 if (best_smask) { 3170 *smask = best_smask; 3171 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 3172 interval); 3173 ehci_update_bw_availability(ehcip, bandwidth, 3174 ehci_index[best_array_leaf], leaf_count, best_smask); 3175 3176 return (USB_SUCCESS); 3177 } 3178 3179 return (USB_FAILURE); 3180 } 3181 3182 3183 /* 3184 * ehci_find_bestfit_ls_intr_mask: 3185 * 3186 * Find the smask and cmask in the bandwidth allocation. 
3187 */ 3188 static int 3189 ehci_find_bestfit_ls_intr_mask( 3190 ehci_state_t *ehcip, 3191 uchar_t *smask, 3192 uchar_t *cmask, 3193 uint_t *pnode, 3194 uint_t sbandwidth, 3195 uint_t cbandwidth, 3196 int interval) 3197 { 3198 int i; 3199 uint_t elements, index; 3200 int array_leaf, best_array_leaf; 3201 uint_t node_sbandwidth, node_cbandwidth; 3202 uint_t best_node_bandwidth; 3203 uint_t leaf_count; 3204 uchar_t bw_smask, bw_cmask; 3205 uchar_t best_smask, best_cmask; 3206 3207 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3208 "ehci_find_bestfit_ls_intr_mask: "); 3209 3210 /* For low and full speed devices */ 3211 index = EHCI_XUS_MASK_INDEX; 3212 elements = EHCI_INTR_4MS_POLL; 3213 3214 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 3215 3216 /* 3217 * Because of the way the leaves are setup, we will automatically 3218 * hit the leftmost leaf of every possible node with this interval. 3219 */ 3220 best_smask = 0x00; 3221 best_node_bandwidth = 0; 3222 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 3223 /* Find the bandwidth mask */ 3224 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3225 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask); 3226 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3227 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask); 3228 3229 /* 3230 * If this node cannot support our requirements skip to the 3231 * next leaf. 3232 */ 3233 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) { 3234 continue; 3235 } 3236 3237 /* 3238 * Now make sure our bandwidth requirements can be 3239 * satisfied with one of smasks in this node. 
3240 */ 3241 *smask = 0x00; 3242 *cmask = 0x00; 3243 for (i = index; i < (index + elements); i++) { 3244 /* Check the start split mask value */ 3245 if ((ehci_start_split_mask[index] & bw_smask) && 3246 (ehci_intr_complete_split_mask[index] & bw_cmask)) { 3247 *smask = ehci_start_split_mask[index]; 3248 *cmask = ehci_intr_complete_split_mask[index]; 3249 break; 3250 } 3251 } 3252 3253 /* 3254 * If an appropriate smask is found save the information if: 3255 * o best_smask has not been found yet. 3256 * - or - 3257 * o This is the node with the least amount of bandwidth 3258 */ 3259 if ((*smask != 0x00) && 3260 ((best_smask == 0x00) || 3261 (best_node_bandwidth > 3262 (node_sbandwidth + node_cbandwidth)))) { 3263 best_node_bandwidth = node_sbandwidth + node_cbandwidth; 3264 best_array_leaf = array_leaf; 3265 best_smask = *smask; 3266 best_cmask = *cmask; 3267 } 3268 } 3269 3270 /* 3271 * If we find node that can handle the bandwidth populate the 3272 * appropriate variables and return success. 3273 */ 3274 if (best_smask) { 3275 *smask = best_smask; 3276 *cmask = best_cmask; 3277 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 3278 interval); 3279 ehci_update_bw_availability(ehcip, sbandwidth, 3280 ehci_index[best_array_leaf], leaf_count, best_smask); 3281 ehci_update_bw_availability(ehcip, cbandwidth, 3282 ehci_index[best_array_leaf], leaf_count, best_cmask); 3283 3284 return (USB_SUCCESS); 3285 } 3286 3287 return (USB_FAILURE); 3288 } 3289 3290 3291 /* 3292 * ehci_find_bestfit_sitd_in_mask: 3293 * 3294 * Find the smask and cmask in the bandwidth allocation. 
 */
static int
ehci_find_bestfit_sitd_in_mask(
	ehci_state_t		*ehcip,
	uchar_t			*smask,
	uchar_t			*cmask,
	uint_t			*pnode,
	uint_t			sbandwidth,
	uint_t			cbandwidth,
	int			interval)
{
	int			i, uFrames, found;
	int			array_leaf, best_array_leaf;
	uint_t			node_sbandwidth, node_cbandwidth;
	uint_t			best_node_bandwidth;
	uint_t			leaf_count;
	uchar_t			bw_smask, bw_cmask;
	uchar_t			best_smask, best_cmask;

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_find_bestfit_sitd_in_mask: ");

	/* Number of 32ms-tree leaves spanned by one period of this pipe */
	leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;

	/*
	 * Because of the way the leaves are setup, we will automatically
	 * hit the leftmost leaf of every possible node with this interval.
	 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
	 */
	/*
	 * Need to add an additional 2 uFrames, if the "L"ast
	 * complete split is before uFrame 6. See section
	 * 11.8.4 in USB 2.0 Spec. Currently we do not support
	 * the "Back Ptr" which means we support on IN of
	 * ~4*MAX_UFRAME_SITD_XFER bandwidth/
	 */
	uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
	/* Round up for a partial trailing uFrame of complete-split data */
	if (cbandwidth % MAX_UFRAME_SITD_XFER) {
		uFrames++;
	}
	if (uFrames > 6) {

		return (USB_FAILURE);
	}
	/* Start split in uFrame 0; complete splits in the following uFrames */
	*smask = 0x1;
	*cmask = 0x00;
	for (i = 0; i < uFrames; i++) {
		*cmask = *cmask << 1;
		*cmask |= 0x1;
	}
	/* cmask must start 2 frames after the smask */
	*cmask = *cmask << 2;

	found = 0;
	best_smask = 0x00;
	best_node_bandwidth = 0;
	for (array_leaf = 0; array_leaf < interval; array_leaf++) {
		node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
		    sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
		node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
		    MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
		    &bw_cmask);

		/*
		 * If this node cannot support our requirements skip to the
		 * next leaf.
		 */
		if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
			continue;
		}

		/*
		 * Slide the smask/cmask pair to later uFrame positions until
		 * both fit the availability masks of this node.
		 *
		 * NOTE(review): *smask/*cmask and "found" are not reset at
		 * the top of each array_leaf iteration, so later leaves
		 * resume shifting from wherever the previous leaf left off,
		 * and a leaf whose inner loop finds nothing can still be
		 * recorded below via a stale "found" -- confirm this
		 * carry-over is intentional.
		 */
		for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
			if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
				found = 1;
				break;
			}
			*smask = *smask << 1;
			*cmask = *cmask << 1;
		}

		/*
		 * If an appropriate smask is found save the information if:
		 * o best_smask has not been found yet.
		 * - or -
		 * o This is the node with the least amount of bandwidth
		 */
		if (found &&
		    ((best_smask == 0x00) ||
		    (best_node_bandwidth >
		    (node_sbandwidth + node_cbandwidth)))) {
			best_node_bandwidth = node_sbandwidth + node_cbandwidth;
			best_array_leaf = array_leaf;
			best_smask = *smask;
			best_cmask = *cmask;
		}
	}

	/*
	 * If we find node that can handle the bandwidth populate the
	 * appropriate variables and return success.
	 */
	if (best_smask) {
		*smask = best_smask;
		*cmask = best_cmask;
		*pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
		    interval);
		ehci_update_bw_availability(ehcip, sbandwidth,
		    ehci_index[best_array_leaf], leaf_count, best_smask);
		ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
		    ehci_index[best_array_leaf], leaf_count, best_cmask);

		return (USB_SUCCESS);
	}

	return (USB_FAILURE);
}


/*
 * ehci_find_bestfit_sitd_out_mask:
 *
 * Find the smask in the bandwidth allocation.
 */
static int
ehci_find_bestfit_sitd_out_mask(
	ehci_state_t		*ehcip,
	uchar_t			*smask,
	uint_t			*pnode,
	uint_t			sbandwidth,
	int			interval)
{
	int			i, uFrames, found;
	int			array_leaf, best_array_leaf;
	uint_t			node_sbandwidth;
	uint_t			best_node_bandwidth;
	uint_t			leaf_count;
	uchar_t			bw_smask;
	uchar_t			best_smask;

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_find_bestfit_sitd_out_mask: ");

	leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;

	/*
	 * Because of the way the leaves are setup, we will automatically
	 * hit the leftmost leaf of every possible node with this interval.
	 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
	 */
	*smask = 0x00;
	/* OUT sitds use only start splits: one uFrame per full chunk */
	uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
	if (sbandwidth % MAX_UFRAME_SITD_XFER) {
		uFrames++;
	}
	for (i = 0; i < uFrames; i++) {
		*smask = *smask << 1;
		*smask |= 0x1;
	}

	found = 0;
	best_smask = 0x00;
	best_node_bandwidth = 0;
	for (array_leaf = 0; array_leaf < interval; array_leaf++) {
		node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
		    MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
		    &bw_smask);

		/*
		 * If this node cannot support our requirements skip to the
		 * next leaf.
		 */
		if (bw_smask == 0x00) {
			continue;
		}

		/* You cannot have a start split on the 8th uFrame */
		/*
		 * NOTE(review): as in the IN variant above, *smask and
		 * "found" carry over between array_leaf iterations rather
		 * than being reset per leaf -- confirm intentional.
		 */
		for (i = 0; (*smask & 0x80) == 0; i++) {
			if (*smask & bw_smask) {
				found = 1;
				break;
			}
			*smask = *smask << 1;
		}

		/*
		 * If an appropriate smask is found save the information if:
		 * o best_smask has not been found yet.
		 * - or -
		 * o This is the node with the least amount of bandwidth
		 */
		if (found &&
		    ((best_smask == 0x00) ||
		    (best_node_bandwidth > node_sbandwidth))) {
			best_node_bandwidth = node_sbandwidth;
			best_array_leaf = array_leaf;
			best_smask = *smask;
		}
	}

	/*
	 * If we find node that can handle the bandwidth populate the
	 * appropriate variables and return success.
	 */
	if (best_smask) {
		*smask = best_smask;
		*pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
		    interval);
		ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
		    ehci_index[best_array_leaf], leaf_count, best_smask);

		return (USB_SUCCESS);
	}

	return (USB_FAILURE);
}


/*
 * ehci_calculate_bw_availability_mask:
 *
 * Returns the "total bandwidth used" in this node.
 * Populates bw_mask with the uFrames that can support the bandwidth.
 *
 * If all the Frames cannot support this bandwidth, then bw_mask
 * will return 0x00 and the "total bandwidth used" will be invalid.
3520 */ 3521 static uint_t 3522 ehci_calculate_bw_availability_mask( 3523 ehci_state_t *ehcip, 3524 uint_t bandwidth, 3525 int leaf, 3526 int leaf_count, 3527 uchar_t *bw_mask) 3528 { 3529 int i, j; 3530 uchar_t bw_uframe; 3531 int uframe_total; 3532 ehci_frame_bandwidth_t *fbp; 3533 uint_t total_bandwidth = 0; 3534 3535 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3536 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d", 3537 leaf, leaf_count); 3538 3539 /* Start by saying all uFrames are available */ 3540 *bw_mask = 0xFF; 3541 3542 for (i = 0; (i < leaf_count) || (*bw_mask == 0x00); i++) { 3543 fbp = &ehcip->ehci_frame_bandwidth[leaf + i]; 3544 3545 total_bandwidth += fbp->ehci_allocated_frame_bandwidth; 3546 3547 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3548 /* 3549 * If the uFrame in bw_mask is available check to see if 3550 * it can support the additional bandwidth. 3551 */ 3552 bw_uframe = (*bw_mask & (0x1 << j)); 3553 uframe_total = 3554 fbp->ehci_micro_frame_bandwidth[j] + 3555 bandwidth; 3556 if ((bw_uframe) && 3557 (uframe_total > HS_PERIODIC_BANDWIDTH)) { 3558 *bw_mask = *bw_mask & ~bw_uframe; 3559 } 3560 } 3561 } 3562 3563 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3564 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x", 3565 *bw_mask); 3566 3567 return (total_bandwidth); 3568 } 3569 3570 3571 /* 3572 * ehci_update_bw_availability: 3573 * 3574 * The leftmost leaf needs to be in terms of array position and 3575 * not the actual lattice position. 
3576 */ 3577 static void 3578 ehci_update_bw_availability( 3579 ehci_state_t *ehcip, 3580 int bandwidth, 3581 int leftmost_leaf, 3582 int leaf_count, 3583 uchar_t mask) 3584 { 3585 int i, j; 3586 ehci_frame_bandwidth_t *fbp; 3587 int uFrame_bandwidth[8]; 3588 3589 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3590 "ehci_update_bw_availability: " 3591 "leaf %d count %d bandwidth 0x%x mask 0x%x", 3592 leftmost_leaf, leaf_count, bandwidth, mask); 3593 3594 ASSERT(leftmost_leaf < 32); 3595 ASSERT(leftmost_leaf >= 0); 3596 3597 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3598 if (mask & 0x1) { 3599 uFrame_bandwidth[j] = bandwidth; 3600 } else { 3601 uFrame_bandwidth[j] = 0; 3602 } 3603 3604 mask = mask >> 1; 3605 } 3606 3607 /* Updated all the effected leafs with the bandwidth */ 3608 for (i = 0; i < leaf_count; i++) { 3609 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i]; 3610 3611 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3612 fbp->ehci_micro_frame_bandwidth[j] += 3613 uFrame_bandwidth[j]; 3614 fbp->ehci_allocated_frame_bandwidth += 3615 uFrame_bandwidth[j]; 3616 } 3617 } 3618 } 3619 3620 /* 3621 * Miscellaneous functions 3622 */ 3623 3624 /* 3625 * ehci_obtain_state: 3626 * 3627 * NOTE: This function is also called from POLLED MODE. 3628 */ 3629 ehci_state_t * 3630 ehci_obtain_state(dev_info_t *dip) 3631 { 3632 int instance = ddi_get_instance(dip); 3633 3634 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance); 3635 3636 ASSERT(state != NULL); 3637 3638 return (state); 3639 } 3640 3641 3642 /* 3643 * ehci_state_is_operational: 3644 * 3645 * Check the Host controller state and return proper values. 
3646 */ 3647 int 3648 ehci_state_is_operational(ehci_state_t *ehcip) 3649 { 3650 int val; 3651 3652 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3653 3654 switch (ehcip->ehci_hc_soft_state) { 3655 case EHCI_CTLR_INIT_STATE: 3656 case EHCI_CTLR_SUSPEND_STATE: 3657 val = USB_FAILURE; 3658 break; 3659 case EHCI_CTLR_OPERATIONAL_STATE: 3660 val = USB_SUCCESS; 3661 break; 3662 case EHCI_CTLR_ERROR_STATE: 3663 val = USB_HC_HARDWARE_ERROR; 3664 break; 3665 default: 3666 val = USB_FAILURE; 3667 break; 3668 } 3669 3670 return (val); 3671 } 3672 3673 3674 /* 3675 * ehci_do_soft_reset 3676 * 3677 * Do soft reset of ehci host controller. 3678 */ 3679 int 3680 ehci_do_soft_reset(ehci_state_t *ehcip) 3681 { 3682 usb_frame_number_t before_frame_number, after_frame_number; 3683 ehci_regs_t *ehci_save_regs; 3684 3685 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3686 3687 /* Increment host controller error count */ 3688 ehcip->ehci_hc_error++; 3689 3690 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3691 "ehci_do_soft_reset:" 3692 "Reset ehci host controller 0x%x", ehcip->ehci_hc_error); 3693 3694 /* 3695 * Allocate space for saving current Host Controller 3696 * registers. Don't do any recovery if allocation 3697 * fails. 
3698 */ 3699 ehci_save_regs = (ehci_regs_t *) 3700 kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP); 3701 3702 if (ehci_save_regs == NULL) { 3703 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3704 "ehci_do_soft_reset: kmem_zalloc failed"); 3705 3706 return (USB_FAILURE); 3707 } 3708 3709 /* Save current ehci registers */ 3710 ehci_save_regs->ehci_command = Get_OpReg(ehci_command); 3711 ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt); 3712 ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment); 3713 ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr); 3714 ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag); 3715 ehci_save_regs->ehci_periodic_list_base = 3716 Get_OpReg(ehci_periodic_list_base); 3717 3718 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3719 "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs); 3720 3721 /* Disable all list processing and interrupts */ 3722 Set_OpReg(ehci_command, Get_OpReg(ehci_command) & 3723 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)); 3724 3725 /* Disable all EHCI interrupts */ 3726 Set_OpReg(ehci_interrupt, 0); 3727 3728 /* Wait for few milliseconds */ 3729 drv_usecwait(EHCI_SOF_TIMEWAIT); 3730 3731 /* Do light soft reset of ehci host controller */ 3732 Set_OpReg(ehci_command, 3733 Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET); 3734 3735 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3736 "ehci_do_soft_reset: Reset in progress"); 3737 3738 /* Wait for reset to complete */ 3739 drv_usecwait(EHCI_RESET_TIMEWAIT); 3740 3741 /* 3742 * Restore previous saved EHCI register value 3743 * into the current EHCI registers. 
3744 */ 3745 Set_OpReg(ehci_ctrl_segment, (uint32_t) 3746 ehci_save_regs->ehci_ctrl_segment); 3747 3748 Set_OpReg(ehci_periodic_list_base, (uint32_t) 3749 ehci_save_regs->ehci_periodic_list_base); 3750 3751 Set_OpReg(ehci_async_list_addr, (uint32_t) 3752 ehci_save_regs->ehci_async_list_addr); 3753 3754 /* 3755 * For some reason this register might get nulled out by 3756 * the Uli M1575 South Bridge. To workaround the hardware 3757 * problem, check the value after write and retry if the 3758 * last write fails. 3759 */ 3760 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) && 3761 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) && 3762 (ehci_save_regs->ehci_async_list_addr != 3763 Get_OpReg(ehci_async_list_addr))) { 3764 int retry = 0; 3765 3766 Set_OpRegRetry(ehci_async_list_addr, (uint32_t) 3767 ehci_save_regs->ehci_async_list_addr, retry); 3768 if (retry >= EHCI_MAX_RETRY) { 3769 USB_DPRINTF_L2(PRINT_MASK_ATTA, 3770 ehcip->ehci_log_hdl, "ehci_do_soft_reset:" 3771 " ASYNCLISTADDR write failed."); 3772 3773 return (USB_FAILURE); 3774 } 3775 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3776 "ehci_do_soft_reset: ASYNCLISTADDR " 3777 "write failed, retry=%d", retry); 3778 } 3779 3780 Set_OpReg(ehci_config_flag, (uint32_t) 3781 ehci_save_regs->ehci_config_flag); 3782 3783 /* Enable both Asynchronous and Periodic Schedule if necessary */ 3784 ehci_toggle_scheduler(ehcip); 3785 3786 /* 3787 * Set ehci_interrupt to enable all interrupts except Root 3788 * Hub Status change and frame list rollover interrupts. 3789 */ 3790 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR | 3791 EHCI_INTR_FRAME_LIST_ROLLOVER | 3792 EHCI_INTR_USB_ERROR | 3793 EHCI_INTR_USB); 3794 3795 /* 3796 * Deallocate the space that allocated for saving 3797 * HC registers. 3798 */ 3799 kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t)); 3800 3801 /* 3802 * Set the desired interrupt threshold, frame list size (if 3803 * applicable) and turn EHCI host controller. 
3804 */ 3805 Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) & 3806 ~EHCI_CMD_INTR_THRESHOLD) | 3807 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN))); 3808 3809 /* Wait 10ms for EHCI to start sending SOF */ 3810 drv_usecwait(EHCI_RESET_TIMEWAIT); 3811 3812 /* 3813 * Get the current usb frame number before waiting for 3814 * few milliseconds. 3815 */ 3816 before_frame_number = ehci_get_current_frame_number(ehcip); 3817 3818 /* Wait for few milliseconds */ 3819 drv_usecwait(EHCI_SOF_TIMEWAIT); 3820 3821 /* 3822 * Get the current usb frame number after waiting for 3823 * few milliseconds. 3824 */ 3825 after_frame_number = ehci_get_current_frame_number(ehcip); 3826 3827 USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3828 "ehci_do_soft_reset: Before Frame Number 0x%llx " 3829 "After Frame Number 0x%llx", 3830 (unsigned long long)before_frame_number, 3831 (unsigned long long)after_frame_number); 3832 3833 if ((after_frame_number <= before_frame_number) && 3834 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) { 3835 3836 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl, 3837 "ehci_do_soft_reset: Soft reset failed"); 3838 3839 return (USB_FAILURE); 3840 } 3841 3842 return (USB_SUCCESS); 3843 } 3844 3845 3846 /* 3847 * ehci_get_xfer_attrs: 3848 * 3849 * Get the attributes of a particular xfer. 3850 * 3851 * NOTE: This function is also called from POLLED MODE. 
 */
usb_req_attrs_t
ehci_get_xfer_attrs(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	usb_ep_descr_t	*eptd = &pp->pp_pipe_handle->p_ep;
	usb_req_attrs_t	attrs = USB_ATTRS_NONE;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_get_xfer_attrs:");

	/* Pick the attribute word matching the endpoint's transfer type */
	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
	case USB_EP_ATTR_CONTROL:
		attrs = ((usb_ctrl_req_t *)
		    tw->tw_curr_xfer_reqp)->ctrl_attributes;
		break;
	case USB_EP_ATTR_BULK:
		attrs = ((usb_bulk_req_t *)
		    tw->tw_curr_xfer_reqp)->bulk_attributes;
		break;
	case USB_EP_ATTR_INTR:
		attrs = ((usb_intr_req_t *)
		    tw->tw_curr_xfer_reqp)->intr_attributes;
		break;
	}

	/* No ISOCH case above, so isochronous xfers yield USB_ATTRS_NONE */
	return (attrs);
}


/*
 * ehci_get_current_frame_number:
 *
 * Get the current software based usb frame number.
 */
usb_frame_number_t
ehci_get_current_frame_number(ehci_state_t *ehcip)
{
	usb_frame_number_t	usb_frame_number;
	usb_frame_number_t	ehci_fno, micro_frame_number;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Software-maintained high bits plus hardware FRINDEX low 14 bits */
	ehci_fno = ehcip->ehci_fno;
	micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;

	/*
	 * Calculate current software based usb frame number.
	 *
	 * This code accounts for the fact that frame number is
	 * updated by the Host Controller before the ehci driver
	 * gets an FrameListRollover interrupt that will adjust
	 * Frame higher part.
	 *
	 * Refer ehci specification 1.0, section 2.3.2, page 21.
	 */
	micro_frame_number = ((micro_frame_number & 0x1FFF) |
	    ehci_fno) + (((micro_frame_number & 0x3FFF) ^
	    ehci_fno) & 0x2000);

	/*
	 * Micro Frame number is equivalent to 125 usec. Eight
	 * Micro Frame numbers are equivalent to one millsecond
	 * or one usb frame number.
	 */
	usb_frame_number = micro_frame_number >>
	    EHCI_uFRAMES_PER_USB_FRAME_SHIFT;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_get_current_frame_number: "
	    "Current usb uframe number = 0x%llx "
	    "Current usb frame number = 0x%llx",
	    (unsigned long long)micro_frame_number,
	    (unsigned long long)usb_frame_number);

	return (usb_frame_number);
}


/*
 * ehci_cpr_cleanup:
 *
 * Cleanup ehci state and other ehci specific informations across
 * Check Point Resume (CPR).
 */
static void
ehci_cpr_cleanup(ehci_state_t *ehcip)
{
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Reset software part of usb frame number */
	ehcip->ehci_fno = 0;
}


/*
 * ehci_wait_for_sof:
 *
 * Wait for couple of SOF interrupts
 */
int
ehci_wait_for_sof(ehci_state_t *ehcip)
{
	usb_frame_number_t	before_frame_number, after_frame_number;
	int			error = USB_SUCCESS;

	USB_DPRINTF_L4(PRINT_MASK_LISTS,
	    ehcip->ehci_log_hdl, "ehci_wait_for_sof");

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Bail out early unless the controller is operational */
	error = ehci_state_is_operational(ehcip);

	if (error != USB_SUCCESS) {

		return (error);
	}

	/* Get the current usb frame number before waiting for two SOFs */
	before_frame_number = ehci_get_current_frame_number(ehcip);

	/* Drop the mutex while sleeping; delay() may block */
	mutex_exit(&ehcip->ehci_int_mutex);

	/* Wait for few milliseconds */
	delay(drv_usectohz(EHCI_SOF_TIMEWAIT));

	mutex_enter(&ehcip->ehci_int_mutex);

	/* Get the current usb frame number after woken up */
	after_frame_number = ehci_get_current_frame_number(ehcip);

	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_wait_for_sof: framenumber: before 0x%llx "
	    "after 0x%llx",
	    (unsigned long long)before_frame_number,
	    (unsigned long long)after_frame_number);

	/*
	 * Return failure, if usb frame number has not been changed */
	if (after_frame_number <= before_frame_number) {

		/* Frame counter stalled: attempt a soft reset to recover */
		if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) {

			USB_DPRINTF_L0(PRINT_MASK_LISTS,
			    ehcip->ehci_log_hdl, "No SOF interrupts");

			/* Set host controller soft state to error */
			ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;

			return (USB_FAILURE);
		}

	}

	return (USB_SUCCESS);
}


/*
 * ehci_toggle_scheduler:
 *
 * Turn scheduler based on pipe open count.
 */
void
ehci_toggle_scheduler(ehci_state_t *ehcip)
{
	uint_t	temp_reg, cmd_reg;

	/*
	 * For performance optimization, we need to change the bits
	 * if (async == 1||async == 0) OR (periodic == 1||periodic == 0)
	 *
	 * Related bits already enabled if
	 *	async and periodic req counts are > 1
	 *	OR async req count > 1 & no periodic pipe
	 *	OR periodic req count > 1 & no async pipe
	 */
	if (((ehcip->ehci_async_req_count > 1) &&
	    (ehcip->ehci_periodic_req_count > 1)) ||
	    ((ehcip->ehci_async_req_count > 1) &&
	    (ehcip->ehci_open_periodic_count == 0)) ||
	    ((ehcip->ehci_periodic_req_count > 1) &&
	    (ehcip->ehci_open_async_count == 0))) {
		USB_DPRINTF_L4(PRINT_MASK_ATTA,
		    ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
		    "async/periodic bits no need to change");

		return;
	}

	/* Keep a copy so the register write below can be skipped if equal */
	cmd_reg = Get_OpReg(ehci_command);
	temp_reg = cmd_reg;

	/*
	 * Enable/Disable asynchronous scheduler, and
	 * turn on/off async list door bell
	 */
	if (ehcip->ehci_async_req_count > 1) {
		/* we already enable the async bit */
		USB_DPRINTF_L4(PRINT_MASK_ATTA,
		    ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
		    "async bit already enabled: cmd_reg=0x%x", cmd_reg);
	} else if (ehcip->ehci_async_req_count == 1) {
		if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) {
			/*
			 * For some reason this address might get nulled out by
			 * the ehci chip. Set it here just in case it is null.
			 * If it's not null, we should not reset the
			 * ASYNCLISTADDR, because it's updated by hardware to
			 * point to the next queue head to be executed.
			 */
			if (!Get_OpReg(ehci_async_list_addr)) {
				Set_OpReg(ehci_async_list_addr,
				    ehci_qh_cpu_to_iommu(ehcip,
				    ehcip->ehci_head_of_async_sched_list));
			}

			/*
			 * For some reason this register might get nulled out by
			 * the Uli M1575 Southbridge. To workaround the HW
			 * problem, check the value after write and retry if the
			 * last write fails.
			 *
			 * If the ASYNCLISTADDR remains "stuck" after
			 * EHCI_MAX_RETRY retries, then the M1575 is broken
			 * and is stuck in an inconsistent state and is about
			 * to crash the machine with a trn_oor panic when it
			 * does a DMA read from 0x0. It is better to panic
			 * now rather than wait for the trn_oor crash; this
			 * way Customer Service will have a clean signature
			 * that indicts the M1575 chip rather than a
			 * mysterious and hard-to-diagnose trn_oor panic.
			 */
			if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
			    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
			    (ehci_qh_cpu_to_iommu(ehcip,
			    ehcip->ehci_head_of_async_sched_list) !=
			    Get_OpReg(ehci_async_list_addr))) {
				int retry = 0;

				Set_OpRegRetry(ehci_async_list_addr,
				    ehci_qh_cpu_to_iommu(ehcip,
				    ehcip->ehci_head_of_async_sched_list),
				    retry);
				if (retry >= EHCI_MAX_RETRY)
					cmn_err(CE_PANIC,
					    "ehci_toggle_scheduler: "
					    "ASYNCLISTADDR write failed.");

				USB_DPRINTF_L3(PRINT_MASK_ATTA,
				    ehcip->ehci_log_hdl,
				    "ehci_toggle_scheduler: ASYNCLISTADDR "
				    "write failed, retry=%d", retry);
			}
		}
		cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
	} else {
		/* No async requests outstanding: stop the async schedule */
		cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
	}

	if (ehcip->ehci_periodic_req_count > 1) {
		/* we already enable the periodic bit. */
		USB_DPRINTF_L4(PRINT_MASK_ATTA,
		    ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
		    "periodic bit already enabled: cmd_reg=0x%x", cmd_reg);
	} else if (ehcip->ehci_periodic_req_count == 1) {
		if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) {
			/*
			 * For some reason this address get's nulled out by
			 * the ehci chip. Set it here just in case it is null.
			 */
			Set_OpReg(ehci_periodic_list_base,
			    (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
			    0xFFFFF000));
		}
		cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
	} else {
		/* No periodic requests outstanding: stop periodic schedule */
		cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
	}

	/* Just an optimization */
	if (temp_reg != cmd_reg) {
		Set_OpReg(ehci_command, cmd_reg);

		/* To make sure the command register is updated correctly */
		if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
		    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
			int retry = 0;

			Set_OpRegRetry(ehci_command, cmd_reg, retry);
			USB_DPRINTF_L3(PRINT_MASK_ATTA,
			    ehcip->ehci_log_hdl,
			    "ehci_toggle_scheduler: CMD write failed, retry=%d",
			    retry);
		}

	}
}

/*
 * ehci print functions
 */

/*
 * ehci_print_caps:
 *
 * Dump the capability registers (debug logging only).
 */
void
ehci_print_caps(ehci_state_t *ehcip)
{
	uint_t	i;

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "\n\tUSB 2.0 Host Controller Characteristics\n");

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "Caps Length: 0x%x Version: 0x%x\n",
	    Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version));

	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "Structural Parameters\n");
	USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "Port indicators: %s", (Get_Cap(ehci_hcs_params) &
	    EHCI_HCS_PORT_INDICATOR) ?
"Yes" : "No"); 4176 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4177 "No of Classic host controllers: 0x%x", 4178 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS) 4179 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT); 4180 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4181 "No of ports per Classic host controller: 0x%x", 4182 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC) 4183 >> EHCI_HCS_NUM_PORTS_CC_SHIFT); 4184 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4185 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) & 4186 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No"); 4187 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4188 "Port power control: %s", (Get_Cap(ehci_hcs_params) & 4189 EHCI_HCS_PORT_POWER_CONTROL) ? "Yes" : "No"); 4190 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4191 "No of root hub ports: 0x%x\n", 4192 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); 4193 4194 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4195 "Capability Parameters\n"); 4196 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4197 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) & 4198 EHCI_HCC_EECP) ? "Yes" : "No"); 4199 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4200 "Isoch schedule threshold: 0x%x", 4201 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD); 4202 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4203 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) & 4204 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No"); 4205 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4206 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) & 4207 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024"); 4208 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4209 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) & 4210 EHCI_HCC_64BIT_ADDR_CAP) ? 
"Yes" : "No"); 4211 4212 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4213 "Classic Port Route Description"); 4214 4215 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) { 4216 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4217 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i])); 4218 } 4219 } 4220 4221 4222 /* 4223 * ehci_print_regs: 4224 */ 4225 void 4226 ehci_print_regs(ehci_state_t *ehcip) 4227 { 4228 uint_t i; 4229 4230 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4231 "\n\tEHCI%d Operational Registers\n", 4232 ddi_get_instance(ehcip->ehci_dip)); 4233 4234 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4235 "Command: 0x%x Status: 0x%x", 4236 Get_OpReg(ehci_command), Get_OpReg(ehci_status)); 4237 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4238 "Interrupt: 0x%x Frame Index: 0x%x", 4239 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index)); 4240 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4241 "Control Segment: 0x%x Periodic List Base: 0x%x", 4242 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base)); 4243 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4244 "Async List Addr: 0x%x Config Flag: 0x%x", 4245 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag)); 4246 4247 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4248 "Root Hub Port Status"); 4249 4250 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) { 4251 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 4252 "\tPort Status 0x%x: 0x%x ", i, 4253 Get_OpReg(ehci_rh_port_status[i])); 4254 } 4255 } 4256 4257 4258 /* 4259 * ehci_print_qh: 4260 */ 4261 void 4262 ehci_print_qh( 4263 ehci_state_t *ehcip, 4264 ehci_qh_t *qh) 4265 { 4266 uint_t i; 4267 4268 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4269 "ehci_print_qh: qh = 0x%p", (void *)qh); 4270 4271 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 4272 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr)); 4273 
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqh_status: 0x%x ", Get_QH(qh->qh_status));

	/* EHCI queue heads carry five buffer page pointer slots */
	for (i = 0; i < 5; i++) {
		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i]));
	}

	/* High 32 bits of each buffer pointer (64-bit addressing) */
	for (i = 0; i < 5; i++) {
		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "\tqh_buf_high[%d]: 0x%x ",
		    i, Get_QH(qh->qh_buf_high[i]));
	}

	/* Driver-private (software-only) fields of the queue head */
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqh_state: 0x%x ", Get_QH(qh->qh_state));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame));
}


/*
 * ehci_print_qtd:
 *
 * Dump every field of the given EHCI Queue Element Transfer Descriptor
 * (qTD) through USB_DPRINTF_L3 (PRINT_MASK_LISTS) for debugging.  Each
 * field is read through the Get_QTD() accessor, so descriptor memory is
 * accessed with the proper DDI access handle.  Debug/trace only; has no
 * effect on the state of the descriptor.
 */
void
ehci_print_qtd(
	ehci_state_t	*ehcip,
	ehci_qtd_t	*qtd)
{
	uint_t		i;

	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_print_qtd: qtd = 0x%p", (void *)qtd);

	/* Hardware-defined qTD fields */
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl));

	/* EHCI qTDs carry five buffer page pointer slots */
	for (i = 0; i < 5; i++) {
		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i]));
	}

	/* High 32 bits of each buffer pointer (64-bit addressing) */
	for (i = 0; i < 5; i++) {
		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "\tqtd_buf_high[%d]: 0x%x ",
		    i, Get_QTD(qtd->qtd_buf_high[i]));
	}

	/* Driver-private (software-only) fields of the qTD */
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_xfer_offs: 0x%x ", Get_QTD(qtd->qtd_xfer_offs));
	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len));
}

/*
 * ehci kstat functions
 */

/*
 * ehci_create_stats:
 *
 * Allocate and initialize the ehci kstat structures
 *
 * Creates three groups of kstats under module "usba" for this controller
 * instance, skipping any that already exist (so the routine is safe to
 * call more than once):
 *
 *	<driver><inst>,intrs	- named kstat with one uint64 counter per
 *				  interrupt status condition (updated by
 *				  ehci_do_intrs_stats() below)
 *	<driver><inst>,total	- KSTAT_TYPE_IO byte/operation totals
 *	<driver><inst>,<type>	- per-transfer-type I/O kstats, one each
 *				  for ctrl/isoch/bulk/intr
 *
 * kstat_create() failures are tolerated silently: the corresponding
 * pointer simply remains NULL and the update routines check for that.
 */
void
ehci_create_stats(ehci_state_t	*ehcip)
{
	char			kstatname[KSTAT_STRLEN];
	const char		*dname = ddi_driver_name(ehcip->ehci_dip);
	char			*usbtypes[USB_N_COUNT_KSTATS] =
	    {"ctrl", "isoch", "bulk", "intr"};
	uint_t			instance = ehcip->ehci_instance;
	ehci_intrs_stats_t	*isp;
	int			i;

	if (EHCI_INTRS_STATS(ehcip) == NULL) {
		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
		    dname, instance);
		EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance,
		    kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
		    sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t),
		    KSTAT_FLAG_PERSISTENT);

		if (EHCI_INTRS_STATS(ehcip)) {
			isp = EHCI_INTRS_STATS_DATA(ehcip);
			kstat_named_init(&isp->ehci_sts_total,
			    "Interrupts Total", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_not_claimed,
			    "Not Claimed", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_async_sched_status,
			    "Async schedule status", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_periodic_sched_status,
			    "Periodic sched status", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_empty_async_schedule,
			    "Empty async schedule", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_host_ctrl_halted,
			    "Host controller Halted", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_async_advance_intr,
			    "Intr on async advance", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_host_system_error_intr,
			    "Host system error", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr,
			    "Frame list rollover", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_rh_port_change_intr,
			    "Port change detect", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_usb_error_intr,
			    "USB error interrupt", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->ehci_sts_usb_intr,
			    "USB interrupt", KSTAT_DATA_UINT64);

			/*
			 * Counters are bumped in place by the driver
			 * (ehci_do_intrs_stats()), so no ks_update
			 * routine is needed; nulldev is a no-op.
			 */
			EHCI_INTRS_STATS(ehcip)->ks_private = ehcip;
			EHCI_INTRS_STATS(ehcip)->ks_update = nulldev;
			kstat_install(EHCI_INTRS_STATS(ehcip));
		}
	}

	if (EHCI_TOTAL_STATS(ehcip) == NULL) {
		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
		    dname, instance);
		EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance,
		    kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
		    KSTAT_FLAG_PERSISTENT);

		if (EHCI_TOTAL_STATS(ehcip)) {
			kstat_install(EHCI_TOTAL_STATS(ehcip));
		}
	}

	/* One I/O kstat per USB transfer type: ctrl, isoch, bulk, intr */
	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
		if (ehcip->ehci_count_stats[i] == NULL) {
			(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
			    dname, instance, usbtypes[i]);
			ehcip->ehci_count_stats[i] = kstat_create("usba",
			    instance, kstatname, "usb_byte_count",
			    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);

			if (ehcip->ehci_count_stats[i]) {
				kstat_install(ehcip->ehci_count_stats[i]);
			}
		}
	}
}


/*
 * ehci_destroy_stats:
 *
 * Clean up ehci kstat structures
 *
 * Deletes every kstat created by ehci_create_stats() and NULLs the
 * stored pointers so the update routines (which check for NULL) and a
 * later re-create are both safe.  Kstats that were never successfully
 * created are skipped.
 */
void
ehci_destroy_stats(ehci_state_t	*ehcip)
{
	int	i;

	if (EHCI_INTRS_STATS(ehcip)) {
		kstat_delete(EHCI_INTRS_STATS(ehcip));
		EHCI_INTRS_STATS(ehcip) = NULL;
	}

	if (EHCI_TOTAL_STATS(ehcip)) {
		kstat_delete(EHCI_TOTAL_STATS(ehcip));
		EHCI_TOTAL_STATS(ehcip) = NULL;
	}

	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
		if (ehcip->ehci_count_stats[i]) {
			kstat_delete(ehcip->ehci_count_stats[i]);
			ehcip->ehci_count_stats[i] = NULL;
		}
	}
}


/*
 * ehci_do_intrs_stats:
 *
 * ehci status information
 *
 * Record one interrupt occurrence in the "intrs" named kstat: the total
 * is always incremented, then the counter matching the given EHCI status
 * condition (val) is bumped.  A value that matches no known condition is
 * counted as "Not Claimed".  No-op if the kstat was never created.
 */
void
ehci_do_intrs_stats(
	ehci_state_t	*ehcip,
	int		val)
{
	if (EHCI_INTRS_STATS(ehcip)) {
		EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++;
		switch (val) {
		case EHCI_STS_ASYNC_SCHED_STATUS:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_async_sched_status.value.ui64++;
			break;
		case EHCI_STS_PERIODIC_SCHED_STATUS:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_periodic_sched_status.value.ui64++;
			break;
		case EHCI_STS_EMPTY_ASYNC_SCHEDULE:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_empty_async_schedule.value.ui64++;
			break;
		case EHCI_STS_HOST_CTRL_HALTED:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_host_ctrl_halted.value.ui64++;
			break;
		case EHCI_STS_ASYNC_ADVANCE_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_async_advance_intr.value.ui64++;
			break;
		case EHCI_STS_HOST_SYSTEM_ERROR_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_host_system_error_intr.value.ui64++;
			break;
		case EHCI_STS_FRM_LIST_ROLLOVER_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_frm_list_rollover_intr.value.ui64++;
			break;
		case EHCI_STS_RH_PORT_CHANGE_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_rh_port_change_intr.value.ui64++;
			break;
		case EHCI_STS_USB_ERROR_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_usb_error_intr.value.ui64++;
			break;
		case EHCI_STS_USB_INTR:
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_usb_intr.value.ui64++;
			break;
		default:
			/* Unrecognized status condition */
			EHCI_INTRS_STATS_DATA(ehcip)->
			    ehci_sts_not_claimed.value.ui64++;
			break;
		}
	}
}


/*
 * ehci_do_byte_stats:
 *
 * ehci data xfer information
 *
 * Account a completed transfer of 'len' bytes in the I/O kstats: the
 * grand total plus the per-type kstat selected by the endpoint
 * attributes (attr) - control, bulk, interrupt or isochronous.  The
 * endpoint address direction bit (addr) decides whether the transfer
 * counts as a read (IN) or a write (OUT).
 */
void
ehci_do_byte_stats(
	ehci_state_t	*ehcip,
	size_t		len,
	uint8_t		attr,
	uint8_t		addr)
{
	uint8_t	type = attr & USB_EP_ATTR_MASK;
	uint8_t	dir = addr & USB_EP_DIR_MASK;

	if (dir == USB_EP_DIR_IN) {
		EHCI_TOTAL_STATS_DATA(ehcip)->reads++;
		EHCI_TOTAL_STATS_DATA(ehcip)->nread += len;
		/* All four USB transfer types are covered below */
		switch (type) {
		case USB_EP_ATTR_CONTROL:
			EHCI_CTRL_STATS(ehcip)->reads++;
			EHCI_CTRL_STATS(ehcip)->nread += len;
			break;
		case USB_EP_ATTR_BULK:
			EHCI_BULK_STATS(ehcip)->reads++;
			EHCI_BULK_STATS(ehcip)->nread += len;
			break;
		case USB_EP_ATTR_INTR:
			EHCI_INTR_STATS(ehcip)->reads++;
			EHCI_INTR_STATS(ehcip)->nread += len;
			break;
		case USB_EP_ATTR_ISOCH:
			EHCI_ISOC_STATS(ehcip)->reads++;
			EHCI_ISOC_STATS(ehcip)->nread += len;
			break;
		}
	} else if (dir == USB_EP_DIR_OUT) {
		EHCI_TOTAL_STATS_DATA(ehcip)->writes++;
		EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len;
		switch (type) {
		case USB_EP_ATTR_CONTROL:
			EHCI_CTRL_STATS(ehcip)->writes++;
			EHCI_CTRL_STATS(ehcip)->nwritten += len;
			break;
		case USB_EP_ATTR_BULK:
			EHCI_BULK_STATS(ehcip)->writes++;
			EHCI_BULK_STATS(ehcip)->nwritten += len;
			break;
		case USB_EP_ATTR_INTR:
			EHCI_INTR_STATS(ehcip)->writes++;
			EHCI_INTR_STATS(ehcip)->nwritten += len;
			break;
		case USB_EP_ATTR_ISOCH:
			EHCI_ISOC_STATS(ehcip)->writes++;
			EHCI_ISOC_STATS(ehcip)->nwritten += len;
			break;
		}
	}
}