1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * EHCI Host Controller Driver (EHCI) 31 * 32 * The EHCI driver is a software driver which interfaces to the Universal 33 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to 34 * the Host Controller is defined by the EHCI Host Controller Interface. 35 * 36 * This module contains the main EHCI driver code which handles all USB 37 * transfers, bandwidth allocations and other general functionalities. 
38 */ 39 40 #include <sys/usb/hcd/ehci/ehcid.h> 41 #include <sys/usb/hcd/ehci/ehci_isoch.h> 42 #include <sys/usb/hcd/ehci/ehci_xfer.h> 43 44 /* Pointer to the state structure */ 45 extern void *ehci_statep; 46 47 extern void ehci_handle_endpoint_reclaimation(ehci_state_t *); 48 49 extern uint_t ehci_vt62x2_workaround; 50 51 /* Adjustable variables for the size of the pools */ 52 int ehci_qh_pool_size = EHCI_QH_POOL_SIZE; 53 int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE; 54 55 /* 56 * Initialize the values which the order of 32ms intr qh are executed 57 * by the host controller in the lattice tree. 58 */ 59 static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] = 60 {0x00, 0x10, 0x08, 0x18, 61 0x04, 0x14, 0x0c, 0x1c, 62 0x02, 0x12, 0x0a, 0x1a, 63 0x06, 0x16, 0x0e, 0x1e, 64 0x01, 0x11, 0x09, 0x19, 65 0x05, 0x15, 0x0d, 0x1d, 66 0x03, 0x13, 0x0b, 0x1b, 67 0x07, 0x17, 0x0f, 0x1f}; 68 69 /* 70 * Initialize the values which are used to calculate start split mask 71 * for the low/full/high speed interrupt and isochronous endpoints. 72 */ 73 static uint_t ehci_start_split_mask[15] = { 74 /* 75 * For high/full/low speed usb devices. For high speed 76 * device with polling interval greater than or equal 77 * to 8us (125us). 78 */ 79 0x01, /* 00000001 */ 80 0x02, /* 00000010 */ 81 0x04, /* 00000100 */ 82 0x08, /* 00001000 */ 83 0x10, /* 00010000 */ 84 0x20, /* 00100000 */ 85 0x40, /* 01000000 */ 86 0x80, /* 10000000 */ 87 88 /* Only for high speed devices with polling interval 4us */ 89 0x11, /* 00010001 */ 90 0x22, /* 00100010 */ 91 0x44, /* 01000100 */ 92 0x88, /* 10001000 */ 93 94 /* Only for high speed devices with polling interval 2us */ 95 0x55, /* 01010101 */ 96 0xaa, /* 10101010 */ 97 98 /* Only for high speed devices with polling interval 1us */ 99 0xff /* 11111111 */ 100 }; 101 102 /* 103 * Initialize the values which are used to calculate complete split mask 104 * for the low/full speed interrupt and isochronous endpoints. 
105 */ 106 static uint_t ehci_intr_complete_split_mask[7] = { 107 /* Only full/low speed devices */ 108 0x1c, /* 00011100 */ 109 0x38, /* 00111000 */ 110 0x70, /* 01110000 */ 111 0xe0, /* 11100000 */ 112 0x00, /* Need FSTN feature */ 113 0x00, /* Need FSTN feature */ 114 0x00 /* Need FSTN feature */ 115 }; 116 117 118 /* 119 * EHCI Internal Function Prototypes 120 */ 121 122 /* Host Controller Driver (HCD) initialization functions */ 123 void ehci_set_dma_attributes(ehci_state_t *ehcip); 124 int ehci_allocate_pools(ehci_state_t *ehcip); 125 void ehci_decode_ddi_dma_addr_bind_handle_result( 126 ehci_state_t *ehcip, 127 int result); 128 int ehci_map_regs(ehci_state_t *ehcip); 129 int ehci_register_intrs_and_init_mutex( 130 ehci_state_t *ehcip); 131 int ehci_init_ctlr(ehci_state_t *ehcip); 132 static int ehci_take_control(ehci_state_t *ehcip); 133 static int ehci_init_periodic_frame_lst_table( 134 ehci_state_t *ehcip); 135 static void ehci_build_interrupt_lattice( 136 ehci_state_t *ehcip); 137 usba_hcdi_ops_t *ehci_alloc_hcdi_ops(ehci_state_t *ehcip); 138 139 /* Host Controller Driver (HCD) deinitialization functions */ 140 int ehci_cleanup(ehci_state_t *ehcip); 141 int ehci_cpr_suspend(ehci_state_t *ehcip); 142 int ehci_cpr_resume(ehci_state_t *ehcip); 143 144 /* Bandwidth Allocation functions */ 145 int ehci_allocate_bandwidth(ehci_state_t *ehcip, 146 usba_pipe_handle_data_t *ph, 147 uint_t *pnode, 148 uchar_t *smask, 149 uchar_t *cmask); 150 static int ehci_allocate_high_speed_bandwidth( 151 ehci_state_t *ehcip, 152 usba_pipe_handle_data_t *ph, 153 uint_t *hnode, 154 uchar_t *smask, 155 uchar_t *cmask); 156 static int ehci_allocate_classic_tt_bandwidth( 157 ehci_state_t *ehcip, 158 usba_pipe_handle_data_t *ph, 159 uint_t pnode); 160 void ehci_deallocate_bandwidth(ehci_state_t *ehcip, 161 usba_pipe_handle_data_t *ph, 162 uint_t pnode, 163 uchar_t smask, 164 uchar_t cmask); 165 static void ehci_deallocate_high_speed_bandwidth( 166 ehci_state_t *ehcip, 167 
usba_pipe_handle_data_t *ph, 168 uint_t hnode, 169 uchar_t smask, 170 uchar_t cmask); 171 static void ehci_deallocate_classic_tt_bandwidth( 172 ehci_state_t *ehcip, 173 usba_pipe_handle_data_t *ph, 174 uint_t pnode); 175 static int ehci_compute_high_speed_bandwidth( 176 ehci_state_t *ehcip, 177 usb_ep_descr_t *endpoint, 178 usb_port_status_t port_status, 179 uint_t *sbandwidth, 180 uint_t *cbandwidth); 181 static int ehci_compute_classic_bandwidth( 182 usb_ep_descr_t *endpoint, 183 usb_port_status_t port_status, 184 uint_t *bandwidth); 185 int ehci_adjust_polling_interval( 186 ehci_state_t *ehcip, 187 usb_ep_descr_t *endpoint, 188 usb_port_status_t port_status); 189 static int ehci_adjust_high_speed_polling_interval( 190 ehci_state_t *ehcip, 191 usb_ep_descr_t *endpoint); 192 static uint_t ehci_lattice_height(uint_t interval); 193 static uint_t ehci_lattice_parent(uint_t node); 194 static uint_t ehci_find_periodic_node( 195 uint_t leaf, 196 int interval); 197 static uint_t ehci_leftmost_leaf(uint_t node, 198 uint_t height); 199 static uint_t ehci_pow_2(uint_t x); 200 static uint_t ehci_log_2(uint_t x); 201 static int ehci_find_bestfit_hs_mask( 202 ehci_state_t *ehcip, 203 uchar_t *smask, 204 uint_t *pnode, 205 usb_ep_descr_t *endpoint, 206 uint_t bandwidth, 207 int interval); 208 static int ehci_find_bestfit_ls_intr_mask( 209 ehci_state_t *ehcip, 210 uchar_t *smask, 211 uchar_t *cmask, 212 uint_t *pnode, 213 uint_t sbandwidth, 214 uint_t cbandwidth, 215 int interval); 216 static int ehci_find_bestfit_sitd_in_mask( 217 ehci_state_t *ehcip, 218 uchar_t *smask, 219 uchar_t *cmask, 220 uint_t *pnode, 221 uint_t sbandwidth, 222 uint_t cbandwidth, 223 int interval); 224 static int ehci_find_bestfit_sitd_out_mask( 225 ehci_state_t *ehcip, 226 uchar_t *smask, 227 uint_t *pnode, 228 uint_t sbandwidth, 229 int interval); 230 static uint_t ehci_calculate_bw_availability_mask( 231 ehci_state_t *ehcip, 232 uint_t bandwidth, 233 int leaf, 234 int leaf_count, 235 uchar_t 
*bw_mask); 236 static void ehci_update_bw_availability( 237 ehci_state_t *ehcip, 238 int bandwidth, 239 int leftmost_leaf, 240 int leaf_count, 241 uchar_t mask); 242 243 /* Miscellaneous functions */ 244 ehci_state_t *ehci_obtain_state( 245 dev_info_t *dip); 246 int ehci_state_is_operational( 247 ehci_state_t *ehcip); 248 int ehci_do_soft_reset( 249 ehci_state_t *ehcip); 250 usb_req_attrs_t ehci_get_xfer_attrs(ehci_state_t *ehcip, 251 ehci_pipe_private_t *pp, 252 ehci_trans_wrapper_t *tw); 253 usb_frame_number_t ehci_get_current_frame_number( 254 ehci_state_t *ehcip); 255 static void ehci_cpr_cleanup( 256 ehci_state_t *ehcip); 257 int ehci_wait_for_sof( 258 ehci_state_t *ehcip); 259 void ehci_toggle_scheduler( 260 ehci_state_t *ehcip); 261 void ehci_print_caps(ehci_state_t *ehcip); 262 void ehci_print_regs(ehci_state_t *ehcip); 263 void ehci_print_qh(ehci_state_t *ehcip, 264 ehci_qh_t *qh); 265 void ehci_print_qtd(ehci_state_t *ehcip, 266 ehci_qtd_t *qtd); 267 void ehci_create_stats(ehci_state_t *ehcip); 268 void ehci_destroy_stats(ehci_state_t *ehcip); 269 void ehci_do_intrs_stats(ehci_state_t *ehcip, 270 int val); 271 void ehci_do_byte_stats(ehci_state_t *ehcip, 272 size_t len, 273 uint8_t attr, 274 uint8_t addr); 275 276 /* 277 * Host Controller Driver (HCD) initialization functions 278 */ 279 280 /* 281 * ehci_set_dma_attributes: 282 * 283 * Set the limits in the DMA attributes structure. Most of the values used 284 * in the DMA limit structures are the default values as specified by the 285 * Writing PCI device drivers document. 
286 */ 287 void 288 ehci_set_dma_attributes(ehci_state_t *ehcip) 289 { 290 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 291 "ehci_set_dma_attributes:"); 292 293 /* Initialize the DMA attributes */ 294 ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0; 295 ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull; 296 ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull; 297 298 /* 32 bit addressing */ 299 ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX; 300 301 /* Byte alignment */ 302 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 303 304 /* 305 * Since PCI specification is byte alignment, the 306 * burst size field should be set to 1 for PCI devices. 307 */ 308 ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1; 309 310 ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1; 311 ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER; 312 ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull; 313 ehcip->ehci_dma_attr.dma_attr_sgllen = 1; 314 ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR; 315 ehcip->ehci_dma_attr.dma_attr_flags = 0; 316 } 317 318 319 /* 320 * ehci_allocate_pools: 321 * 322 * Allocate the system memory for the Endpoint Descriptor (QH) and for the 323 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned 324 * to a 16 byte boundary. 
325 */ 326 int 327 ehci_allocate_pools(ehci_state_t *ehcip) 328 { 329 ddi_device_acc_attr_t dev_attr; 330 size_t real_length; 331 int result; 332 uint_t ccount; 333 int i; 334 335 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 336 "ehci_allocate_pools:"); 337 338 /* The host controller will be little endian */ 339 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 340 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 341 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 342 343 /* Byte alignment */ 344 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT; 345 346 /* Allocate the QTD pool DMA handle */ 347 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr, 348 DDI_DMA_SLEEP, 0, 349 &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) { 350 351 goto failure; 352 } 353 354 /* Allocate the memory for the QTD pool */ 355 if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle, 356 ehci_qtd_pool_size * sizeof (ehci_qtd_t), 357 &dev_attr, 358 DDI_DMA_CONSISTENT, 359 DDI_DMA_SLEEP, 360 0, 361 (caddr_t *)&ehcip->ehci_qtd_pool_addr, 362 &real_length, 363 &ehcip->ehci_qtd_pool_mem_handle)) { 364 365 goto failure; 366 } 367 368 /* Map the QTD pool into the I/O address space */ 369 result = ddi_dma_addr_bind_handle( 370 ehcip->ehci_qtd_pool_dma_handle, 371 NULL, 372 (caddr_t)ehcip->ehci_qtd_pool_addr, 373 real_length, 374 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 375 DDI_DMA_SLEEP, 376 NULL, 377 &ehcip->ehci_qtd_pool_cookie, 378 &ccount); 379 380 bzero((void *)ehcip->ehci_qtd_pool_addr, 381 ehci_qtd_pool_size * sizeof (ehci_qtd_t)); 382 383 /* Process the result */ 384 if (result == DDI_DMA_MAPPED) { 385 /* The cookie count should be 1 */ 386 if (ccount != 1) { 387 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 388 "ehci_allocate_pools: More than 1 cookie"); 389 390 goto failure; 391 } 392 } else { 393 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 394 "ehci_allocate_pools: Result = %d", result); 395 396 
ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result); 397 398 goto failure; 399 } 400 401 /* 402 * DMA addresses for QTD pools are bound 403 */ 404 ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND; 405 406 /* Initialize the QTD pool */ 407 for (i = 0; i < ehci_qtd_pool_size; i ++) { 408 Set_QTD(ehcip->ehci_qtd_pool_addr[i]. 409 qtd_state, EHCI_QTD_FREE); 410 } 411 412 /* Allocate the QTD pool DMA handle */ 413 if (ddi_dma_alloc_handle(ehcip->ehci_dip, 414 &ehcip->ehci_dma_attr, 415 DDI_DMA_SLEEP, 416 0, 417 &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) { 418 419 goto failure; 420 } 421 422 /* Allocate the memory for the QH pool */ 423 if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle, 424 ehci_qh_pool_size * sizeof (ehci_qh_t), 425 &dev_attr, 426 DDI_DMA_CONSISTENT, 427 DDI_DMA_SLEEP, 428 0, 429 (caddr_t *)&ehcip->ehci_qh_pool_addr, 430 &real_length, 431 &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) { 432 433 goto failure; 434 } 435 436 result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle, 437 NULL, 438 (caddr_t)ehcip->ehci_qh_pool_addr, 439 real_length, 440 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 441 DDI_DMA_SLEEP, 442 NULL, 443 &ehcip->ehci_qh_pool_cookie, 444 &ccount); 445 446 bzero((void *)ehcip->ehci_qh_pool_addr, 447 ehci_qh_pool_size * sizeof (ehci_qh_t)); 448 449 /* Process the result */ 450 if (result == DDI_DMA_MAPPED) { 451 /* The cookie count should be 1 */ 452 if (ccount != 1) { 453 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 454 "ehci_allocate_pools: More than 1 cookie"); 455 456 goto failure; 457 } 458 } else { 459 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result); 460 461 goto failure; 462 } 463 464 /* 465 * DMA addresses for QH pools are bound 466 */ 467 ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND; 468 469 /* Initialize the QH pool */ 470 for (i = 0; i < ehci_qh_pool_size; i ++) { 471 Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE); 472 } 473 474 /* Byte alignment */ 475 
ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 476 477 return (DDI_SUCCESS); 478 479 failure: 480 /* Byte alignment */ 481 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT; 482 483 return (DDI_FAILURE); 484 } 485 486 487 /* 488 * ehci_decode_ddi_dma_addr_bind_handle_result: 489 * 490 * Process the return values of ddi_dma_addr_bind_handle() 491 */ 492 void 493 ehci_decode_ddi_dma_addr_bind_handle_result( 494 ehci_state_t *ehcip, 495 int result) 496 { 497 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl, 498 "ehci_decode_ddi_dma_addr_bind_handle_result:"); 499 500 switch (result) { 501 case DDI_DMA_PARTIAL_MAP: 502 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 503 "Partial transfers not allowed"); 504 break; 505 case DDI_DMA_INUSE: 506 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 507 "Handle is in use"); 508 break; 509 case DDI_DMA_NORESOURCES: 510 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 511 "No resources"); 512 break; 513 case DDI_DMA_NOMAPPING: 514 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 515 "No mapping"); 516 break; 517 case DDI_DMA_TOOBIG: 518 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 519 "Object is too big"); 520 break; 521 default: 522 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl, 523 "Unknown dma error"); 524 } 525 } 526 527 528 /* 529 * ehci_map_regs: 530 * 531 * The Host Controller (HC) contains a set of on-chip operational registers 532 * and which should be mapped into a non-cacheable portion of the system 533 * addressable space. 
534 */ 535 int 536 ehci_map_regs(ehci_state_t *ehcip) 537 { 538 ddi_device_acc_attr_t attr; 539 uint16_t cmd_reg; 540 uint_t length; 541 542 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:"); 543 544 /* Check to make sure we have memory access */ 545 if (pci_config_setup(ehcip->ehci_dip, 546 &ehcip->ehci_config_handle) != DDI_SUCCESS) { 547 548 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 549 "ehci_map_regs: Config error"); 550 551 return (DDI_FAILURE); 552 } 553 554 /* Make sure Memory Access Enable is set */ 555 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM); 556 557 if (!(cmd_reg & PCI_COMM_MAE)) { 558 559 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 560 "ehci_map_regs: Memory base address access disabled"); 561 562 return (DDI_FAILURE); 563 } 564 565 /* The host controller will be little endian */ 566 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 567 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 568 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 569 570 /* Map in EHCI Capability registers */ 571 if (ddi_regs_map_setup(ehcip->ehci_dip, 1, 572 (caddr_t *)&ehcip->ehci_capsp, 0, 573 sizeof (ehci_caps_t), &attr, 574 &ehcip->ehci_caps_handle) != DDI_SUCCESS) { 575 576 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 577 "ehci_map_regs: Map setup error"); 578 579 return (DDI_FAILURE); 580 } 581 582 length = ddi_get8(ehcip->ehci_caps_handle, 583 (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length); 584 585 /* Free the original mapping */ 586 ddi_regs_map_free(&ehcip->ehci_caps_handle); 587 588 /* Re-map in EHCI Capability and Operational registers */ 589 if (ddi_regs_map_setup(ehcip->ehci_dip, 1, 590 (caddr_t *)&ehcip->ehci_capsp, 0, 591 length + sizeof (ehci_regs_t), &attr, 592 &ehcip->ehci_caps_handle) != DDI_SUCCESS) { 593 594 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 595 "ehci_map_regs: Map setup error"); 596 597 return (DDI_FAILURE); 598 } 599 600 /* Get the pointer to EHCI 
Operational Register */ 601 ehcip->ehci_regsp = (ehci_regs_t *) 602 ((uintptr_t)ehcip->ehci_capsp + length); 603 604 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 605 "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n", 606 ehcip->ehci_capsp, ehcip->ehci_regsp); 607 608 return (DDI_SUCCESS); 609 } 610 611 612 /* 613 * ehci_register_intrs_and_init_mutex: 614 * 615 * Register interrupts and initialize each mutex and condition variables 616 */ 617 int 618 ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip) 619 { 620 int type, count = 0, actual, ret; 621 622 #if defined(__x86) 623 uint8_t iline; 624 #endif 625 626 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 627 "ehci_register_intrs_and_init_mutex:"); 628 629 #if defined(__x86) 630 /* 631 * Make sure that the interrupt pin is connected to the 632 * interrupt controller on x86. Interrupt line 255 means 633 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43). 634 */ 635 iline = pci_config_get8(ehcip->ehci_config_handle, 636 PCI_CONF_ILINE); 637 638 if (iline == 255) { 639 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 640 "ehci_register_intrs_and_init_mutex: " 641 "interrupt line value out of range (%d)", 642 iline); 643 644 return (DDI_FAILURE); 645 } 646 #endif /* __x86 */ 647 648 ret = ddi_intr_get_supported_types(ehcip->ehci_dip, &type); 649 650 if ((ret != DDI_SUCCESS) || (!(type & DDI_INTR_TYPE_FIXED))) { 651 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 652 "ehci_register_intrs_and_init_mutex: " 653 "Fixed type interrupt is not supported"); 654 655 return (DDI_FAILURE); 656 } 657 658 ret = ddi_intr_get_nintrs(ehcip->ehci_dip, DDI_INTR_TYPE_FIXED, &count); 659 660 /* 661 * Fixed interrupts can only have one interrupt. Check to make 662 * sure that number of supported interrupts and number of 663 * available interrupts are both equal to 1. 
664 */ 665 if ((ret != DDI_SUCCESS) || (count != 1)) { 666 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 667 "ehci_register_intrs_and_init_mutex: " 668 "no fixed interrupts"); 669 670 return (DDI_FAILURE); 671 } 672 673 ehcip->ehci_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP); 674 ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable, 675 DDI_INTR_TYPE_FIXED, 0, count, &actual, 0); 676 677 if ((ret != DDI_SUCCESS) || (actual != 1)) { 678 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 679 "ehci_register_intrs_and_init_mutex: " 680 "ddi_intr_alloc() failed 0x%x", ret); 681 682 kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t)); 683 684 return (DDI_FAILURE); 685 } 686 687 /* Sanity check that count and avail are the same. */ 688 ASSERT(count == actual); 689 690 if (ddi_intr_get_pri(ehcip->ehci_htable[0], &ehcip->ehci_intr_pri)) { 691 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 692 "ehci_register_intrs_and_init_mutex: " 693 "ddi_intr_get_pri() failed"); 694 695 (void) ddi_intr_free(ehcip->ehci_htable[0]); 696 kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t)); 697 698 return (DDI_FAILURE); 699 } 700 701 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 702 "Supported Interrupt priority = 0x%x", ehcip->ehci_intr_pri); 703 704 /* Test for high level mutex */ 705 if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) { 706 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 707 "ehci_register_intrs_and_init_mutex: " 708 "Hi level interrupt not supported"); 709 710 (void) ddi_intr_free(ehcip->ehci_htable[0]); 711 kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t)); 712 713 return (DDI_FAILURE); 714 } 715 716 /* Initialize the mutex */ 717 mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER, 718 (void *)(uintptr_t)ehcip->ehci_intr_pri); 719 720 if (ddi_intr_add_handler(ehcip->ehci_htable[0], 721 (ddi_intr_handler_t *)ehci_intr, (caddr_t)ehcip, NULL) != 722 DDI_SUCCESS) { 723 USB_DPRINTF_L2(PRINT_MASK_ATTA, 
ehcip->ehci_log_hdl, 724 "ehci_register_intrs_and_init_mutex: " 725 "ddi_intr_add_handler() failed"); 726 727 mutex_destroy(&ehcip->ehci_int_mutex); 728 (void) ddi_intr_free(ehcip->ehci_htable[0]); 729 kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t)); 730 731 return (DDI_FAILURE); 732 } 733 734 if (ddi_intr_enable(ehcip->ehci_htable[0]) != DDI_SUCCESS) { 735 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 736 "ehci_register_intrs_and_init_mutex: " 737 "ddi_intr_enable() failed"); 738 739 (void) ddi_intr_remove_handler(ehcip->ehci_htable[0]); 740 mutex_destroy(&ehcip->ehci_int_mutex); 741 (void) ddi_intr_free(ehcip->ehci_htable[0]); 742 kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t)); 743 744 return (DDI_FAILURE); 745 } 746 747 /* Create prototype for advance on async schedule */ 748 cv_init(&ehcip->ehci_async_schedule_advance_cv, 749 NULL, CV_DRIVER, NULL); 750 751 return (DDI_SUCCESS); 752 } 753 754 755 /* 756 * ehci_init_ctlr: 757 * 758 * Initialize the Host Controller (HC). 759 */ 760 int 761 ehci_init_ctlr(ehci_state_t *ehcip) 762 { 763 int revision; 764 uint16_t cmd_reg; 765 clock_t sof_time_wait; 766 int abort_on_BIOS_take_over_failure; 767 768 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:"); 769 770 /* Take control from the BIOS */ 771 if (ehci_take_control(ehcip) != USB_SUCCESS) { 772 773 /* read .conf file properties */ 774 abort_on_BIOS_take_over_failure = 775 ddi_prop_get_int(DDI_DEV_T_ANY, 776 ehcip->ehci_dip, DDI_PROP_DONTPASS, 777 "abort-on-BIOS-take-over-failure", 0); 778 779 if (abort_on_BIOS_take_over_failure) { 780 781 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 782 "Unable to take control from BIOS."); 783 784 return (DDI_FAILURE); 785 } 786 787 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 788 "Unable to take control from BIOS. 
Failure is ignored."); 789 } 790 791 /* set Memory Master Enable */ 792 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM); 793 cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME); 794 pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg); 795 796 /* Reset the EHCI host controller */ 797 Set_OpReg(ehci_command, 798 Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET); 799 800 /* Wait 10ms for reset to complete */ 801 drv_usecwait(EHCI_RESET_TIMEWAIT); 802 803 ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED); 804 805 /* Verify the version number */ 806 revision = Get_16Cap(ehci_version); 807 808 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 809 "ehci_init_ctlr: Revision 0x%x", revision); 810 811 /* 812 * EHCI driver supports EHCI host controllers compliant to 813 * 0.95 and higher revisions of EHCI specifications. 814 */ 815 if (revision < EHCI_REVISION_0_95) { 816 817 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 818 "Revision 0x%x is not supported", revision); 819 820 return (DDI_FAILURE); 821 } 822 823 if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) { 824 825 /* Get the ehci chip vendor and device id */ 826 ehcip->ehci_vendor_id = pci_config_get16( 827 ehcip->ehci_config_handle, PCI_CONF_VENID); 828 ehcip->ehci_device_id = pci_config_get16( 829 ehcip->ehci_config_handle, PCI_CONF_DEVID); 830 ehcip->ehci_rev_id = pci_config_get8( 831 ehcip->ehci_config_handle, PCI_CONF_REVID); 832 833 /* Initialize the Frame list base address area */ 834 if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) { 835 836 return (DDI_FAILURE); 837 } 838 839 /* 840 * For performance reasons, do not insert anything into the 841 * asynchronous list or activate the asynch list schedule until 842 * there is a valid QH. 
843 */ 844 ehcip->ehci_head_of_async_sched_list = NULL; 845 846 if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) && 847 (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) { 848 /* 849 * The driver is unable to reliably stop the asynch 850 * list schedule on VIA VT6202 controllers, so we 851 * always keep a dummy QH on the list. 852 */ 853 ehci_qh_t *dummy_async_qh = 854 ehci_alloc_qh(ehcip, NULL, NULL); 855 856 Set_QH(dummy_async_qh->qh_link_ptr, 857 ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) & 858 EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH)); 859 860 /* Set this QH to be the "head" of the circular list */ 861 Set_QH(dummy_async_qh->qh_ctrl, 862 Get_QH(dummy_async_qh->qh_ctrl) | 863 EHCI_QH_CTRL_RECLAIM_HEAD); 864 865 Set_QH(dummy_async_qh->qh_next_qtd, 866 EHCI_QH_NEXT_QTD_PTR_VALID); 867 Set_QH(dummy_async_qh->qh_alt_next_qtd, 868 EHCI_QH_ALT_NEXT_QTD_PTR_VALID); 869 870 ehcip->ehci_head_of_async_sched_list = dummy_async_qh; 871 ehcip->ehci_open_async_count++; 872 } 873 } 874 875 /* 876 * Check for Asynchronous schedule park capability feature. If this 877 * feature is supported, then, program ehci command register with 878 * appropriate values.. 879 */ 880 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) { 881 882 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 883 "ehci_init_ctlr: Async park mode is supported"); 884 885 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) | 886 (EHCI_CMD_ASYNC_PARK_ENABLE | 887 EHCI_CMD_ASYNC_PARK_COUNT_3))); 888 } 889 890 /* 891 * Check for programmable periodic frame list feature. If this 892 * feature is supported, then, program ehci command register with 893 * 1024 frame list value. 
894 */ 895 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) { 896 897 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 898 "ehci_init_ctlr: Variable programmable periodic " 899 "frame list is supported"); 900 901 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) | 902 EHCI_CMD_FRAME_1024_SIZE)); 903 } 904 905 /* 906 * Currently EHCI driver doesn't support 64 bit addressing. 907 * 908 * If we are using 64 bit addressing capability, then, program 909 * ehci_ctrl_segment register with 4 Gigabyte segment where all 910 * of the interface data structures are allocated. 911 */ 912 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) { 913 914 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 915 "ehci_init_ctlr: EHCI driver doesn't support " 916 "64 bit addressing"); 917 } 918 919 /* 64 bit addressing is not support */ 920 Set_OpReg(ehci_ctrl_segment, 0x00000000); 921 922 /* Turn on/off the schedulers */ 923 ehci_toggle_scheduler(ehcip); 924 925 /* 926 * Set the Periodic Frame List Base Address register with the 927 * starting physical address of the Periodic Frame List. 928 */ 929 Set_OpReg(ehci_periodic_list_base, 930 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address & 0xFFFFF000)); 931 932 /* 933 * Set ehci_interrupt to enable all interrupts except Root 934 * Hub Status change interrupt. 935 */ 936 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR | 937 EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR | 938 EHCI_INTR_USB); 939 940 /* 941 * Set the desired interrupt threshold and turn on EHCI host controller. 942 */ 943 Set_OpReg(ehci_command, 944 ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) | 945 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN))); 946 947 ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN); 948 949 /* 950 * Acer Labs Inc. 
M5273 EHCI controller does not send 951 * interrupts unless the Root hub ports are routed to the EHCI 952 * host controller; so route the ports now, before we test for 953 * the presence of SOFs interrupts. 954 */ 955 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) { 956 /* Route all Root hub ports to EHCI host controller */ 957 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI); 958 } 959 960 /* 961 * VIA chips have some issues and may not work reliably. 962 * Revisions >= 0x80 are part of a southbridge and appear 963 * to be reliable with the workaround. 964 * For revisions < 0x80, if we were bound using class 965 * complain, else proceed. This will allow the user to 966 * bind ehci specifically to this chip and not have the 967 * warnings 968 */ 969 if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) { 970 971 if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) { 972 973 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 974 "ehci_init_ctlr: Applying VIA workarounds for " 975 "the 6212 chip."); 976 977 } else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name, 978 "pciclass,0c0320") == 0) { 979 980 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 981 "Due to recently discovered incompatibilities"); 982 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 983 "with this USB controller, USB2.x transfer"); 984 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 985 "support has been disabled. 
This device will"); 986 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 987 "continue to function as a USB1.x controller."); 988 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 989 "If you are interested in enabling USB2.x"); 990 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 991 "support please, refer to the ehci(7D) man page."); 992 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 993 "Please also refer to www.sun.com/io for"); 994 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 995 "Solaris Ready products and to"); 996 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 997 "www.sun.com/bigadmin/hcl for additional"); 998 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 999 "compatible USB products."); 1000 1001 return (DDI_FAILURE); 1002 1003 } else if (ehci_vt62x2_workaround) { 1004 1005 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1006 "Applying VIA workarounds"); 1007 } 1008 } 1009 1010 /* 1011 * Get the number of clock ticks to wait. 1012 * This is based on the maximum time it takes for a frame list rollover 1013 * and maximum time wait for SOFs to begin. 1014 */ 1015 sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) + 1016 EHCI_SOF_TIMEWAIT); 1017 1018 /* Tell the ISR to broadcast ehci_async_schedule_advance_cv */ 1019 ehcip->ehci_flags |= EHCI_CV_INTR; 1020 1021 /* We need to add a delay to allow the chip time to start running */ 1022 (void) cv_timedwait(&ehcip->ehci_async_schedule_advance_cv, 1023 &ehcip->ehci_int_mutex, ddi_get_lbolt() + sof_time_wait); 1024 1025 /* 1026 * Check EHCI host controller is running, otherwise return failure. 
 */
	if ((ehcip->ehci_flags & EHCI_CV_INTR) ||
	    (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {

		/*
		 * NOTE(review): the two adjacent string literals below
		 * concatenate without a separating space, so the logged
		 * message reads "...hostcontroller..." — confirm and add
		 * a space in a code change.
		 */
		USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "No SOF interrupts have been received, this USB EHCI host"
		    "controller is unusable");

		/*
		 * Route all Root hub ports to Classic host
		 * controller, in case this is an unusable ALI M5273
		 * EHCI controller.
		 */
		if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
			Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
		}

		return (DDI_FAILURE);
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_ctlr: SOF's have started");

	/* Route all Root hub ports to EHCI host controller */
	Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);

	/* Set host controller soft state to operational */
	ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;

	return (DDI_SUCCESS);
}

/*
 * ehci_take_control:
 *
 * Handshake to take EHCI control from BIOS if necessary.  It's only valid
 * for x86 machines, because sparc doesn't have a BIOS.
 * On an x86 machine, the take control process includes
 *	o get the base address of the extended capability list
 *	o find the capability for handoff synchronization in the list.
 *	o check if BIOS has owned the host controller.
 *	o set the OS Owned semaphore bit, ask the BIOS to release ownership.
 *	o wait for a constant time and check if BIOS has relinquished control.
 *
 * Returns USB_SUCCESS when the handoff completed or was unnecessary
 * (no legacy capability, BIOS not owner, or non-x86 build), USB_FAILURE
 * when the BIOS did not release the controller within the timeout.
 */
/* ARGSUSED */
static int
ehci_take_control(ehci_state_t *ehcip)
{
#if defined(__x86)
	uint32_t		extended_cap;
	uint32_t		extended_cap_offset;
	uint32_t		extended_cap_id;
	uint_t			retry;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_take_control:");

	/*
	 * According EHCI Spec 2.2.4, get EECP base address from HCCPARAMS
	 * register.
	 */
	extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
	    EHCI_HCC_EECP_SHIFT;

	/*
	 * According EHCI Spec 2.2.4, if the extended capability offset is
	 * less than 40h then its not valid. This means we don't need to
	 * worry about BIOS handoff.
	 */
	if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: Hardware doesn't support legacy.");

		goto success;
	}

	/*
	 * According EHCI Spec 2.1.7, A zero offset indicates the
	 * end of the extended capability list.
	 */
	while (extended_cap_offset) {

		/* Get the extended capability value. */
		extended_cap = pci_config_get32(ehcip->ehci_config_handle,
		    extended_cap_offset);

		/* Get the capability ID */
		extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
		    EHCI_EX_CAP_ID_SHIFT;

		/* Check if the card support legacy */
		if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
			break;
		}

		/* Get the offset of the next capability */
		extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
		    EHCI_EX_CAP_NEXT_PTR_SHIFT;
	}

	/*
	 * Unable to find legacy support in hardware's extended capability
	 * list.  This means we don't need to worry about BIOS handoff.
	 * (The loop above runs at least once when the offset was valid,
	 * so extended_cap_id is initialized on this path.)
	 */
	if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: Hardware doesn't support legacy");

		goto success;
	}

	/* Check if BIOS has owned it. */
	if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {

		USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_take_control: BIOS does not own EHCI");

		goto success;
	}

	/*
	 * According EHCI Spec 5.1, The OS driver initiates an ownership
	 * request by setting the OS Owned semaphore to a one. The OS
	 * waits for the BIOS Owned bit to go to a zero before attempting
	 * to use the EHCI controller. The time that OS must wait for BIOS
	 * to respond to the request for ownership is beyond the scope of
	 * this specification.
	 * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms
	 * for BIOS to release the ownership.
	 */
	extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
	pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
	    extended_cap);

	for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {

		/* wait a special interval */
		delay(drv_usectohz(EHCI_TAKEOVER_DELAY));

		/* Check to see if the BIOS has released the ownership */
		extended_cap = pci_config_get32(
		    ehcip->ehci_config_handle, extended_cap_offset);

		if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {

			USB_DPRINTF_L3(PRINT_MASK_ATTA,
			    ehcip->ehci_log_hdl,
			    "ehci_take_control: BIOS has released "
			    "the ownership. retry = %d", retry);

			goto success;
		}

	}

	USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_take_control: take control from BIOS failed.");

	return (USB_FAILURE);

success:

#endif	/* __x86 */
	return (USB_SUCCESS);
}


/*
 * ehci_init_periodic_frame_list_table :
 *
 * Allocate the system memory and initialize Host Controller
 * Periodic Frame List table area. The starting of the Periodic
 * Frame List Table area must be 4096 byte aligned.
 */
static int
ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
{
	ddi_device_acc_attr_t	dev_attr;
	size_t			real_length;
	uint_t			ccount;
	int			result;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Force the required 4K restrictive alignment */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;

	/* Create space for the Periodic Frame List */
	if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
	    DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {

		goto failure;
	}

	if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
	    sizeof (ehci_periodic_frame_list_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
	    &real_length, &ehcip->ehci_pflt_mem_handle)) {

		goto failure;
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table: "
	    "Real length %lu", real_length);

	/* Map the whole Periodic Frame List into the I/O address space */
	result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
	    NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
	    real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);

	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
			    "ehci_init_periodic_frame_lst_table: "
			    "More than 1 cookie");

			goto failure;
		}
	} else {
		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);

		goto failure;
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
	    (void *)ehcip->ehci_periodic_frame_list_tablep,
	    ehcip->ehci_pflt_cookie.dmac_address);

	/*
	 * DMA addresses for Periodic Frame List are bound.
	 */
	ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;

	bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);

	/* Initialize the Periodic Frame List */
	ehci_build_interrupt_lattice(ehcip);

	/* Reset Byte Alignment to Default */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_SUCCESS);
failure:
	/*
	 * Byte alignment is restored here; the handle and memory
	 * allocated above are deliberately NOT freed on this path —
	 * ehci_cleanup() frees ehci_pflt_dma_handle/mem_handle on
	 * attach failure.
	 */
	ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;

	return (DDI_FAILURE);
}


/*
 * ehci_build_interrupt_lattice:
 *
 * Construct the interrupt lattice tree using static Endpoint Descriptors
 * (QH). This interrupt lattice tree will have total of 32 interrupt QH
 * lists and the Host Controller (HC) processes one interrupt QH list in
 * every frame. The Host Controller traverses the periodic schedule by
 * constructing an array offset reference from the Periodic List Base Address
 * register and bits 12 to 3 of Frame Index register. It fetches the element
 * and begins traversing the graph of linked schedule data structures.
1302 */ 1303 static void 1304 ehci_build_interrupt_lattice(ehci_state_t *ehcip) 1305 { 1306 ehci_qh_t *list_array = ehcip->ehci_qh_pool_addr; 1307 ushort_t ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS]; 1308 ehci_periodic_frame_list_t *periodic_frame_list = 1309 ehcip->ehci_periodic_frame_list_tablep; 1310 ushort_t *temp, num_of_nodes; 1311 uintptr_t addr; 1312 int i, j, k; 1313 1314 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 1315 "ehci_build_interrupt_lattice:"); 1316 1317 /* 1318 * Reserve the first 63 Endpoint Descriptor (QH) structures 1319 * in the pool as static endpoints & these are required for 1320 * constructing interrupt lattice tree. 1321 */ 1322 for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) { 1323 Set_QH(list_array[i].qh_state, EHCI_QH_STATIC); 1324 Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED); 1325 Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID); 1326 Set_QH(list_array[i].qh_alt_next_qtd, 1327 EHCI_QH_ALT_NEXT_QTD_PTR_VALID); 1328 } 1329 1330 /* 1331 * Make sure that last Endpoint on the periodic frame list terminates 1332 * periodic schedule. 1333 */ 1334 Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID); 1335 1336 /* Build the interrupt lattice tree */ 1337 for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) { 1338 /* 1339 * The next pointer in the host controller endpoint 1340 * descriptor must contain an iommu address. Calculate 1341 * the offset into the cpu address and add this to the 1342 * starting iommu address. 
1343 */ 1344 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]); 1345 1346 Set_QH(list_array[2*i + 1].qh_link_ptr, 1347 addr | EHCI_QH_LINK_REF_QH); 1348 Set_QH(list_array[2*i + 2].qh_link_ptr, 1349 addr | EHCI_QH_LINK_REF_QH); 1350 } 1351 1352 /* Build the tree bottom */ 1353 temp = (unsigned short *) 1354 kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP); 1355 1356 num_of_nodes = 1; 1357 1358 /* 1359 * Initialize the values which are used for setting up head pointers 1360 * for the 32ms scheduling lists which starts from the Periodic Frame 1361 * List. 1362 */ 1363 for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) { 1364 for (j = 0, k = 0; k < num_of_nodes; k++, j++) { 1365 ehci_index[j++] = temp[k]; 1366 ehci_index[j] = temp[k] + ehci_pow_2(i); 1367 } 1368 1369 num_of_nodes *= 2; 1370 for (k = 0; k < num_of_nodes; k++) 1371 temp[k] = ehci_index[k]; 1372 } 1373 1374 kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2)); 1375 1376 /* 1377 * Initialize the interrupt list in the Periodic Frame List Table 1378 * so that it points to the bottom of the tree. 1379 */ 1380 for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) { 1381 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *) 1382 (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1])); 1383 1384 ASSERT(addr); 1385 1386 for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) { 1387 Set_PFLT(periodic_frame_list-> 1388 ehci_periodic_frame_list_table[ehci_index[j++]], 1389 (uint32_t)(addr | EHCI_QH_LINK_REF_QH)); 1390 } 1391 } 1392 } 1393 1394 1395 /* 1396 * ehci_alloc_hcdi_ops: 1397 * 1398 * The HCDI interfaces or entry points are the software interfaces used by 1399 * the Universal Serial Bus Driver (USBA) to access the services of the 1400 * Host Controller Driver (HCD). During HCD initialization, inform USBA 1401 * about all available HCDI interfaces or entry points. 
 */
usba_hcdi_ops_t *
ehci_alloc_hcdi_ops(ehci_state_t *ehcip)
{
	usba_hcdi_ops_t			*usba_hcdi_ops;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_alloc_hcdi_ops:");

	usba_hcdi_ops = usba_alloc_hcdi_ops();

	usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;

	/* Pipe management entry points */
	usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
	usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;

	usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;

	/* Transfer entry points, one per USB transfer type */
	usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
	usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;

	usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
	    ehci_hcdi_bulk_transfer_size;

	usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
	    ehci_hcdi_pipe_stop_intr_polling;
	usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
	    ehci_hcdi_pipe_stop_isoc_polling;

	usba_hcdi_ops->usba_hcdi_get_current_frame_number =
	    ehci_hcdi_get_current_frame_number;
	usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
	    ehci_hcdi_get_max_isoc_pkts;

	/* Polled-mode (kernel debugger / console) entry points */
	usba_hcdi_ops->usba_hcdi_console_input_init =
	    ehci_hcdi_polled_input_init;
	usba_hcdi_ops->usba_hcdi_console_input_enter =
	    ehci_hcdi_polled_input_enter;
	usba_hcdi_ops->usba_hcdi_console_read =
	    ehci_hcdi_polled_read;
	usba_hcdi_ops->usba_hcdi_console_input_exit =
	    ehci_hcdi_polled_input_exit;
	usba_hcdi_ops->usba_hcdi_console_input_fini =
	    ehci_hcdi_polled_input_fini;
	return (usba_hcdi_ops);
}


/*
 * Host Controller Driver (HCD) deinitialization functions
 */

/*
 * ehci_cleanup:
 *
 * Cleanup on attach failure or detach.  Each teardown step is gated on
 * the corresponding EHCI_* bit in ehci_flags, so this is safe to call
 * after a partially-completed attach.  Returns DDI_FAILURE only when the
 * root hub driver refuses to unload; DDI_SUCCESS otherwise.
 */
int
ehci_cleanup(ehci_state_t *ehcip)
{
	ehci_trans_wrapper_t	*tw;
	ehci_pipe_private_t	*pp;
	ehci_qtd_t		*qtd;
	int			i, ctrl, rval;
	int			flags = ehcip->ehci_flags;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");

	if (flags & EHCI_RHREG) {
		/* Unload the root hub driver */
		if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {

			return (DDI_FAILURE);
		}
	}

	if (flags & EHCI_USBAREG) {
		/* Unregister this HCD instance with USBA */
		usba_hcdi_unregister(ehcip->ehci_dip);
	}

	if (flags & EHCI_INTR) {

		mutex_enter(&ehcip->ehci_int_mutex);

		/* Route all Root hub ports to Classic host controller */
		Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);

		/* Disable all EHCI QH list processing */
		Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
		    ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
		    EHCI_CMD_PERIODIC_SCHED_ENABLE)));

		/* Disable all EHCI interrupts */
		Set_OpReg(ehci_interrupt, 0);

		/* wait for the next SOF */
		(void) ehci_wait_for_sof(ehcip);

		/* Stop the EHCI host controller */
		Set_OpReg(ehci_command,
		    Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);

		/* Wait for sometime */
		drv_usecwait(EHCI_TIMEWAIT);

		mutex_exit(&ehcip->ehci_int_mutex);

		/* disable interrupt */
		(void) ddi_intr_disable(ehcip->ehci_htable[0]);

		/* Remove interrupt handler */
		(void) ddi_intr_remove_handler(ehcip->ehci_htable[0]);

		/* free interrupt handle */
		(void) ddi_intr_free(ehcip->ehci_htable[0]);

		/* free memory */
		kmem_free(ehcip->ehci_htable, sizeof (ddi_intr_handle_t));
	}

	/* Unmap the EHCI registers */
	if (ehcip->ehci_caps_handle) {
		ddi_regs_map_free(&ehcip->ehci_caps_handle);
	}

	if (ehcip->ehci_config_handle) {
		pci_config_teardown(&ehcip->ehci_config_handle);
	}

	/*
	 * Free all the buffers.  Any QTD that is neither free nor a dummy
	 * and still carries a transfer wrapper represents an in-flight
	 * transfer; stop its timer and release the wrapper first.
	 */
	if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
		for (i = 0; i < ehci_qtd_pool_size; i++) {
			qtd = &ehcip->ehci_qtd_pool_addr[i];
			ctrl = Get_QTD(ehcip->
			    ehci_qtd_pool_addr[i].qtd_state);

			if ((ctrl != EHCI_QTD_FREE) &&
			    (ctrl != EHCI_QTD_DUMMY) &&
			    (qtd->qtd_trans_wrapper)) {

				mutex_enter(&ehcip->ehci_int_mutex);

				tw = (ehci_trans_wrapper_t *)
				    EHCI_LOOKUP_ID((uint32_t)
				    Get_QTD(qtd->qtd_trans_wrapper));

				/* Obtain the pipe private structure */
				pp = tw->tw_pipe_private;

				/* Stop the the transfer timer */
				ehci_stop_xfer_timer(ehcip, tw,
				    EHCI_REMOVE_XFER_ALWAYS);

				ehci_deallocate_tw(ehcip, pp, tw);

				mutex_exit(&ehcip->ehci_int_mutex);
			}
		}

		/*
		 * If EHCI_QTD_POOL_BOUND flag is set, then unbind
		 * the handle for QTD pools.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_qtd_pool_dma_handle);

			/* rval is consumed only by ASSERT (DEBUG builds) */
			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
	}

	/* Free the QTD pool */
	if (ehcip->ehci_qtd_pool_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
	}

	if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
		/*
		 * If EHCI_QH_POOL_BOUND flag is set, then unbind
		 * the handle for QH pools.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_qh_pool_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
	}

	/* Free the QH pool */
	if (ehcip->ehci_qh_pool_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
	}

	/* Free the Periodic frame list table (PFLT) area */
	if (ehcip->ehci_periodic_frame_list_tablep &&
	    ehcip->ehci_pflt_mem_handle) {
		/*
		 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
		 * the handle for PFLT.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_pflt_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
	}

	(void) ehci_isoc_cleanup(ehcip);

	if (ehcip->ehci_pflt_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
	}

	if (flags & EHCI_INTR) {
		/* Destroy the mutex */
		mutex_destroy(&ehcip->ehci_int_mutex);

		/* Destroy the async schedule advance condition variable */
		cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
	}

	/* clean up kstat structs */
	ehci_destroy_stats(ehcip);

	/* Free ehci hcdi ops */
	if (ehcip->ehci_hcdi_ops) {
		usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
	}

	if (flags & EHCI_ZALLOC) {

		usb_free_log_hdl(ehcip->ehci_log_hdl);

		/* Remove all properties that might have been created */
		ddi_prop_remove_all(ehcip->ehci_dip);

		/* Free the soft state */
		ddi_soft_state_free(ehci_statep,
		    ddi_get_instance(ehcip->ehci_dip));
	}

	return (DDI_SUCCESS);
}


/*
 * ehci_cpr_suspend
 */
int
ehci_cpr_suspend(ehci_state_t	*ehcip)
{
	int	i;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend:");

	/* Call into the root hub and suspend it */
	if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_cpr_suspend: root hub fails to suspend");

		return (DDI_FAILURE);
	}

	/* Only root hub's intr pipe should be open at this time */
	mutex_enter(&ehcip->ehci_int_mutex);

	ASSERT(ehcip->ehci_open_pipe_count == 0);

	/*
	 * Just wait till all resources are reclaimed; bounded to at
	 * most 3 reclaim/SOF-wait rounds so suspend cannot hang here.
	 */
	i = 0;
	while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
		ehci_handle_endpoint_reclaimation(ehcip);
		(void) ehci_wait_for_sof(ehcip);
	}
	ASSERT(ehcip->ehci_reclaim_list == NULL);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend: Disable HC QH list processing");

	/* Disable all EHCI QH list processing */
	Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
	    ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend: Disable HC interrupts");

	/* Disable all EHCI interrupts */
	Set_OpReg(ehci_interrupt, 0);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_suspend: Wait for the next SOF");

	/* Wait for the next SOF */
	if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_cpr_suspend: ehci host controller suspend failed");

		mutex_exit(&ehcip->ehci_int_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * Stop the ehci host controller
	 * if usb keyboard is not connected.
	 */
	if (ehcip->ehci_polled_kbd_count == 0) {
		Set_OpReg(ehci_command,
		    Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
	}

	/* Set host controller soft state to suspend */
	ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;

	mutex_exit(&ehcip->ehci_int_mutex);

	return (DDI_SUCCESS);
}


/*
 * ehci_cpr_resume
 *
 * Resume after checkpoint/resume: re-initialize the controller and then
 * resume the root hub.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ehci_cpr_resume(ehci_state_t	*ehcip)
{
	mutex_enter(&ehcip->ehci_int_mutex);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
	    "ehci_cpr_resume: Restart the controller");

	/* Cleanup ehci specific information across cpr */
	ehci_cpr_cleanup(ehcip);

	/* Restart the controller */
	if (ehci_init_ctlr(ehcip) != DDI_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_cpr_resume: ehci host controller resume failed ");

		mutex_exit(&ehcip->ehci_int_mutex);

		return (DDI_FAILURE);
	}

	mutex_exit(&ehcip->ehci_int_mutex);

	/* Now resume the root hub */
	if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}


/*
 * Bandwidth Allocation functions
 */

/*
 * ehci_allocate_bandwidth:
 *
 * Figure out whether or not this interval may be supported. Return the index
 * into the lattice if it can be supported. Return allocation failure if it
 * can not be supported.
1787 */ 1788 int 1789 ehci_allocate_bandwidth( 1790 ehci_state_t *ehcip, 1791 usba_pipe_handle_data_t *ph, 1792 uint_t *pnode, 1793 uchar_t *smask, 1794 uchar_t *cmask) 1795 { 1796 int error = USB_SUCCESS; 1797 1798 /* This routine is protected by the ehci_int_mutex */ 1799 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 1800 1801 /* Reset the pnode to the last checked pnode */ 1802 *pnode = 0; 1803 1804 /* Allocate high speed bandwidth */ 1805 if ((error = ehci_allocate_high_speed_bandwidth(ehcip, 1806 ph, pnode, smask, cmask)) != USB_SUCCESS) { 1807 1808 return (error); 1809 } 1810 1811 /* 1812 * For low/full speed usb devices, allocate classic TT bandwidth 1813 * in additional to high speed bandwidth. 1814 */ 1815 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) { 1816 1817 /* Allocate classic TT bandwidth */ 1818 if ((error = ehci_allocate_classic_tt_bandwidth( 1819 ehcip, ph, *pnode)) != USB_SUCCESS) { 1820 1821 /* Deallocate high speed bandwidth */ 1822 ehci_deallocate_high_speed_bandwidth( 1823 ehcip, ph, *pnode, *smask, *cmask); 1824 } 1825 } 1826 1827 return (error); 1828 } 1829 1830 1831 /* 1832 * ehci_allocate_high_speed_bandwidth: 1833 * 1834 * Allocate high speed bandwidth for the low/full/high speed interrupt and 1835 * isochronous endpoints. 
 */
static int
ehci_allocate_high_speed_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			*pnode,
	uchar_t			*smask,
	uchar_t			*cmask)
{
	uint_t			sbandwidth, cbandwidth;
	int			interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud;
	usb_port_status_t	port_status;
	int			error;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = ph->p_usba_device->usb_port_status;

	mutex_exit(&child_ud->usb_mutex);

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint. Return failure if maximum packet is
	 * zero.
	 */
	error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
	    port_status, &sbandwidth, &cbandwidth);
	if (error != USB_SUCCESS) {

		return (error);
	}

	/*
	 * Adjust polling interval to be a power of 2.
	 * If this interval can't be supported, return
	 * allocation failure.
	 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
	if (interval == USB_FAILURE) {

		return (USB_FAILURE);
	}

	if (port_status == USBA_HIGH_SPEED_DEV) {
		/*
		 * Allocate bandwidth for high speed devices, except ITD.
		 * No complete-split is needed for a high speed device,
		 * so the complete split mask is cleared.
		 */
		error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
		    endpoint, sbandwidth, interval);
		*cmask = 0x00;

	} else {
		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
		    USB_EP_ATTR_INTR) {

			/* Allocate bandwidth for low speed interrupt */
			error = ehci_find_bestfit_ls_intr_mask(ehcip,
			    smask, cmask, pnode, sbandwidth, cbandwidth,
			    interval);
		} else {
			if ((endpoint->bEndpointAddress &
			    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {

				/* Allocate bandwidth for sitd in */
				error = ehci_find_bestfit_sitd_in_mask(ehcip,
				    smask, cmask, pnode, sbandwidth, cbandwidth,
				    interval);
			} else {

				/* Allocate bandwidth for sitd out */
				error = ehci_find_bestfit_sitd_out_mask(ehcip,
				    smask, pnode, sbandwidth, interval);
				*cmask = 0x00;
			}
		}
	}

	if (error != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_high_speed_bandwidth: Reached maximum "
		    "bandwidth value and cannot allocate bandwidth for a "
		    "given high-speed periodic endpoint");

		return (USB_NO_BANDWIDTH);
	}

	return (error);
}


/*
 * ehci_allocate_classic_tt_bandwidth:
 *
 * Allocate classic TT bandwidth for the low/full speed interrupt and
 * isochronous endpoints.
 */
static int
ehci_allocate_classic_tt_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode)
{
	uint_t			bandwidth, min;
	uint_t			height, leftmost, list;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud, *parent_ud;
	usb_port_status_t	port_status;
	int			i, interval;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = child_ud->usb_port_status;

	/* Get the parent high speed hub's usba device structure */
	parent_ud = child_ud->usb_hs_hub_usba_dev;

	mutex_exit(&child_ud->usb_mutex);

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_allocate_classic_tt_bandwidth: "
	    "child_ud 0x%p parent_ud 0x%p", child_ud, parent_ud);

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint. Return failure if maximum packet is
	 * zero.
	 */
	if (ehci_compute_classic_bandwidth(endpoint,
	    port_status, &bandwidth) != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
		    "with zero endpoint maximum packet size is not supported");

		return (USB_NOT_SUPPORTED);
	}

	USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
	    "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);

	mutex_enter(&parent_ud->usb_mutex);

	/*
	 * Cheap first check against the cached per-hub minimum: if even
	 * the least-loaded list cannot fit this transfer, fail without
	 * walking the lists.
	 */
	if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
	    FS_PERIODIC_BANDWIDTH) {

		mutex_exit(&parent_ud->usb_mutex);

		USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
		    "ehci_allocate_classic_tt_bandwidth: Reached maximum "
		    "bandwidth value and cannot allocate bandwidth for a "
		    "given low/full speed periodic endpoint");

		return (USB_NO_BANDWIDTH);
	}

	mutex_exit(&parent_ud->usb_mutex);

	/* Adjust polling interval to be a power of 2 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/* Find the leftmost leaf in the subtree specified by the node. */
	leftmost = ehci_leftmost_leaf(pnode, height);

	mutex_enter(&parent_ud->usb_mutex);

	/* Verify that every affected leaf list can take the new load */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];

		if ((parent_ud->usb_hs_hub_bandwidth[list] +
		    bandwidth) > FS_PERIODIC_BANDWIDTH) {

			mutex_exit(&parent_ud->usb_mutex);

			USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
			    "ehci_allocate_classic_tt_bandwidth: Reached "
			    "maximum bandwidth value and cannot allocate "
			    "bandwidth for low/full periodic endpoint");

			return (USB_NO_BANDWIDTH);
		}
	}

	/*
	 * All the leaves for this node must be updated with the bandwidth.
	 */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];
		parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min = parent_ud->usb_hs_hub_bandwidth[0];

	for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
		if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
			min = parent_ud->usb_hs_hub_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	parent_ud->usb_hs_hub_min_bandwidth = min;

	mutex_exit(&parent_ud->usb_mutex);

	return (USB_SUCCESS);
}


/*
 * ehci_deallocate_bandwidth:
 *
 * Deallocate bandwidth for the given node in the lattice and the length
 * of transfer.
 */
void
ehci_deallocate_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode,
	uchar_t			smask,
	uchar_t			cmask)
{
	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);

	/*
	 * For low/full speed usb devices, deallocate classic TT bandwidth
	 * in additional to high speed bandwidth.
	 */
	if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {

		/* Deallocate classic TT bandwidth */
		ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
	}
}


/*
 * ehci_deallocate_high_speed_bandwidth:
 *
 * Deallocate high speed bandwidth of a interrupt or isochronous endpoint.
 */
static void
ehci_deallocate_high_speed_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode,
	uchar_t			smask,
	uchar_t			cmask)
{
	uint_t			height, leftmost;
	uint_t			list_count;
	uint_t			sbandwidth, cbandwidth;
	int			interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud;
	usb_port_status_t	port_status;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = ph->p_usba_device->usb_port_status;

	mutex_exit(&child_ud->usb_mutex);

	/* Recompute the per-transaction costs that were reserved */
	(void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
	    port_status, &sbandwidth, &cbandwidth);

	/* Adjust polling interval to be a power of 2 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/*
	 * Find the leftmost leaf in the subtree specified by the node
	 */
	leftmost = ehci_leftmost_leaf(pnode, height);

	list_count = EHCI_NUM_INTR_QH_LISTS/interval;

	/*
	 * Delete the bandwidth from the appropriate lists.  A negative
	 * delta passed to ehci_update_bw_availability() releases the
	 * previously reserved bandwidth; the branch structure mirrors
	 * the allocation cases in ehci_allocate_high_speed_bandwidth().
	 */
	if (port_status == USBA_HIGH_SPEED_DEV) {

		ehci_update_bw_availability(ehcip, -sbandwidth,
		    leftmost, list_count, smask);
	} else {
		if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
		    USB_EP_ATTR_INTR) {

			ehci_update_bw_availability(ehcip, -sbandwidth,
			    leftmost, list_count, smask);
			ehci_update_bw_availability(ehcip, -cbandwidth,
			    leftmost, list_count, cmask);
		} else {
			if ((endpoint->bEndpointAddress &
			    USB_EP_DIR_MASK) == USB_EP_DIR_IN) {

				ehci_update_bw_availability(ehcip, -sbandwidth,
				    leftmost, list_count, smask);
				ehci_update_bw_availability(ehcip,
				    -MAX_UFRAME_SITD_XFER, leftmost,
				    list_count, cmask);
			} else {

				ehci_update_bw_availability(ehcip,
				    -MAX_UFRAME_SITD_XFER, leftmost,
				    list_count, smask);
			}
		}
	}
}

/*
 * ehci_deallocate_classic_tt_bandwidth:
 *
 * Deallocate classic TT bandwidth of a low/full speed interrupt or
 * isochronous endpoint.
 */
static void
ehci_deallocate_classic_tt_bandwidth(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	uint_t			pnode)
{
	uint_t			bandwidth, height, leftmost, list, min;
	int			i, interval;
	usb_ep_descr_t		*endpoint = &ph->p_ep;
	usba_device_t		*child_ud, *parent_ud;
	usb_port_status_t	port_status;

	/* This routine is protected by the ehci_int_mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Get child's usba device structure */
	child_ud = ph->p_usba_device;

	mutex_enter(&child_ud->usb_mutex);

	/* Get the current usb device's port status */
	port_status = child_ud->usb_port_status;

	/* Get the parent high speed hub's usba device structure */
	parent_ud = child_ud->usb_hs_hub_usba_dev;

	mutex_exit(&child_ud->usb_mutex);

	/* Obtain the bandwidth */
	(void) ehci_compute_classic_bandwidth(endpoint,
	    port_status, &bandwidth);

	/* Adjust polling interval to be a power of 2 */
	interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);

	/* Find the height in the tree */
	height = ehci_lattice_height(interval);

	/* Find the leftmost leaf in the subtree specified by the node */
	leftmost = ehci_leftmost_leaf(pnode, height);

	mutex_enter(&parent_ud->usb_mutex);

	/* Delete the bandwidth from the appropriate lists */
	for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
		list = ehci_index[leftmost + i];
		parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min = parent_ud->usb_hs_hub_bandwidth[0];

	for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
		if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
			min = parent_ud->usb_hs_hub_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	parent_ud->usb_hs_hub_min_bandwidth = min;

	mutex_exit(&parent_ud->usb_mutex);
}


/*
 * ehci_compute_high_speed_bandwidth:
 *
 * Given a periodic endpoint (interrupt or isochronous) determine the total
 * bandwidth for one transaction. The EHCI host controller traverses the
 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
 * services an endpoint, only a single transaction attempt is made. The HC
 * moves to the next Endpoint Descriptor after the first transaction attempt
 * rather than finishing the entire Transfer Descriptor. Therefore, when a
 * Transfer Descriptor is inserted into the lattice, we will only count the
 * number of bytes for one transaction.
 *
 * The following are the formulas used for calculating bandwidth in terms
 * bytes and it is for the single USB high speed transaction. The protocol
 * overheads will be different for each of type of USB transfer & all these
 * formulas & protocol overheads are derived from the 5.11.3 section of the
 * USB 2.0 Specification.
2265 * 2266 * High-Speed: 2267 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay 2268 * 2269 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub) 2270 * 2271 * Protocol overhead + Split transaction overhead + 2272 * ((MaxPktSz * 7)/6) + Host_Delay; 2273 */ 2274 /* ARGSUSED */ 2275 static int 2276 ehci_compute_high_speed_bandwidth( 2277 ehci_state_t *ehcip, 2278 usb_ep_descr_t *endpoint, 2279 usb_port_status_t port_status, 2280 uint_t *sbandwidth, 2281 uint_t *cbandwidth) 2282 { 2283 ushort_t maxpacketsize = endpoint->wMaxPacketSize; 2284 2285 /* Return failure if endpoint maximum packet is zero */ 2286 if (maxpacketsize == 0) { 2287 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2288 "ehci_allocate_high_speed_bandwidth: Periodic endpoint " 2289 "with zero endpoint maximum packet size is not supported"); 2290 2291 return (USB_NOT_SUPPORTED); 2292 } 2293 2294 /* Add bit-stuffing overhead */ 2295 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6); 2296 2297 /* Add Host Controller specific delay to required bandwidth */ 2298 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY; 2299 2300 /* Add xfer specific protocol overheads */ 2301 if ((endpoint->bmAttributes & 2302 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) { 2303 /* High speed interrupt transaction */ 2304 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD; 2305 } else { 2306 /* Isochronous transaction */ 2307 *sbandwidth += HS_ISOC_PROTO_OVERHEAD; 2308 } 2309 2310 /* 2311 * For low/full speed devices, add split transaction specific 2312 * overheads. 2313 */ 2314 if (port_status != USBA_HIGH_SPEED_DEV) { 2315 /* 2316 * Add start and complete split transaction 2317 * tokens overheads. 
2318 */ 2319 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD; 2320 *sbandwidth += START_SPLIT_OVERHEAD; 2321 2322 /* Add data overhead depending on data direction */ 2323 if ((endpoint->bEndpointAddress & 2324 USB_EP_DIR_MASK) == USB_EP_DIR_IN) { 2325 *cbandwidth += maxpacketsize; 2326 } else { 2327 if ((endpoint->bmAttributes & 2328 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) { 2329 /* There is no compete splits for out */ 2330 *cbandwidth = 0; 2331 } 2332 *sbandwidth += maxpacketsize; 2333 } 2334 } else { 2335 uint_t xactions; 2336 2337 /* Get the max transactions per microframe */ 2338 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >> 2339 USB_EP_MAX_XACTS_SHIFT) + 1; 2340 2341 /* High speed transaction */ 2342 *sbandwidth += maxpacketsize; 2343 2344 /* Calculate bandwidth per micro-frame */ 2345 *sbandwidth *= xactions; 2346 2347 *cbandwidth = 0; 2348 } 2349 2350 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2351 "ehci_allocate_high_speed_bandwidth: " 2352 "Start split bandwidth %d Complete split bandwidth %d", 2353 *sbandwidth, *cbandwidth); 2354 2355 return (USB_SUCCESS); 2356 } 2357 2358 2359 /* 2360 * ehci_compute_classic_bandwidth: 2361 * 2362 * Given a periodic endpoint (interrupt or isochronous) determine the total 2363 * bandwidth for one transaction. The EHCI host controller traverses the 2364 * endpoint descriptor lists on a first-come-first-serve basis. When the HC 2365 * services an endpoint, only a single transaction attempt is made. The HC 2366 * moves to the next Endpoint Descriptor after the first transaction attempt 2367 * rather than finishing the entire Transfer Descriptor. Therefore, when a 2368 * Transfer Descriptor is inserted into the lattice, we will only count the 2369 * number of bytes for one transaction. 2370 * 2371 * The following are the formulas used for calculating bandwidth in terms 2372 * bytes and it is for the single USB high speed transaction. 
The protocol 2373 * overheads will be different for each of type of USB transfer & all these 2374 * formulas & protocol overheads are derived from the 5.11.3 section of the 2375 * USB 2.0 Specification. 2376 * 2377 * Low-Speed: 2378 * Protocol overhead + Hub LS overhead + 2379 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay 2380 * 2381 * Full-Speed: 2382 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay 2383 */ 2384 /* ARGSUSED */ 2385 static int 2386 ehci_compute_classic_bandwidth( 2387 usb_ep_descr_t *endpoint, 2388 usb_port_status_t port_status, 2389 uint_t *bandwidth) 2390 { 2391 ushort_t maxpacketsize = endpoint->wMaxPacketSize; 2392 2393 /* 2394 * If endpoint maximum packet is zero, then return immediately. 2395 */ 2396 if (maxpacketsize == 0) { 2397 2398 return (USB_NOT_SUPPORTED); 2399 } 2400 2401 /* Add TT delay to required bandwidth */ 2402 *bandwidth = TT_DELAY; 2403 2404 /* Add bit-stuffing overhead */ 2405 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6); 2406 2407 switch (port_status) { 2408 case USBA_LOW_SPEED_DEV: 2409 /* Low speed interrupt transaction */ 2410 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD + 2411 HUB_LOW_SPEED_PROTO_OVERHEAD + 2412 (LOW_SPEED_CLOCK * maxpacketsize)); 2413 break; 2414 case USBA_FULL_SPEED_DEV: 2415 /* Full speed transaction */ 2416 *bandwidth += maxpacketsize; 2417 2418 /* Add xfer specific protocol overheads */ 2419 if ((endpoint->bmAttributes & 2420 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) { 2421 /* Full speed interrupt transaction */ 2422 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD; 2423 } else { 2424 /* Isochronous and input transaction */ 2425 if ((endpoint->bEndpointAddress & 2426 USB_EP_DIR_MASK) == USB_EP_DIR_IN) { 2427 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD; 2428 } else { 2429 /* Isochronous and output transaction */ 2430 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD; 2431 } 2432 } 2433 break; 2434 } 2435 2436 return (USB_SUCCESS); 2437 } 2438 2439 2440 /* 2441 * ehci_adjust_polling_interval: 2442 * 2443 * 
Adjust bandwidth according usb device speed. 2444 */ 2445 /* ARGSUSED */ 2446 int 2447 ehci_adjust_polling_interval( 2448 ehci_state_t *ehcip, 2449 usb_ep_descr_t *endpoint, 2450 usb_port_status_t port_status) 2451 { 2452 uint_t interval; 2453 int i = 0; 2454 2455 /* Get the polling interval */ 2456 interval = endpoint->bInterval; 2457 2458 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2459 "ehci_adjust_polling_interval: Polling interval 0x%x", interval); 2460 2461 /* 2462 * According USB 2.0 Specifications, a high-speed endpoint's 2463 * polling intervals are specified interms of 125us or micro 2464 * frame, where as full/low endpoint's polling intervals are 2465 * specified in milliseconds. 2466 * 2467 * A high speed interrupt/isochronous endpoints can specify 2468 * desired polling interval between 1 to 16 micro-frames, 2469 * where as full/low endpoints can specify between 1 to 255 2470 * milliseconds. 2471 */ 2472 switch (port_status) { 2473 case USBA_LOW_SPEED_DEV: 2474 /* 2475 * Low speed endpoints are limited to specifying 2476 * only 8ms to 255ms in this driver. If a device 2477 * reports a polling interval that is less than 8ms, 2478 * it will use 8 ms instead. 2479 */ 2480 if (interval < LS_MIN_POLL_INTERVAL) { 2481 2482 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2483 "Low speed endpoint's poll interval of %d ms " 2484 "is below threshold. Rounding up to %d ms", 2485 interval, LS_MIN_POLL_INTERVAL); 2486 2487 interval = LS_MIN_POLL_INTERVAL; 2488 } 2489 2490 /* 2491 * Return an error if the polling interval is greater 2492 * than 255ms. 2493 */ 2494 if (interval > LS_MAX_POLL_INTERVAL) { 2495 2496 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2497 "Low speed endpoint's poll interval is " 2498 "greater than %d ms", LS_MAX_POLL_INTERVAL); 2499 2500 return (USB_FAILURE); 2501 } 2502 break; 2503 2504 case USBA_FULL_SPEED_DEV: 2505 /* 2506 * Return an error if the polling interval is less 2507 * than 1ms and greater than 255ms. 
2508 */ 2509 if ((interval < FS_MIN_POLL_INTERVAL) && 2510 (interval > FS_MAX_POLL_INTERVAL)) { 2511 2512 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2513 "Full speed endpoint's poll interval must " 2514 "be between %d and %d ms", FS_MIN_POLL_INTERVAL, 2515 FS_MAX_POLL_INTERVAL); 2516 2517 return (USB_FAILURE); 2518 } 2519 break; 2520 case USBA_HIGH_SPEED_DEV: 2521 /* 2522 * Return an error if the polling interval is less 1 2523 * and greater than 16. Convert this value to 125us 2524 * units using 2^(bInterval -1). refer usb 2.0 spec 2525 * page 51 for details. 2526 */ 2527 if ((interval < HS_MIN_POLL_INTERVAL) && 2528 (interval > HS_MAX_POLL_INTERVAL)) { 2529 2530 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2531 "High speed endpoint's poll interval " 2532 "must be between %d and %d units", 2533 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL); 2534 2535 return (USB_FAILURE); 2536 } 2537 2538 /* Adjust high speed device polling interval */ 2539 interval = 2540 ehci_adjust_high_speed_polling_interval(ehcip, endpoint); 2541 2542 break; 2543 } 2544 2545 /* 2546 * If polling interval is greater than 32ms, 2547 * adjust polling interval equal to 32ms. 2548 */ 2549 if (interval > EHCI_NUM_INTR_QH_LISTS) { 2550 interval = EHCI_NUM_INTR_QH_LISTS; 2551 } 2552 2553 /* 2554 * Find the nearest power of 2 that's less 2555 * than interval. 2556 */ 2557 while ((ehci_pow_2(i)) <= interval) { 2558 i++; 2559 } 2560 2561 return (ehci_pow_2((i - 1))); 2562 } 2563 2564 2565 /* 2566 * ehci_adjust_high_speed_polling_interval: 2567 */ 2568 /* ARGSUSED */ 2569 static int 2570 ehci_adjust_high_speed_polling_interval( 2571 ehci_state_t *ehcip, 2572 usb_ep_descr_t *endpoint) 2573 { 2574 uint_t interval; 2575 2576 /* Get the polling interval */ 2577 interval = ehci_pow_2(endpoint->bInterval - 1); 2578 2579 /* 2580 * Convert polling interval from micro seconds 2581 * to milli seconds. 
2582 */ 2583 if (interval <= EHCI_MAX_UFRAMES) { 2584 interval = 1; 2585 } else { 2586 interval = interval/EHCI_MAX_UFRAMES; 2587 } 2588 2589 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2590 "ehci_adjust_high_speed_polling_interval: " 2591 "High speed adjusted interval 0x%x", interval); 2592 2593 return (interval); 2594 } 2595 2596 2597 /* 2598 * ehci_lattice_height: 2599 * 2600 * Given the requested bandwidth, find the height in the tree at which the 2601 * nodes for this bandwidth fall. The height is measured as the number of 2602 * nodes from the leaf to the level specified by bandwidth The root of the 2603 * tree is at height TREE_HEIGHT. 2604 */ 2605 static uint_t 2606 ehci_lattice_height(uint_t interval) 2607 { 2608 return (TREE_HEIGHT - (ehci_log_2(interval))); 2609 } 2610 2611 2612 /* 2613 * ehci_lattice_parent: 2614 * 2615 * Given a node in the lattice, find the index of the parent node 2616 */ 2617 static uint_t 2618 ehci_lattice_parent(uint_t node) 2619 { 2620 if ((node % 2) == 0) { 2621 2622 return ((node/2) - 1); 2623 } else { 2624 2625 return ((node + 1)/2 - 1); 2626 } 2627 } 2628 2629 2630 /* 2631 * ehci_find_periodic_node: 2632 * 2633 * Based on the "real" array leaf node and interval, get the periodic node. 2634 */ 2635 static uint_t 2636 ehci_find_periodic_node(uint_t leaf, int interval) { 2637 uint_t lattice_leaf; 2638 uint_t height = ehci_lattice_height(interval); 2639 uint_t pnode; 2640 int i; 2641 2642 /* Get the leaf number in the lattice */ 2643 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1; 2644 2645 /* Get the node in the lattice based on the height and leaf */ 2646 pnode = lattice_leaf; 2647 for (i = 0; i < height; i++) { 2648 pnode = ehci_lattice_parent(pnode); 2649 } 2650 2651 return (pnode); 2652 } 2653 2654 2655 /* 2656 * ehci_leftmost_leaf: 2657 * 2658 * Find the leftmost leaf in the subtree specified by the node. Height refers 2659 * to number of nodes from the bottom of the tree to the node, including the 2660 * node. 
2661 * 2662 * The formula for a zero based tree is: 2663 * 2^H * Node + 2^H - 1 2664 * The leaf of the tree is an array, convert the number for the array. 2665 * Subtract the size of nodes not in the array 2666 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) = 2667 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS = 2668 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS 2669 * 0 2670 * 1 2 2671 * 0 1 2 3 2672 */ 2673 static uint_t 2674 ehci_leftmost_leaf( 2675 uint_t node, 2676 uint_t height) 2677 { 2678 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS); 2679 } 2680 2681 2682 /* 2683 * ehci_pow_2: 2684 * 2685 * Compute 2 to the power 2686 */ 2687 static uint_t 2688 ehci_pow_2(uint_t x) 2689 { 2690 if (x == 0) { 2691 2692 return (1); 2693 } else { 2694 2695 return (2 << (x - 1)); 2696 } 2697 } 2698 2699 2700 /* 2701 * ehci_log_2: 2702 * 2703 * Compute log base 2 of x 2704 */ 2705 static uint_t 2706 ehci_log_2(uint_t x) 2707 { 2708 int i = 0; 2709 2710 while (x != 1) { 2711 x = x >> 1; 2712 i++; 2713 } 2714 2715 return (i); 2716 } 2717 2718 2719 /* 2720 * ehci_find_bestfit_hs_mask: 2721 * 2722 * Find the smask and cmask in the bandwidth allocation, and update the 2723 * bandwidth allocation. 
2724 */ 2725 static int 2726 ehci_find_bestfit_hs_mask( 2727 ehci_state_t *ehcip, 2728 uchar_t *smask, 2729 uint_t *pnode, 2730 usb_ep_descr_t *endpoint, 2731 uint_t bandwidth, 2732 int interval) 2733 { 2734 int i; 2735 uint_t elements, index; 2736 int array_leaf, best_array_leaf; 2737 uint_t node_bandwidth, best_node_bandwidth; 2738 uint_t leaf_count; 2739 uchar_t bw_mask; 2740 uchar_t best_smask; 2741 2742 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2743 "ehci_find_bestfit_hs_mask: "); 2744 2745 /* Get all the valid smasks */ 2746 switch (ehci_pow_2(endpoint->bInterval - 1)) { 2747 case EHCI_INTR_1US_POLL: 2748 index = EHCI_1US_MASK_INDEX; 2749 elements = EHCI_INTR_1US_POLL; 2750 break; 2751 case EHCI_INTR_2US_POLL: 2752 index = EHCI_2US_MASK_INDEX; 2753 elements = EHCI_INTR_2US_POLL; 2754 break; 2755 case EHCI_INTR_4US_POLL: 2756 index = EHCI_4US_MASK_INDEX; 2757 elements = EHCI_INTR_4US_POLL; 2758 break; 2759 case EHCI_INTR_XUS_POLL: 2760 default: 2761 index = EHCI_XUS_MASK_INDEX; 2762 elements = EHCI_INTR_XUS_POLL; 2763 break; 2764 } 2765 2766 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 2767 2768 /* 2769 * Because of the way the leaves are setup, we will automatically 2770 * hit the leftmost leaf of every possible node with this interval. 2771 */ 2772 best_smask = 0x00; 2773 best_node_bandwidth = 0; 2774 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 2775 /* Find the bandwidth mask */ 2776 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip, 2777 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask); 2778 2779 /* 2780 * If this node cannot support our requirements skip to the 2781 * next leaf. 2782 */ 2783 if (bw_mask == 0x00) { 2784 continue; 2785 } 2786 2787 /* 2788 * Now make sure our bandwidth requirements can be 2789 * satisfied with one of smasks in this node. 
2790 */ 2791 *smask = 0x00; 2792 for (i = index; i < (index + elements); i++) { 2793 /* Check the start split mask value */ 2794 if (ehci_start_split_mask[index] & bw_mask) { 2795 *smask = ehci_start_split_mask[index]; 2796 break; 2797 } 2798 } 2799 2800 /* 2801 * If an appropriate smask is found save the information if: 2802 * o best_smask has not been found yet. 2803 * - or - 2804 * o This is the node with the least amount of bandwidth 2805 */ 2806 if ((*smask != 0x00) && 2807 ((best_smask == 0x00) || 2808 (best_node_bandwidth > node_bandwidth))) { 2809 2810 best_node_bandwidth = node_bandwidth; 2811 best_array_leaf = array_leaf; 2812 best_smask = *smask; 2813 } 2814 } 2815 2816 /* 2817 * If we find node that can handle the bandwidth populate the 2818 * appropriate variables and return success. 2819 */ 2820 if (best_smask) { 2821 *smask = best_smask; 2822 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 2823 interval); 2824 ehci_update_bw_availability(ehcip, bandwidth, 2825 ehci_index[best_array_leaf], leaf_count, best_smask); 2826 2827 return (USB_SUCCESS); 2828 } 2829 2830 return (USB_FAILURE); 2831 } 2832 2833 2834 /* 2835 * ehci_find_bestfit_ls_intr_mask: 2836 * 2837 * Find the smask and cmask in the bandwidth allocation. 
2838 */ 2839 static int 2840 ehci_find_bestfit_ls_intr_mask( 2841 ehci_state_t *ehcip, 2842 uchar_t *smask, 2843 uchar_t *cmask, 2844 uint_t *pnode, 2845 uint_t sbandwidth, 2846 uint_t cbandwidth, 2847 int interval) 2848 { 2849 int i; 2850 uint_t elements, index; 2851 int array_leaf, best_array_leaf; 2852 uint_t node_sbandwidth, node_cbandwidth; 2853 uint_t best_node_bandwidth; 2854 uint_t leaf_count; 2855 uchar_t bw_smask, bw_cmask; 2856 uchar_t best_smask, best_cmask; 2857 2858 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2859 "ehci_find_bestfit_ls_intr_mask: "); 2860 2861 /* For low and full speed devices */ 2862 index = EHCI_XUS_MASK_INDEX; 2863 elements = EHCI_INTR_4MS_POLL; 2864 2865 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 2866 2867 /* 2868 * Because of the way the leaves are setup, we will automatically 2869 * hit the leftmost leaf of every possible node with this interval. 2870 */ 2871 best_smask = 0x00; 2872 best_node_bandwidth = 0; 2873 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 2874 /* Find the bandwidth mask */ 2875 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip, 2876 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask); 2877 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip, 2878 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask); 2879 2880 /* 2881 * If this node cannot support our requirements skip to the 2882 * next leaf. 2883 */ 2884 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) { 2885 continue; 2886 } 2887 2888 /* 2889 * Now make sure our bandwidth requirements can be 2890 * satisfied with one of smasks in this node. 
2891 */ 2892 *smask = 0x00; 2893 *cmask = 0x00; 2894 for (i = index; i < (index + elements); i++) { 2895 /* Check the start split mask value */ 2896 if ((ehci_start_split_mask[index] & bw_smask) && 2897 (ehci_intr_complete_split_mask[index] & bw_cmask)) { 2898 *smask = ehci_start_split_mask[index]; 2899 *cmask = ehci_intr_complete_split_mask[index]; 2900 break; 2901 } 2902 } 2903 2904 /* 2905 * If an appropriate smask is found save the information if: 2906 * o best_smask has not been found yet. 2907 * - or - 2908 * o This is the node with the least amount of bandwidth 2909 */ 2910 if ((*smask != 0x00) && 2911 ((best_smask == 0x00) || 2912 (best_node_bandwidth > 2913 (node_sbandwidth + node_cbandwidth)))) { 2914 best_node_bandwidth = node_sbandwidth + node_cbandwidth; 2915 best_array_leaf = array_leaf; 2916 best_smask = *smask; 2917 best_cmask = *cmask; 2918 } 2919 } 2920 2921 /* 2922 * If we find node that can handle the bandwidth populate the 2923 * appropriate variables and return success. 2924 */ 2925 if (best_smask) { 2926 *smask = best_smask; 2927 *cmask = best_cmask; 2928 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 2929 interval); 2930 ehci_update_bw_availability(ehcip, sbandwidth, 2931 ehci_index[best_array_leaf], leaf_count, best_smask); 2932 ehci_update_bw_availability(ehcip, cbandwidth, 2933 ehci_index[best_array_leaf], leaf_count, best_cmask); 2934 2935 return (USB_SUCCESS); 2936 } 2937 2938 return (USB_FAILURE); 2939 } 2940 2941 2942 /* 2943 * ehci_find_bestfit_sitd_in_mask: 2944 * 2945 * Find the smask and cmask in the bandwidth allocation. 
2946 */ 2947 static int 2948 ehci_find_bestfit_sitd_in_mask( 2949 ehci_state_t *ehcip, 2950 uchar_t *smask, 2951 uchar_t *cmask, 2952 uint_t *pnode, 2953 uint_t sbandwidth, 2954 uint_t cbandwidth, 2955 int interval) 2956 { 2957 int i, uFrames, found; 2958 int array_leaf, best_array_leaf; 2959 uint_t node_sbandwidth, node_cbandwidth; 2960 uint_t best_node_bandwidth; 2961 uint_t leaf_count; 2962 uchar_t bw_smask, bw_cmask; 2963 uchar_t best_smask, best_cmask; 2964 2965 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 2966 "ehci_find_bestfit_sitd_in_mask: "); 2967 2968 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 2969 2970 /* 2971 * Because of the way the leaves are setup, we will automatically 2972 * hit the leftmost leaf of every possible node with this interval. 2973 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame. 2974 */ 2975 /* 2976 * Need to add an additional 2 uFrames, if the "L"ast 2977 * complete split is before uFrame 6. See section 2978 * 11.8.4 in USB 2.0 Spec. 
Currently we do not support 2979 * the "Back Ptr" which means we support on IN of 2980 * ~4*MAX_UFRAME_SITD_XFER bandwidth/ 2981 */ 2982 uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2; 2983 if (cbandwidth % MAX_UFRAME_SITD_XFER) { 2984 uFrames++; 2985 } 2986 if (uFrames > 6) { 2987 2988 return (USB_FAILURE); 2989 } 2990 *smask = 0x1; 2991 *cmask = 0x00; 2992 for (i = 0; i < uFrames; i++) { 2993 *cmask = *cmask << 1; 2994 *cmask |= 0x1; 2995 } 2996 /* cmask must start 2 frames after the smask */ 2997 *cmask = *cmask << 2; 2998 2999 found = 0; 3000 best_smask = 0x00; 3001 best_node_bandwidth = 0; 3002 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 3003 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3004 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask); 3005 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3006 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count, 3007 &bw_cmask); 3008 3009 /* 3010 * If this node cannot support our requirements skip to the 3011 * next leaf. 3012 */ 3013 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) { 3014 continue; 3015 } 3016 3017 for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) { 3018 if ((*smask & bw_smask) && (*cmask & bw_cmask)) { 3019 found = 1; 3020 break; 3021 } 3022 *smask = *smask << 1; 3023 *cmask = *cmask << 1; 3024 } 3025 3026 /* 3027 * If an appropriate smask is found save the information if: 3028 * o best_smask has not been found yet. 3029 * - or - 3030 * o This is the node with the least amount of bandwidth 3031 */ 3032 if (found && 3033 ((best_smask == 0x00) || 3034 (best_node_bandwidth > 3035 (node_sbandwidth + node_cbandwidth)))) { 3036 best_node_bandwidth = node_sbandwidth + node_cbandwidth; 3037 best_array_leaf = array_leaf; 3038 best_smask = *smask; 3039 best_cmask = *cmask; 3040 } 3041 } 3042 3043 /* 3044 * If we find node that can handle the bandwidth populate the 3045 * appropriate variables and return success. 
3046 */ 3047 if (best_smask) { 3048 *smask = best_smask; 3049 *cmask = best_cmask; 3050 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 3051 interval); 3052 ehci_update_bw_availability(ehcip, sbandwidth, 3053 ehci_index[best_array_leaf], leaf_count, best_smask); 3054 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER, 3055 ehci_index[best_array_leaf], leaf_count, best_cmask); 3056 3057 return (USB_SUCCESS); 3058 } 3059 3060 return (USB_FAILURE); 3061 } 3062 3063 3064 /* 3065 * ehci_find_bestfit_sitd_out_mask: 3066 * 3067 * Find the smask in the bandwidth allocation. 3068 */ 3069 static int 3070 ehci_find_bestfit_sitd_out_mask( 3071 ehci_state_t *ehcip, 3072 uchar_t *smask, 3073 uint_t *pnode, 3074 uint_t sbandwidth, 3075 int interval) 3076 { 3077 int i, uFrames, found; 3078 int array_leaf, best_array_leaf; 3079 uint_t node_sbandwidth; 3080 uint_t best_node_bandwidth; 3081 uint_t leaf_count; 3082 uchar_t bw_smask; 3083 uchar_t best_smask; 3084 3085 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3086 "ehci_find_bestfit_sitd_out_mask: "); 3087 3088 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval; 3089 3090 /* 3091 * Because of the way the leaves are setup, we will automatically 3092 * hit the leftmost leaf of every possible node with this interval. 3093 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame. 3094 */ 3095 *smask = 0x00; 3096 uFrames = sbandwidth / MAX_UFRAME_SITD_XFER; 3097 if (sbandwidth % MAX_UFRAME_SITD_XFER) { 3098 uFrames++; 3099 } 3100 for (i = 0; i < uFrames; i++) { 3101 *smask = *smask << 1; 3102 *smask |= 0x1; 3103 } 3104 3105 found = 0; 3106 best_smask = 0x00; 3107 best_node_bandwidth = 0; 3108 for (array_leaf = 0; array_leaf < interval; array_leaf++) { 3109 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip, 3110 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count, 3111 &bw_smask); 3112 3113 /* 3114 * If this node cannot support our requirements skip to the 3115 * next leaf. 
3116 */ 3117 if (bw_smask == 0x00) { 3118 continue; 3119 } 3120 3121 /* You cannot have a start split on the 8th uFrame */ 3122 for (i = 0; (*smask & 0x80) == 0; i++) { 3123 if (*smask & bw_smask) { 3124 found = 1; 3125 break; 3126 } 3127 *smask = *smask << 1; 3128 } 3129 3130 /* 3131 * If an appropriate smask is found save the information if: 3132 * o best_smask has not been found yet. 3133 * - or - 3134 * o This is the node with the least amount of bandwidth 3135 */ 3136 if (found && 3137 ((best_smask == 0x00) || 3138 (best_node_bandwidth > node_sbandwidth))) { 3139 best_node_bandwidth = node_sbandwidth; 3140 best_array_leaf = array_leaf; 3141 best_smask = *smask; 3142 } 3143 } 3144 3145 /* 3146 * If we find node that can handle the bandwidth populate the 3147 * appropriate variables and return success. 3148 */ 3149 if (best_smask) { 3150 *smask = best_smask; 3151 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf], 3152 interval); 3153 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER, 3154 ehci_index[best_array_leaf], leaf_count, best_smask); 3155 3156 return (USB_SUCCESS); 3157 } 3158 3159 return (USB_FAILURE); 3160 } 3161 3162 3163 /* 3164 * ehci_calculate_bw_availability_mask: 3165 * 3166 * Returns the "total bandwidth used" in this node. 3167 * Populates bw_mask with the uFrames that can support the bandwidth. 3168 * 3169 * If all the Frames cannot support this bandwidth, then bw_mask 3170 * will return 0x00 and the "total bandwidth used" will be invalid. 
3171 */ 3172 static uint_t 3173 ehci_calculate_bw_availability_mask( 3174 ehci_state_t *ehcip, 3175 uint_t bandwidth, 3176 int leaf, 3177 int leaf_count, 3178 uchar_t *bw_mask) 3179 { 3180 int i, j; 3181 uchar_t bw_uframe; 3182 int uframe_total; 3183 ehci_frame_bandwidth_t *fbp; 3184 uint_t total_bandwidth = 0; 3185 3186 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3187 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d", 3188 leaf, leaf_count); 3189 3190 /* Start by saying all uFrames are available */ 3191 *bw_mask = 0xFF; 3192 3193 for (i = 0; (i < leaf_count) || (*bw_mask == 0x00); i++) { 3194 fbp = &ehcip->ehci_frame_bandwidth[leaf + i]; 3195 3196 total_bandwidth += fbp->ehci_allocated_frame_bandwidth; 3197 3198 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3199 /* 3200 * If the uFrame in bw_mask is available check to see if 3201 * it can support the additional bandwidth. 3202 */ 3203 bw_uframe = (*bw_mask & (0x1 << j)); 3204 uframe_total = 3205 fbp->ehci_micro_frame_bandwidth[j] + 3206 bandwidth; 3207 if ((bw_uframe) && 3208 (uframe_total > HS_PERIODIC_BANDWIDTH)) { 3209 *bw_mask = *bw_mask & ~bw_uframe; 3210 } 3211 } 3212 } 3213 3214 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl, 3215 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x", 3216 *bw_mask); 3217 3218 return (total_bandwidth); 3219 } 3220 3221 3222 /* 3223 * ehci_update_bw_availability: 3224 * 3225 * The leftmost leaf needs to be in terms of array position and 3226 * not the actual lattice position. 
3227 */ 3228 static void 3229 ehci_update_bw_availability( 3230 ehci_state_t *ehcip, 3231 int bandwidth, 3232 int leftmost_leaf, 3233 int leaf_count, 3234 uchar_t mask) 3235 { 3236 int i, j; 3237 ehci_frame_bandwidth_t *fbp; 3238 int uFrame_bandwidth[8]; 3239 3240 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3241 "ehci_update_bw_availability: " 3242 "leaf %d count %d bandwidth 0x%x mask 0x%x", 3243 leftmost_leaf, leaf_count, bandwidth, mask); 3244 3245 ASSERT(leftmost_leaf < 32); 3246 ASSERT(leftmost_leaf >= 0); 3247 3248 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3249 if (mask & 0x1) { 3250 uFrame_bandwidth[j] = bandwidth; 3251 } else { 3252 uFrame_bandwidth[j] = 0; 3253 } 3254 3255 mask = mask >> 1; 3256 } 3257 3258 /* Updated all the effected leafs with the bandwidth */ 3259 for (i = 0; i < leaf_count; i++) { 3260 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i]; 3261 3262 for (j = 0; j < EHCI_MAX_UFRAMES; j++) { 3263 fbp->ehci_micro_frame_bandwidth[j] += 3264 uFrame_bandwidth[j]; 3265 fbp->ehci_allocated_frame_bandwidth += 3266 uFrame_bandwidth[j]; 3267 } 3268 } 3269 } 3270 3271 /* 3272 * Miscellaneous functions 3273 */ 3274 3275 /* 3276 * ehci_obtain_state: 3277 * 3278 * NOTE: This function is also called from POLLED MODE. 3279 */ 3280 ehci_state_t * 3281 ehci_obtain_state(dev_info_t *dip) 3282 { 3283 int instance = ddi_get_instance(dip); 3284 3285 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance); 3286 3287 ASSERT(state != NULL); 3288 3289 return (state); 3290 } 3291 3292 3293 /* 3294 * ehci_state_is_operational: 3295 * 3296 * Check the Host controller state and return proper values. 
 */
int
ehci_state_is_operational(ehci_state_t *ehcip)
{
	int	val;

	/* Caller must already hold the interrupt mutex */
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	switch (ehcip->ehci_hc_soft_state) {
	case EHCI_CTLR_INIT_STATE:
	case EHCI_CTLR_SUSPEND_STATE:
		/* Not started yet, or suspended: not usable right now */
		val = USB_FAILURE;
		break;
	case EHCI_CTLR_OPERATIONAL_STATE:
		val = USB_SUCCESS;
		break;
	case EHCI_CTLR_ERROR_STATE:
		/* A hardware fault was recorded earlier */
		val = USB_HC_HARDWARE_ERROR;
		break;
	default:
		/* Unknown state: treat conservatively as failure */
		val = USB_FAILURE;
		break;
	}

	return (val);
}


/*
 * ehci_do_soft_reset
 *
 * Do soft reset of ehci host controller.
 *
 * Saves the operational registers, performs a "light" host controller
 * reset, restores the registers, restarts the controller and checks
 * that the frame number is advancing again.  Returns USB_SUCCESS when
 * the controller is running afterwards, USB_FAILURE otherwise.
 *
 * Must be called with ehci_int_mutex held.
 */
int
ehci_do_soft_reset(ehci_state_t *ehcip)
{
	usb_frame_number_t	before_frame_number, after_frame_number;
	ehci_regs_t		*ehci_save_regs;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Increment host controller error count */
	ehcip->ehci_hc_error++;

	USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
	    "ehci_do_soft_reset:"
	    "Reset ehci host controller 0x%x", ehcip->ehci_hc_error);

	/*
	 * Allocate space for saving current Host Controller
	 * registers. Don't do any recovery if allocation
	 * fails.
	 *
	 * NOTE(review): KM_NOSLEEP presumably because this may run
	 * from a context that must not block — confirm against callers.
	 */
	ehci_save_regs = (ehci_regs_t *)
	    kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP);

	if (ehci_save_regs == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
		    "ehci_do_soft_reset: kmem_zalloc failed");

		return (USB_FAILURE);
	}

	/* Save current ehci registers */
	ehci_save_regs->ehci_command = Get_OpReg(ehci_command);
	ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt);
	ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment);
	ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr);
	ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag);
	ehci_save_regs->ehci_periodic_list_base =
	    Get_OpReg(ehci_periodic_list_base);

	USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
	    "ehci_do_soft_reset: Save reg = 0x%p", ehci_save_regs);

	/* Disable all list processing and interrupts */
	Set_OpReg(ehci_command, Get_OpReg(ehci_command) &
	    ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE));

	/* Disable all EHCI interrupts */
	Set_OpReg(ehci_interrupt, 0);

	/* Wait for few milliseconds */
	drv_usecwait(EHCI_SOF_TIMEWAIT);

	/* Do light soft reset of ehci host controller */
	Set_OpReg(ehci_command,
	    Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET);

	USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
	    "ehci_do_soft_reset: Reset in progress");

	/* Wait for reset to complete */
	drv_usecwait(EHCI_RESET_TIMEWAIT);

	/*
	 * Restore previous saved EHCI register value
	 * into the current EHCI registers.
	 */
	Set_OpReg(ehci_ctrl_segment, (uint32_t)
	    ehci_save_regs->ehci_ctrl_segment);

	Set_OpReg(ehci_periodic_list_base, (uint32_t)
	    ehci_save_regs->ehci_periodic_list_base);

	Set_OpReg(ehci_async_list_addr, (uint32_t)
	    ehci_save_regs->ehci_async_list_addr);

	Set_OpReg(ehci_config_flag, (uint32_t)
	    ehci_save_regs->ehci_config_flag);

	/* Enable both Asynchronous and Periodic Schedule if necessary */
	ehci_toggle_scheduler(ehcip);

	/*
	 * Set ehci_interrupt to enable all interrupts except Root
	 * Hub Status change and frame list rollover interrupts.
	 */
	Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
	    EHCI_INTR_FRAME_LIST_ROLLOVER |
	    EHCI_INTR_USB_ERROR |
	    EHCI_INTR_USB);

	/*
	 * Deallocate the space that allocated for saving
	 * HC registers.
	 */
	kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t));

	/*
	 * Set the desired interrupt threshold, frame list size (if
	 * applicable) and turn EHCI host controller.
	 */
	Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) &
	    ~EHCI_CMD_INTR_THRESHOLD) |
	    (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));

	/* Wait 10ms for EHCI to start sending SOF */
	drv_usecwait(EHCI_RESET_TIMEWAIT);

	/*
	 * Get the current usb frame number before waiting for
	 * few milliseconds.
	 */
	before_frame_number = ehci_get_current_frame_number(ehcip);

	/* Wait for few milliseconds */
	drv_usecwait(EHCI_SOF_TIMEWAIT);

	/*
	 * Get the current usb frame number after waiting for
	 * few milliseconds.
	 */
	after_frame_number = ehci_get_current_frame_number(ehcip);

	USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
	    "ehci_do_soft_reset: Before Frame Number 0x%llx "
	    "After Frame Number 0x%llx",
	    before_frame_number, after_frame_number);

	/*
	 * Reset is deemed failed only when the frame number did not
	 * advance AND the controller reports itself halted.
	 */
	if ((after_frame_number <= before_frame_number) &&
	    (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {

		USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
		    "ehci_do_soft_reset: Soft reset failed");

		return (USB_FAILURE);
	}

	return (USB_SUCCESS);
}


/*
 * ehci_get_xfer_attrs:
 *
 * Get the attributes of a particular xfer.
 *
 * Returns USB_ATTRS_NONE for endpoint types without a handled case
 * below (e.g. isochronous, whose attributes are kept elsewhere).
 *
 * NOTE: This function is also called from POLLED MODE.
 */
usb_req_attrs_t
ehci_get_xfer_attrs(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	ehci_trans_wrapper_t	*tw)
{
	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
	usb_req_attrs_t		attrs = USB_ATTRS_NONE;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_get_xfer_attrs:");

	/* Pick the request attributes field matching the endpoint type */
	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
	case USB_EP_ATTR_CONTROL:
		attrs = ((usb_ctrl_req_t *)
		    tw->tw_curr_xfer_reqp)->ctrl_attributes;
		break;
	case USB_EP_ATTR_BULK:
		attrs = ((usb_bulk_req_t *)
		    tw->tw_curr_xfer_reqp)->bulk_attributes;
		break;
	case USB_EP_ATTR_INTR:
		attrs = ((usb_intr_req_t *)
		    tw->tw_curr_xfer_reqp)->intr_attributes;
		break;
	}

	return (attrs);
}


/*
 * ehci_get_current_frame_number:
 *
 * Get the current software based usb frame number.
 */
usb_frame_number_t
ehci_get_current_frame_number(ehci_state_t *ehcip)
{
	usb_frame_number_t	usb_frame_number;
	usb_frame_number_t	ehci_fno, micro_frame_number;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* Software-maintained high-order part of the frame number */
	ehci_fno = ehcip->ehci_fno;

	/* Hardware FRINDEX register: 14 significant uFrame bits */
	micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;

	/*
	 * Calculate current software based usb frame number.
	 *
	 * This code accounts for the fact that frame number is
	 * updated by the Host Controller before the ehci driver
	 * gets an FrameListRollover interrupt that will adjust
	 * Frame higher part.
	 *
	 * Refer ehci specification 1.0, section 2.3.2, page 21.
	 *
	 * The low 13 bits come straight from hardware; the final term
	 * adds 0x2000 exactly when bit 13 of the hardware counter
	 * disagrees with the software copy, i.e. the counter crossed
	 * a 0x2000 boundary that software has not yet folded in, so
	 * the combined value never steps backwards.
	 */
	micro_frame_number = ((micro_frame_number & 0x1FFF) |
	    ehci_fno) + (((micro_frame_number & 0x3FFF) ^
	    ehci_fno) & 0x2000);

	/*
	 * Micro Frame number is equivalent to 125 usec. Eight
	 * Micro Frame numbers are equivalent to one millsecond
	 * or one usb frame number.
	 */
	usb_frame_number = micro_frame_number >>
	    EHCI_uFRAMES_PER_USB_FRAME_SHIFT;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_get_current_frame_number: "
	    "Current usb uframe number = 0x%llx "
	    "Current usb frame number = 0x%llx",
	    micro_frame_number, usb_frame_number);

	return (usb_frame_number);
}


/*
 * ehci_cpr_cleanup:
 *
 * Cleanup ehci state and other ehci specific information across
 * Check Point Resume (CPR).
3561 */ 3562 static void 3563 ehci_cpr_cleanup(ehci_state_t *ehcip) 3564 { 3565 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3566 3567 /* Reset software part of usb frame number */ 3568 ehcip->ehci_fno = 0; 3569 } 3570 3571 3572 /* 3573 * ehci_wait_for_sof: 3574 * 3575 * Wait for couple of SOF interrupts 3576 */ 3577 int 3578 ehci_wait_for_sof(ehci_state_t *ehcip) 3579 { 3580 usb_frame_number_t before_frame_number, after_frame_number; 3581 int error = USB_SUCCESS; 3582 3583 USB_DPRINTF_L4(PRINT_MASK_LISTS, 3584 ehcip->ehci_log_hdl, "ehci_wait_for_sof"); 3585 3586 ASSERT(mutex_owned(&ehcip->ehci_int_mutex)); 3587 3588 error = ehci_state_is_operational(ehcip); 3589 3590 if (error != USB_SUCCESS) { 3591 3592 return (error); 3593 } 3594 3595 /* Get the current usb frame number before waiting for two SOFs */ 3596 before_frame_number = ehci_get_current_frame_number(ehcip); 3597 3598 mutex_exit(&ehcip->ehci_int_mutex); 3599 3600 /* Wait for few milliseconds */ 3601 delay(drv_usectohz(EHCI_SOF_TIMEWAIT)); 3602 3603 mutex_enter(&ehcip->ehci_int_mutex); 3604 3605 /* Get the current usb frame number after woken up */ 3606 after_frame_number = ehci_get_current_frame_number(ehcip); 3607 3608 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3609 "ehci_wait_for_sof: framenumber: before 0x%llx " 3610 "after 0x%llx", before_frame_number, after_frame_number); 3611 3612 /* Return failure, if usb frame number has not been changed */ 3613 if (after_frame_number <= before_frame_number) { 3614 3615 if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) { 3616 3617 USB_DPRINTF_L0(PRINT_MASK_LISTS, 3618 ehcip->ehci_log_hdl, "No SOF interrupts"); 3619 3620 /* Set host controller soft state to error */ 3621 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE; 3622 3623 return (USB_FAILURE); 3624 } 3625 3626 /* Get new usb frame number */ 3627 after_frame_number = before_frame_number = 3628 ehci_get_current_frame_number(ehcip); 3629 } 3630 3631 ASSERT(after_frame_number > before_frame_number); 
3632 3633 return (USB_SUCCESS); 3634 } 3635 3636 3637 /* 3638 * ehci_toggle_scheduler: 3639 * 3640 * Turn scheduler based on pipe open count. 3641 */ 3642 void 3643 ehci_toggle_scheduler(ehci_state_t *ehcip) { 3644 uint_t temp_reg, cmd_reg; 3645 3646 cmd_reg = Get_OpReg(ehci_command); 3647 temp_reg = cmd_reg; 3648 3649 /* 3650 * Enable/Disable asynchronous scheduler, and 3651 * turn on/off async list door bell 3652 */ 3653 if (ehcip->ehci_open_async_count) { 3654 if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) { 3655 /* 3656 * For some reason this address might get nulled out by 3657 * the ehci chip. Set it here just in case it is null. 3658 */ 3659 Set_OpReg(ehci_async_list_addr, 3660 ehci_qh_cpu_to_iommu(ehcip, 3661 ehcip->ehci_head_of_async_sched_list)); 3662 } 3663 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE; 3664 } else { 3665 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE; 3666 } 3667 3668 if (ehcip->ehci_open_periodic_count) { 3669 if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) { 3670 /* 3671 * For some reason this address get's nulled out by 3672 * the ehci chip. Set it here just in case it is null. 
3673 */ 3674 Set_OpReg(ehci_periodic_list_base, 3675 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address & 3676 0xFFFFF000)); 3677 } 3678 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE; 3679 } else { 3680 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE; 3681 } 3682 3683 /* Just an optimization */ 3684 if (temp_reg != cmd_reg) { 3685 Set_OpReg(ehci_command, cmd_reg); 3686 } 3687 } 3688 3689 /* 3690 * ehci print functions 3691 */ 3692 3693 /* 3694 * ehci_print_caps: 3695 */ 3696 void 3697 ehci_print_caps(ehci_state_t *ehcip) 3698 { 3699 uint_t i; 3700 3701 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3702 "\n\tUSB 2.0 Host Controller Characteristics\n"); 3703 3704 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3705 "Caps Length: 0x%x Version: 0x%x\n", 3706 Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version)); 3707 3708 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3709 "Structural Parameters\n"); 3710 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3711 "Port indicators: %s", (Get_Cap(ehci_hcs_params) & 3712 EHCI_HCS_PORT_INDICATOR) ? "Yes" : "No"); 3713 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3714 "No of Classic host controllers: 0x%x", 3715 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS) 3716 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT); 3717 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3718 "No of ports per Classic host controller: 0x%x", 3719 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC) 3720 >> EHCI_HCS_NUM_PORTS_CC_SHIFT); 3721 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3722 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) & 3723 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No"); 3724 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3725 "Port power control: %s", (Get_Cap(ehci_hcs_params) & 3726 EHCI_HCS_PORT_POWER_CONTROL) ? 
"Yes" : "No"); 3727 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3728 "No of root hub ports: 0x%x\n", 3729 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); 3730 3731 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3732 "Capability Parameters\n"); 3733 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3734 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) & 3735 EHCI_HCC_EECP) ? "Yes" : "No"); 3736 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3737 "Isoch schedule threshold: 0x%x", 3738 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD); 3739 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3740 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) & 3741 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No"); 3742 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3743 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) & 3744 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024"); 3745 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3746 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) & 3747 EHCI_HCC_64BIT_ADDR_CAP) ? 
"Yes" : "No"); 3748 3749 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3750 "Classic Port Route Description"); 3751 3752 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) { 3753 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3754 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i])); 3755 } 3756 } 3757 3758 3759 /* 3760 * ehci_print_regs: 3761 */ 3762 void 3763 ehci_print_regs(ehci_state_t *ehcip) 3764 { 3765 uint_t i; 3766 3767 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3768 "\n\tEHCI%d Operational Registers\n", 3769 ddi_get_instance(ehcip->ehci_dip)); 3770 3771 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3772 "Command: 0x%x Status: 0x%x", 3773 Get_OpReg(ehci_command), Get_OpReg(ehci_status)); 3774 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3775 "Interrupt: 0x%x Frame Index: 0x%x", 3776 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index)); 3777 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3778 "Control Segment: 0x%x Periodic List Base: 0x%x", 3779 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base)); 3780 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3781 "Async List Addr: 0x%x Config Flag: 0x%x", 3782 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag)); 3783 3784 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3785 "Root Hub Port Status"); 3786 3787 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) { 3788 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, 3789 "\tPort Status 0x%x: 0x%x ", i, 3790 Get_OpReg(ehci_rh_port_status[i])); 3791 } 3792 } 3793 3794 3795 /* 3796 * ehci_print_qh: 3797 */ 3798 void 3799 ehci_print_qh( 3800 ehci_state_t *ehcip, 3801 ehci_qh_t *qh) 3802 { 3803 uint_t i; 3804 3805 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3806 "ehci_print_qh: qh = 0x%p", (void *)qh); 3807 3808 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3809 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr)); 3810 
USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3811 "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl)); 3812 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3813 "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl)); 3814 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3815 "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd)); 3816 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3817 "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd)); 3818 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3819 "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd)); 3820 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3821 "\tqh_status: 0x%x ", Get_QH(qh->qh_status)); 3822 3823 for (i = 0; i < 5; i++) { 3824 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3825 "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i])); 3826 } 3827 3828 for (i = 0; i < 5; i++) { 3829 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3830 "\tqh_buf_high[%d]: 0x%x ", 3831 i, Get_QH(qh->qh_buf_high[i])); 3832 } 3833 3834 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3835 "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd)); 3836 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3837 "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev)); 3838 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3839 "\tqh_state: 0x%x ", Get_QH(qh->qh_state)); 3840 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3841 "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next)); 3842 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3843 "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame)); 3844 } 3845 3846 3847 /* 3848 * ehci_print_qtd: 3849 */ 3850 void 3851 ehci_print_qtd( 3852 ehci_state_t *ehcip, 3853 ehci_qtd_t *qtd) 3854 { 3855 uint_t i; 3856 3857 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3858 "ehci_print_qtd: qtd = 0x%p", (void *)qtd); 3859 3860 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3861 "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd)); 3862 
USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3863 "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd)); 3864 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3865 "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl)); 3866 3867 for (i = 0; i < 5; i++) { 3868 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3869 "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i])); 3870 } 3871 3872 for (i = 0; i < 5; i++) { 3873 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3874 "\tqtd_buf_high[%d]: 0x%x ", 3875 i, Get_QTD(qtd->qtd_buf_high[i])); 3876 } 3877 3878 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3879 "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper)); 3880 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3881 "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd)); 3882 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3883 "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next)); 3884 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3885 "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev)); 3886 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3887 "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state)); 3888 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3889 "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase)); 3890 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3891 "\tqtd_xfer_addr: 0x%x ", Get_QTD(qtd->qtd_xfer_addr)); 3892 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl, 3893 "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len)); 3894 } 3895 3896 /* 3897 * ehci kstat functions 3898 */ 3899 3900 /* 3901 * ehci_create_stats: 3902 * 3903 * Allocate and initialize the ehci kstat structures 3904 */ 3905 void 3906 ehci_create_stats(ehci_state_t *ehcip) 3907 { 3908 char kstatname[KSTAT_STRLEN]; 3909 const char *dname = ddi_driver_name(ehcip->ehci_dip); 3910 char *usbtypes[USB_N_COUNT_KSTATS] = 3911 {"ctrl", "isoch", "bulk", "intr"}; 3912 uint_t instance = 
ehcip->ehci_instance; 3913 ehci_intrs_stats_t *isp; 3914 int i; 3915 3916 if (EHCI_INTRS_STATS(ehcip) == NULL) { 3917 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs", 3918 dname, instance); 3919 EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance, 3920 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED, 3921 sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t), 3922 KSTAT_FLAG_PERSISTENT); 3923 3924 if (EHCI_INTRS_STATS(ehcip)) { 3925 isp = EHCI_INTRS_STATS_DATA(ehcip); 3926 kstat_named_init(&isp->ehci_sts_total, 3927 "Interrupts Total", KSTAT_DATA_UINT64); 3928 kstat_named_init(&isp->ehci_sts_not_claimed, 3929 "Not Claimed", KSTAT_DATA_UINT64); 3930 kstat_named_init(&isp->ehci_sts_async_sched_status, 3931 "Async schedule status", KSTAT_DATA_UINT64); 3932 kstat_named_init(&isp->ehci_sts_periodic_sched_status, 3933 "Periodic sched status", KSTAT_DATA_UINT64); 3934 kstat_named_init(&isp->ehci_sts_empty_async_schedule, 3935 "Empty async schedule", KSTAT_DATA_UINT64); 3936 kstat_named_init(&isp->ehci_sts_host_ctrl_halted, 3937 "Host controller Halted", KSTAT_DATA_UINT64); 3938 kstat_named_init(&isp->ehci_sts_async_advance_intr, 3939 "Intr on async advance", KSTAT_DATA_UINT64); 3940 kstat_named_init(&isp->ehci_sts_host_system_error_intr, 3941 "Host system error", KSTAT_DATA_UINT64); 3942 kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr, 3943 "Frame list rollover", KSTAT_DATA_UINT64); 3944 kstat_named_init(&isp->ehci_sts_rh_port_change_intr, 3945 "Port change detect", KSTAT_DATA_UINT64); 3946 kstat_named_init(&isp->ehci_sts_usb_error_intr, 3947 "USB error interrupt", KSTAT_DATA_UINT64); 3948 kstat_named_init(&isp->ehci_sts_usb_intr, 3949 "USB interrupt", KSTAT_DATA_UINT64); 3950 3951 EHCI_INTRS_STATS(ehcip)->ks_private = ehcip; 3952 EHCI_INTRS_STATS(ehcip)->ks_update = nulldev; 3953 kstat_install(EHCI_INTRS_STATS(ehcip)); 3954 } 3955 } 3956 3957 if (EHCI_TOTAL_STATS(ehcip) == NULL) { 3958 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total", 3959 dname, 
instance); 3960 EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance, 3961 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1, 3962 KSTAT_FLAG_PERSISTENT); 3963 3964 if (EHCI_TOTAL_STATS(ehcip)) { 3965 kstat_install(EHCI_TOTAL_STATS(ehcip)); 3966 } 3967 } 3968 3969 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 3970 if (ehcip->ehci_count_stats[i] == NULL) { 3971 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s", 3972 dname, instance, usbtypes[i]); 3973 ehcip->ehci_count_stats[i] = kstat_create("usba", 3974 instance, kstatname, "usb_byte_count", 3975 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 3976 3977 if (ehcip->ehci_count_stats[i]) { 3978 kstat_install(ehcip->ehci_count_stats[i]); 3979 } 3980 } 3981 } 3982 } 3983 3984 3985 /* 3986 * ehci_destroy_stats: 3987 * 3988 * Clean up ehci kstat structures 3989 */ 3990 void 3991 ehci_destroy_stats(ehci_state_t *ehcip) 3992 { 3993 int i; 3994 3995 if (EHCI_INTRS_STATS(ehcip)) { 3996 kstat_delete(EHCI_INTRS_STATS(ehcip)); 3997 EHCI_INTRS_STATS(ehcip) = NULL; 3998 } 3999 4000 if (EHCI_TOTAL_STATS(ehcip)) { 4001 kstat_delete(EHCI_TOTAL_STATS(ehcip)); 4002 EHCI_TOTAL_STATS(ehcip) = NULL; 4003 } 4004 4005 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 4006 if (ehcip->ehci_count_stats[i]) { 4007 kstat_delete(ehcip->ehci_count_stats[i]); 4008 ehcip->ehci_count_stats[i] = NULL; 4009 } 4010 } 4011 } 4012 4013 4014 /* 4015 * ehci_do_intrs_stats: 4016 * 4017 * ehci status information 4018 */ 4019 void 4020 ehci_do_intrs_stats( 4021 ehci_state_t *ehcip, 4022 int val) 4023 { 4024 if (EHCI_INTRS_STATS(ehcip)) { 4025 EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++; 4026 switch (val) { 4027 case EHCI_STS_ASYNC_SCHED_STATUS: 4028 EHCI_INTRS_STATS_DATA(ehcip)-> 4029 ehci_sts_async_sched_status.value.ui64++; 4030 break; 4031 case EHCI_STS_PERIODIC_SCHED_STATUS: 4032 EHCI_INTRS_STATS_DATA(ehcip)-> 4033 ehci_sts_periodic_sched_status.value.ui64++; 4034 break; 4035 case EHCI_STS_EMPTY_ASYNC_SCHEDULE: 4036 EHCI_INTRS_STATS_DATA(ehcip)-> 4037 
ehci_sts_empty_async_schedule.value.ui64++; 4038 break; 4039 case EHCI_STS_HOST_CTRL_HALTED: 4040 EHCI_INTRS_STATS_DATA(ehcip)-> 4041 ehci_sts_host_ctrl_halted.value.ui64++; 4042 break; 4043 case EHCI_STS_ASYNC_ADVANCE_INTR: 4044 EHCI_INTRS_STATS_DATA(ehcip)-> 4045 ehci_sts_async_advance_intr.value.ui64++; 4046 break; 4047 case EHCI_STS_HOST_SYSTEM_ERROR_INTR: 4048 EHCI_INTRS_STATS_DATA(ehcip)-> 4049 ehci_sts_host_system_error_intr.value.ui64++; 4050 break; 4051 case EHCI_STS_FRM_LIST_ROLLOVER_INTR: 4052 EHCI_INTRS_STATS_DATA(ehcip)-> 4053 ehci_sts_frm_list_rollover_intr.value.ui64++; 4054 break; 4055 case EHCI_STS_RH_PORT_CHANGE_INTR: 4056 EHCI_INTRS_STATS_DATA(ehcip)-> 4057 ehci_sts_rh_port_change_intr.value.ui64++; 4058 break; 4059 case EHCI_STS_USB_ERROR_INTR: 4060 EHCI_INTRS_STATS_DATA(ehcip)-> 4061 ehci_sts_usb_error_intr.value.ui64++; 4062 break; 4063 case EHCI_STS_USB_INTR: 4064 EHCI_INTRS_STATS_DATA(ehcip)-> 4065 ehci_sts_usb_intr.value.ui64++; 4066 break; 4067 default: 4068 EHCI_INTRS_STATS_DATA(ehcip)-> 4069 ehci_sts_not_claimed.value.ui64++; 4070 break; 4071 } 4072 } 4073 } 4074 4075 4076 /* 4077 * ehci_do_byte_stats: 4078 * 4079 * ehci data xfer information 4080 */ 4081 void 4082 ehci_do_byte_stats( 4083 ehci_state_t *ehcip, 4084 size_t len, 4085 uint8_t attr, 4086 uint8_t addr) 4087 { 4088 uint8_t type = attr & USB_EP_ATTR_MASK; 4089 uint8_t dir = addr & USB_EP_DIR_MASK; 4090 4091 if (dir == USB_EP_DIR_IN) { 4092 EHCI_TOTAL_STATS_DATA(ehcip)->reads++; 4093 EHCI_TOTAL_STATS_DATA(ehcip)->nread += len; 4094 switch (type) { 4095 case USB_EP_ATTR_CONTROL: 4096 EHCI_CTRL_STATS(ehcip)->reads++; 4097 EHCI_CTRL_STATS(ehcip)->nread += len; 4098 break; 4099 case USB_EP_ATTR_BULK: 4100 EHCI_BULK_STATS(ehcip)->reads++; 4101 EHCI_BULK_STATS(ehcip)->nread += len; 4102 break; 4103 case USB_EP_ATTR_INTR: 4104 EHCI_INTR_STATS(ehcip)->reads++; 4105 EHCI_INTR_STATS(ehcip)->nread += len; 4106 break; 4107 case USB_EP_ATTR_ISOCH: 4108 EHCI_ISOC_STATS(ehcip)->reads++; 4109 
EHCI_ISOC_STATS(ehcip)->nread += len; 4110 break; 4111 } 4112 } else if (dir == USB_EP_DIR_OUT) { 4113 EHCI_TOTAL_STATS_DATA(ehcip)->writes++; 4114 EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len; 4115 switch (type) { 4116 case USB_EP_ATTR_CONTROL: 4117 EHCI_CTRL_STATS(ehcip)->writes++; 4118 EHCI_CTRL_STATS(ehcip)->nwritten += len; 4119 break; 4120 case USB_EP_ATTR_BULK: 4121 EHCI_BULK_STATS(ehcip)->writes++; 4122 EHCI_BULK_STATS(ehcip)->nwritten += len; 4123 break; 4124 case USB_EP_ATTR_INTR: 4125 EHCI_INTR_STATS(ehcip)->writes++; 4126 EHCI_INTR_STATS(ehcip)->nwritten += len; 4127 break; 4128 case USB_EP_ATTR_ISOCH: 4129 EHCI_ISOC_STATS(ehcip)->writes++; 4130 EHCI_ISOC_STATS(ehcip)->nwritten += len; 4131 break; 4132 } 4133 } 4134 } 4135