1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * Universal Host Controller Driver (UHCI) 31 * 32 * The UHCI driver is a driver which interfaces to the Universal 33 * Serial Bus Driver (USBA) and the Host Controller (HC). The interface to 34 * the Host Controller is defined by the UHCI. 35 * This file contains misc functions. 
36 */ 37 #include <sys/usb/hcd/uhci/uhcid.h> 38 #include <sys/usb/hcd/uhci/uhciutil.h> 39 #include <sys/usb/hcd/uhci/uhcipolled.h> 40 41 42 /* Globals */ 43 extern uint_t uhci_td_pool_size; /* Num TDs */ 44 extern uint_t uhci_qh_pool_size; /* Num QHs */ 45 extern ushort_t uhci_tree_bottom_nodes[]; 46 extern void *uhci_statep; 47 48 /* function prototypes */ 49 static void uhci_build_interrupt_lattice(uhci_state_t *uhcip); 50 static int uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip); 51 52 static uint_t uhci_lattice_height(uint_t bandwidth); 53 static uint_t uhci_lattice_parent(uint_t node); 54 static uint_t uhci_leftmost_leaf(uint_t node, uint_t height); 55 static uint_t uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint, 56 usb_port_status_t port_status); 57 58 static int uhci_bandwidth_adjust(uhci_state_t *uhcip, 59 usb_ep_descr_t *endpoint, usb_port_status_t port_status); 60 61 static uhci_td_t *uhci_allocate_td_from_pool(uhci_state_t *uhcip); 62 static void uhci_fill_in_td(uhci_state_t *uhcip, 63 uhci_td_t *td, uhci_td_t *current_dummy, 64 uint32_t buffer_address, size_t length, 65 uhci_pipe_private_t *pp, uchar_t PID, 66 usb_req_attrs_t attrs); 67 static uhci_trans_wrapper_t *uhci_create_transfer_wrapper( 68 uhci_state_t *uhcip, uhci_pipe_private_t *pp, 69 size_t length, usb_flags_t usb_flags); 70 71 static int uhci_create_setup_pkt(uhci_state_t *uhcip, 72 uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw); 73 static void uhci_insert_ctrl_qh(uhci_state_t *uhcip, 74 uhci_pipe_private_t *pp); 75 static void uhci_remove_ctrl_qh(uhci_state_t *uhcip, 76 uhci_pipe_private_t *pp); 77 static void uhci_insert_intr_qh(uhci_state_t *uhcip, 78 uhci_pipe_private_t *pp); 79 static void uhci_remove_intr_qh(uhci_state_t *uhcip, 80 uhci_pipe_private_t *pp); 81 static void uhci_remove_bulk_qh(uhci_state_t *uhcip, 82 uhci_pipe_private_t *pp); 83 static void uhci_insert_bulk_qh(uhci_state_t *uhcip, 84 uhci_pipe_private_t *pp); 85 static void 
uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td);
static int	uhci_alloc_memory_for_tds(uhci_state_t *uhcip, uint_t num_tds,
			uhci_bulk_isoc_xfer_t *info);

static int	uhci_handle_isoc_receive(uhci_state_t *uhcip,
			uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
static void	uhci_delete_isoc_td(uhci_state_t *uhcip,
			uhci_td_t *td);
#ifdef DEBUG
static void	uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td);
static void	uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh);
#endif


/*
 * uhci_build_interrupt_lattice:
 *
 * Construct the interrupt lattice tree using static Queue Head pointers.
 * This interrupt lattice tree will have a total of 63 queue heads and the
 * Host Controller (HC) processes queue heads every frame.
 *
 * Caller must have already allocated the QH pool and the frame list table;
 * the first NUM_INTR_QH_LISTS entries of the QH pool become the static
 * lattice nodes.
 */
static void
uhci_build_interrupt_lattice(uhci_state_t *uhcip)
{
	int			half_list = NUM_INTR_QH_LISTS / 2;
	uint16_t		i, j, k;
	uhci_td_t		*sof_td, *isoc_td;
	uintptr_t		addr;
	queue_head_t		*list_array = uhcip->uhci_qh_pool_addr;
	queue_head_t		*tmp_qh;
	frame_lst_table_t	*frame_lst_tablep =
	    uhcip->uhci_frame_lst_tablep;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_build_interrupt_lattice:");

	/*
	 * Reserve the first 63 queue head structures in the pool as static
	 * queue heads & these are required for constructing the interrupt
	 * lattice tree.
	 */
	for (i = 0; i < NUM_INTR_QH_LISTS; i++) {
		SetQH32(uhcip, list_array[i].link_ptr, HC_END_OF_LIST);
		SetQH32(uhcip, list_array[i].element_ptr, HC_END_OF_LIST);
		list_array[i].qh_flag = QUEUE_HEAD_FLAG_STATIC;
		list_array[i].node = i;
	}

	/*
	 * Build the interrupt lattice tree: both children (2i+1, 2i+2)
	 * link up to their parent node i, so lower-frequency nodes are
	 * reached from multiple leaves.
	 */
	for (i = 0; i < half_list - 1; i++) {
		/*
		 * The next pointer in the host controller queue head
		 * descriptor must contain an iommu address. Calculate
		 * the offset into the cpu address and add this to the
		 * starting iommu address.
		 */
		addr = QH_PADDR(&list_array[i]) | HC_QUEUE_HEAD;

		SetQH32(uhcip, list_array[2*i + 1].link_ptr, addr);
		SetQH32(uhcip, list_array[2*i + 2].link_ptr, addr);
	}

	/*
	 * Initialize the interrupt list in the Frame list Table
	 * so that it points to the bottom of the tree.
	 */
	for (i = 0, j = 0; i < pow_2(TREE_HEIGHT); i++) {
		addr = QH_PADDR(&list_array[half_list + i - 1]);
		for (k = 0; k < pow_2(VIRTUAL_TREE_HEIGHT); k++) {
			SetFL32(uhcip,
			    frame_lst_tablep[uhci_tree_bottom_nodes[j++]],
			    addr | HC_QUEUE_HEAD);
		}
	}

	/*
	 * Create the control and bulk transfer queue heads; the control
	 * queue is linked after static node 0, and the bulk queue after
	 * the control queue.
	 */
	uhcip->uhci_ctrl_xfers_q_head = uhci_alloc_queue_head(uhcip);
	tmp_qh = uhcip->uhci_ctrl_xfers_q_tail = uhcip->uhci_ctrl_xfers_q_head;

	SetQH32(uhcip, list_array[0].link_ptr,
	    (QH_PADDR(tmp_qh) | HC_QUEUE_HEAD));

	uhcip->uhci_bulk_xfers_q_head = uhci_alloc_queue_head(uhcip);
	uhcip->uhci_bulk_xfers_q_tail = uhcip->uhci_bulk_xfers_q_head;
	SetQH32(uhcip, tmp_qh->link_ptr,
	    (QH_PADDR(uhcip->uhci_bulk_xfers_q_head)|HC_QUEUE_HEAD));

	SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_head->link_ptr, HC_END_OF_LIST);

	/*
	 * Add a dummy TD to the static queue head 0. This is used
	 * to generate an interrupt at the end of a frame.
	 */
	sof_td = uhci_allocate_td_from_pool(uhcip);

	SetQH32(uhcip, list_array[0].element_ptr,
	    TD_PADDR(sof_td) | HC_TD_HEAD);
	SetTD32(uhcip, sof_td->link_ptr, HC_END_OF_LIST);
	uhcip->uhci_sof_td = sof_td;

	/*
	 * Add a dummy td that is used to generate an interrupt for
	 * every 1024 frames.
	 */
	isoc_td = uhci_allocate_td_from_pool(uhcip);
	SetTD32(uhcip, isoc_td->link_ptr, HC_END_OF_LIST);
	uhcip->uhci_isoc_td = isoc_td;

	/*
	 * Splice the isoc QH into the last frame list entry so its dummy
	 * TD is reached once per 1024-frame cycle (ioc bit is set later
	 * in uhci_init_ctlr).
	 */
	uhcip->uhci_isoc_qh = uhci_alloc_queue_head(uhcip);
	SetQH32(uhcip, uhcip->uhci_isoc_qh->link_ptr,
	    GetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM]));
	SetQH32(uhcip, uhcip->uhci_isoc_qh->element_ptr, TD_PADDR(isoc_td));
	SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM],
	    QH_PADDR(uhcip->uhci_isoc_qh) | HC_QUEUE_HEAD);
}


/*
 * uhci_allocate_pools:
 *	Allocate the system memory for the Queue Heads Descriptor and
 *	for the Transfer Descriptor (TD) pools. Both QH and TD structures
 *	must be aligned to a 16 byte boundary.
 *
 *	Returns USB_SUCCESS or USB_FAILURE; on failure, partially acquired
 *	resources are released later by uhci_free_pools().
 */
int
uhci_allocate_pools(uhci_state_t *uhcip)
{
	dev_info_t		*dip = uhcip->uhci_dip;
	size_t			real_length;
	int			i, result;
	uint_t			ccount;
	ddi_device_acc_attr_t	dev_attr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_allocate_pools:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Allocate the TD pool DMA handle */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
	    &uhcip->uhci_td_pool_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Allocate the memory for the TD pool */
	if (ddi_dma_mem_alloc(uhcip->uhci_td_pool_dma_handle,
	    uhci_td_pool_size * sizeof (uhci_td_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    (caddr_t *)&uhcip->uhci_td_pool_addr, &real_length,
	    &uhcip->uhci_td_pool_mem_handle)) {

		return (USB_FAILURE);
	}

	/* Map the TD pool into the I/O address space */
	result = ddi_dma_addr_bind_handle(uhcip->uhci_td_pool_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_td_pool_addr,
	    real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &uhcip->uhci_td_pool_cookie, &ccount);

	bzero((void *)uhcip->uhci_td_pool_addr,
	    uhci_td_pool_size * sizeof (uhci_td_t));

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_allocate_pools: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_allocate_pools: Result = %d", result);

		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_TD_POOL_BOUND;

	/* Initialize the TD pool */
	for (i = 0; i < uhci_td_pool_size; i++) {
		uhcip->uhci_td_pool_addr[i].flag = TD_FLAG_FREE;
	}

	/* Allocate the QH pool DMA handle */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
	    0, &uhcip->uhci_qh_pool_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Allocate the memory for the QH pool */
	if (ddi_dma_mem_alloc(uhcip->uhci_qh_pool_dma_handle,
	    uhci_qh_pool_size * sizeof (queue_head_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    (caddr_t *)&uhcip->uhci_qh_pool_addr, &real_length,
	    &uhcip->uhci_qh_pool_mem_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Map the QH pool into the I/O address space */
	result = ddi_dma_addr_bind_handle(uhcip->uhci_qh_pool_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_qh_pool_addr, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &uhcip->uhci_qh_pool_cookie, &ccount);

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_allocate_pools: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {

		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_QH_POOL_BOUND;

	bzero((void *)uhcip->uhci_qh_pool_addr,
	    uhci_qh_pool_size * sizeof (queue_head_t));

	/* Initialize the QH pool */
	for (i = 0; i < uhci_qh_pool_size; i ++) {
		uhcip->uhci_qh_pool_addr[i].qh_flag = QUEUE_HEAD_FLAG_FREE;
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_allocate_pools: Completed");

	return (USB_SUCCESS);
}


/*
 * uhci_free_pools:
 *	Cleanup on attach failure or detach.  Frees any transfer wrappers
 *	still attached to in-use TDs, then unbinds and frees the TD pool,
 *	QH pool and Frame list Table DMA resources that were acquired.
 *	Safe to call with partially allocated state: every handle/pointer
 *	is checked before use.
 */
void
uhci_free_pools(uhci_state_t *uhcip)
{
	int			i, flag, rval;
	uhci_td_t		*td;
	uhci_trans_wrapper_t	*tw;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_free_pools:");

	if (uhcip->uhci_td_pool_addr && uhcip->uhci_td_pool_mem_handle) {
		for (i = 0; i < uhci_td_pool_size; i ++) {
			td = &uhcip->uhci_td_pool_addr[i];

			flag = uhcip->uhci_td_pool_addr[i].flag;
			if ((flag != TD_FLAG_FREE) &&
			    (flag != TD_FLAG_DUMMY) && (td->tw != NULL)) {
				tw = td->tw;
				uhci_free_tw(uhcip, tw);
			}

		}

		if (uhcip->uhci_dma_addr_bind_flag & UHCI_TD_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_td_pool_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&uhcip->uhci_td_pool_mem_handle);
	}

	/* Free the TD pool */
	if (uhcip->uhci_td_pool_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_td_pool_dma_handle);
	}

	if (uhcip->uhci_qh_pool_addr && uhcip->uhci_qh_pool_mem_handle) {
		if (uhcip->uhci_dma_addr_bind_flag & UHCI_QH_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_qh_pool_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&uhcip->uhci_qh_pool_mem_handle);
	}

	/* Free the QH pool */
	if (uhcip->uhci_qh_pool_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_qh_pool_dma_handle);
	}

	/* Free the Frame list Table area */
	if (uhcip->uhci_frame_lst_tablep && uhcip->uhci_flt_mem_handle) {
		if (uhcip->uhci_dma_addr_bind_flag & UHCI_FLA_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_flt_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&uhcip->uhci_flt_mem_handle);
	}

	if (uhcip->uhci_flt_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_flt_dma_handle);
	}
}


/*
 * uhci_decode_ddi_dma_addr_bind_handle_result:
 *	Process the return values of ddi_dma_addr_bind_handle() and
 *	log a human-readable diagnostic for the failure code.
 */
void
uhci_decode_ddi_dma_addr_bind_handle_result(uhci_state_t *uhcip, int result)
{
	char *msg;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_decode_ddi_dma_addr_bind_handle_result:");

	switch (result) {
	case DDI_DMA_PARTIAL_MAP:
		msg = "Partial transfers not allowed";
		break;
	case DDI_DMA_INUSE:
		msg = "Handle is in use";
		break;
	case DDI_DMA_NORESOURCES:
		msg = "No resources";
		break;
	case DDI_DMA_NOMAPPING:
		msg = "No mapping";
		break;
	case DDI_DMA_TOOBIG:
		msg = "Object is too big";
		break;
	default:
		msg = "Unknown dma error";
	}

	USB_DPRINTF_L4(PRINT_MASK_ALL, uhcip->uhci_log_hdl, "%s", msg);
}


/*
 * uhci_init_ctlr:
 *	Initialize the Host Controller (HC): disable legacy-mode SMIs,
 *	stop and reset the controller, set up the frame list, then set
 *	the run bit and verify the controller actually started.
 *
 *	Returns USB_SUCCESS or USB_FAILURE.
 */
int
uhci_init_ctlr(uhci_state_t *uhcip)
{
	dev_info_t	*dip = uhcip->uhci_dip;
	uint_t		cmd_reg;
	uint_t		frame_base_addr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_init_ctlr:");

	/*
	 * When USB legacy mode is enabled, the BIOS manages the USB keyboard
	 * attached to the UHCI controller. It has been observed that some
	 * times the BIOS does not clear the interrupts in the legacy mode
	 * register in the PCI configuration space. So, disable the SMI intrs
	 * and route the intrs to PIRQD here.
	 */
	pci_config_put16(uhcip->uhci_config_handle,
	    LEGACYMODE_REG_OFFSET, LEGACYMODE_REG_INIT_VALUE);

	/*
	 * Disable all the interrupts.
	 */
	Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

	mutex_enter(&uhcip->uhci_int_mutex);
	cmd_reg = Get_OpReg16(USBCMD);
	cmd_reg &= (~USBCMD_REG_HC_RUN);

	/* Stop the controller */
	Set_OpReg16(USBCMD, cmd_reg);

	/* Reset the host controller */
	Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

	/* Wait 10ms for reset to complete; drop the mutex while sleeping */
	mutex_exit(&uhcip->uhci_int_mutex);
	delay(drv_usectohz(UHCI_RESET_DELAY));
	mutex_enter(&uhcip->uhci_int_mutex);

	/* Clear the reset bit to take the controller out of global reset */
	Set_OpReg16(USBCMD, 0);

	/* Set the frame number to zero */
	Set_OpReg16(FRNUM, 0);

	/* Initialize the Frame list base address area */
	if (uhci_init_frame_lst_table(dip, uhcip) != USB_SUCCESS) {
		mutex_exit(&uhcip->uhci_int_mutex);

		return (USB_FAILURE);
	}

	/* Save the contents of the Frame Interval Registers */
	uhcip->uhci_frame_interval = Get_OpReg8(SOFMOD);

	frame_base_addr = uhcip->uhci_flt_cookie.dmac_address;

	/* Set the Frame list base address */
	Set_OpReg32(FRBASEADD, frame_base_addr);

	/*
	 * Begin sending SOFs.
	 * Set the Host Controller Functional State to Operational.
	 */
	cmd_reg = Get_OpReg16(USBCMD);
	cmd_reg |= (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
	    USBCMD_REG_CONFIG_FLAG);

	Set_OpReg16(USBCMD, cmd_reg);
	mutex_exit(&uhcip->uhci_int_mutex);

	/*
	 * Verify the Command and interrupt enable registers,
	 * a sanity check whether actually initialized or not
	 */
	cmd_reg = Get_OpReg16(USBCMD);

	if (!(cmd_reg & (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
	    USBCMD_REG_CONFIG_FLAG))) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_init_ctlr: Controller initialization failed");

		return (USB_FAILURE);
	}

	/*
	 * Set the ioc bit of the isoc intr td. This enables
	 * the generation of an interrupt for every 1024 frames.
	 */
	SetTD_ioc(uhcip, uhcip->uhci_isoc_td, 1);

	/* Set the flag that uhci controller has been initialized. */
	uhcip->uhci_ctlr_init_flag = B_TRUE;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_init_ctlr: Completed");

	return (USB_SUCCESS);
}


/*
 * uhci_uninit_ctlr:
 *	Uninitialize the Host Controller (HC): disable interrupts and
 *	halt the controller, then wait briefly for the current
 *	transaction to complete.  Caller holds uhci_int_mutex.
 */
void
uhci_uninit_ctlr(uhci_state_t *uhcip)
{
	if (uhcip->uhci_regs_handle) {
		/* Disable all the interrupts. */
		Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

		/* Complete the current transaction and then halt. */
		Set_OpReg16(USBCMD, 0);

		/* Wait for sometime; drop the mutex across the delay */
		mutex_exit(&uhcip->uhci_int_mutex);
		delay(drv_usectohz(UHCI_TIMEWAIT));
		mutex_enter(&uhcip->uhci_int_mutex);
	}
}


/*
 * uhci_map_regs:
 *	The Host Controller (HC) contains a set of on-chip operational
 *	registers which should be mapped into a non-cacheable
 *	portion of the system addressable space.
 *
 *	Also sets up PCI config access and reads the I/O base address.
 *	Returns USB_SUCCESS or USB_FAILURE.
 */
int
uhci_map_regs(uhci_state_t *uhcip)
{
	dev_info_t		*dip = uhcip->uhci_dip;
	int			index;
	uint32_t		regs_prop_len;
	int32_t			*regs_list;
	uint16_t		command_reg;
	ddi_device_acc_attr_t	attr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_map_regs:");

	/* The host controller will be little endian */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* NOTE(review): "®s_" in the extracted text restored to "&regs_" */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, uhcip->uhci_dip,
	    DDI_PROP_DONTPASS, "reg", &regs_list, &regs_prop_len) !=
	    DDI_PROP_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Find the "reg" entry that matches the UHCI register space */
	for (index = 0; index * 5 < regs_prop_len; index++) {
		if (regs_list[index * 5] & UHCI_PROP_MASK) {
			break;
		}
	}

	/*
	 * Deallocate the memory allocated by the ddi_prop_lookup_int_array
	 */
	ddi_prop_free(regs_list);

	if (index * 5 >= regs_prop_len) {

		return (USB_FAILURE);
	}

	/* Map in operational registers */
	if (ddi_regs_map_setup(dip, index, (caddr_t *)&uhcip->uhci_regsp,
	    0, sizeof (hc_regs_t), &attr, &uhcip->uhci_regs_handle) !=
	    DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "ddi_regs_map_setup: failed");

		return (USB_FAILURE);
	}

	if (pci_config_setup(dip, &uhcip->uhci_config_handle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_map_regs: Config error");

		return (USB_FAILURE);
	}

	/* Make sure Memory Access Enable and Master Enable are set */
	command_reg = pci_config_get16(uhcip->uhci_config_handle,
	    PCI_CONF_COMM);
	if (!(command_reg & (PCI_COMM_MAE | PCI_COMM_ME))) {
		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_map_regs: No MAE/ME");
	}

	command_reg |= PCI_COMM_MAE | PCI_COMM_ME;
	pci_config_put16(uhcip->uhci_config_handle, PCI_CONF_COMM,
	    command_reg);

	/*
	 * Check whether I/O base address is configured and enabled.
	 */
	if (!(command_reg & PCI_COMM_IO)) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "I/O Base address access disabled");

		return (USB_FAILURE);
	}
	/*
	 * Get the IO base address of the controller
	 */
	uhcip->uhci_iobase = (pci_config_get16(uhcip->uhci_config_handle,
	    PCI_CONF_IOBASE) & PCI_CONF_IOBASE_MASK);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_map_regs: Completed");

	return (USB_SUCCESS);
}


/*
 * uhci_unmap_regs:
 *	Undo uhci_map_regs(): reset the controller, then tear down the
 *	register mapping and the PCI config access handle.
 */
void
uhci_unmap_regs(uhci_state_t *uhcip)
{
	/* Unmap the UHCI registers */
	if (uhcip->uhci_regs_handle) {
		/* Reset the host controller */
		Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

		ddi_regs_map_free(&uhcip->uhci_regs_handle);
	}

	if (uhcip->uhci_config_handle) {
		pci_config_teardown(&uhcip->uhci_config_handle);
	}
}


/*
 * uhci_set_dma_attributes:
 *	Set the limits in the DMA attributes structure. Most of the values
 *	used in the DMA limit structures are the default values as specified
 *	by the Writing PCI Device Drivers document.
 */
void
uhci_set_dma_attributes(uhci_state_t *uhcip)
{
	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_set_dma_attributes:");

	/* Initialize the DMA attributes */
	uhcip->uhci_dma_attr.dma_attr_version = DMA_ATTR_V0;
	uhcip->uhci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
	uhcip->uhci_dma_attr.dma_attr_addr_hi = 0xfffffff0ull;

	/* 32 bit addressing */
	uhcip->uhci_dma_attr.dma_attr_count_max = 0xffffffull;

	/*
	 * Setting the dma_attr_align to 512 sometimes fails the
	 * binding handle. I dont know why ? But setting to 16 will
	 * be right for our case (16 byte alignment required per
	 * UHCI spec for TD descriptors).
	 */

	/* 16 byte alignment */
	uhcip->uhci_dma_attr.dma_attr_align = 0x10;

	/*
	 * Since PCI specification is byte alignment, the
	 * burstsize field should be set to 1 for PCI devices.
	 */
	uhcip->uhci_dma_attr.dma_attr_burstsizes = 0x1;

	uhcip->uhci_dma_attr.dma_attr_minxfer = 0x1;
	uhcip->uhci_dma_attr.dma_attr_maxxfer = 0xffffffull;
	uhcip->uhci_dma_attr.dma_attr_seg = 0xffffffffull;
	uhcip->uhci_dma_attr.dma_attr_sgllen = 1;
	uhcip->uhci_dma_attr.dma_attr_granular = 1;
	uhcip->uhci_dma_attr.dma_attr_flags = 0;
}


/*
 * pow_2:
 *	Return 2^x; pow_2(0) == 1.
 */
uint_t
pow_2(uint_t x)
{
	return ((x == 0) ? 1 : (1 << x));
}


/*
 * log_2:
 *	Return floor(log2(x)) by counting right shifts down to 1.
 *	NOTE(review): x == 0 would loop indefinitely - callers are
 *	expected to pass a non-zero value.
 */
uint_t
log_2(uint_t x)
{
	int ret_val = 0;

	while (x != 1) {
		ret_val++;
		x = x >> 1;
	}

	return (ret_val);
}


/*
 * uhci_obtain_state:
 *	Look up the per-instance soft state for the given dip.
 */
uhci_state_t *
uhci_obtain_state(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	uhci_state_t *state = ddi_get_soft_state(uhci_statep, instance);

	ASSERT(state != NULL);

	return (state);
}


/*
 * uhci_alloc_hcdi_ops:
 *	The HCDI interfaces or entry points are the software interfaces used by
 *	the Universal Serial Bus Driver (USBA) to access the services of the
 *	Host Controller Driver (HCD). During HCD initialization, inform USBA
 *	about all available HCDI interfaces or entry points.
 */
usba_hcdi_ops_t *
uhci_alloc_hcdi_ops(uhci_state_t *uhcip)
{
	usba_hcdi_ops_t	*hcdi_ops;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_alloc_hcdi_ops:");

	hcdi_ops = usba_alloc_hcdi_ops();

	/* Pipe management entry points */
	hcdi_ops->usba_hcdi_pipe_open = uhci_hcdi_pipe_open;
	hcdi_ops->usba_hcdi_pipe_close = uhci_hcdi_pipe_close;
	hcdi_ops->usba_hcdi_pipe_reset = uhci_hcdi_pipe_reset;

	/* Transfer entry points, one per transfer type */
	hcdi_ops->usba_hcdi_pipe_ctrl_xfer = uhci_hcdi_pipe_ctrl_xfer;
	hcdi_ops->usba_hcdi_pipe_bulk_xfer = uhci_hcdi_pipe_bulk_xfer;
	hcdi_ops->usba_hcdi_pipe_intr_xfer = uhci_hcdi_pipe_intr_xfer;
	hcdi_ops->usba_hcdi_pipe_isoc_xfer = uhci_hcdi_pipe_isoc_xfer;

	hcdi_ops->usba_hcdi_bulk_transfer_size = uhci_hcdi_bulk_transfer_size;
	hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
	    uhci_hcdi_pipe_stop_intr_polling;
	hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
	    uhci_hcdi_pipe_stop_isoc_polling;

	hcdi_ops->usba_hcdi_get_current_frame_number =
	    uhci_hcdi_get_current_frame_number;
	hcdi_ops->usba_hcdi_get_max_isoc_pkts = uhci_hcdi_get_max_isoc_pkts;

	/* Polled-mode (kmdb/console) entry points */
	hcdi_ops->usba_hcdi_console_input_init = uhci_hcdi_polled_input_init;
	hcdi_ops->usba_hcdi_console_input_enter = uhci_hcdi_polled_input_enter;
	hcdi_ops->usba_hcdi_console_read = uhci_hcdi_polled_read;
	hcdi_ops->usba_hcdi_console_input_exit = uhci_hcdi_polled_input_exit;
	hcdi_ops->usba_hcdi_console_input_fini = uhci_hcdi_polled_input_fini;

	return (hcdi_ops);
}


/*
 * uhci_init_frame_lst_table :
 *	Allocate the system memory and initialize Host Controller
 *	Frame list table area. The start of the Frame list Table
 *	area must be 4096 byte aligned.
 */
static int
uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip)
{
	int			result;
	uint_t			ccount;
	size_t			real_length;
	ddi_device_acc_attr_t	dev_attr;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_init_frame_lst_table:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* 4K alignment required */
	uhcip->uhci_dma_attr.dma_attr_align = 0x1000;

	/* Create space for the HCCA block */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
	    0, &uhcip->uhci_flt_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Reset to default 16 bytes */
	uhcip->uhci_dma_attr.dma_attr_align = 0x10;

	if (ddi_dma_mem_alloc(uhcip->uhci_flt_dma_handle,
	    SIZE_OF_FRAME_LST_TABLE, &dev_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, 0, (caddr_t *)&uhcip->uhci_frame_lst_tablep,
	    &real_length, &uhcip->uhci_flt_mem_handle)) {

		return (USB_FAILURE);
	}

	/* Map the whole Frame list base area into the I/O address space */
	result = ddi_dma_addr_bind_handle(uhcip->uhci_flt_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_frame_lst_tablep, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &uhcip->uhci_flt_cookie, &ccount);

	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_init_frame_list_table: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_FLA_POOL_BOUND;

	bzero((void *)uhcip->uhci_frame_lst_tablep, real_length);

	/* Initialize the interrupt lists */
	uhci_build_interrupt_lattice(uhcip);

	return (USB_SUCCESS);
}


/*
 * uhci_alloc_queue_head:
 *	Allocate a queue head from the QH pool (beyond the static lattice
 *	nodes) and attach a fresh dummy TD to it.  Returns NULL when either
 *	the TD pool or the QH pool is exhausted.
 */
queue_head_t *
uhci_alloc_queue_head(uhci_state_t *uhcip)
{
	int		index;
	uhci_td_t	*dummy_td;
	queue_head_t	*queue_head;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Allocate a dummy td first. */
	if ((dummy_td = uhci_allocate_td_from_pool(uhcip)) == NULL) {

		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_queue_head: allocate td from pool failed");

		return (NULL);
	}

	/*
	 * The first 63 queue heads in the Queue Head (QH)
	 * buffer pool are reserved for building interrupt lattice
	 * tree. Search for a blank Queue head in the QH buffer pool.
	 */
	for (index = NUM_STATIC_NODES; index < uhci_qh_pool_size; index++) {
		if (uhcip->uhci_qh_pool_addr[index].qh_flag ==
		    QUEUE_HEAD_FLAG_FREE) {
			break;
		}
	}

	USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head: Allocated %d", index);

	if (index == uhci_qh_pool_size) {
		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_queue_head: All QH exhausted");

		/* Free the dummy td allocated for this qh. */
		dummy_td->flag = TD_FLAG_FREE;

		return (NULL);
	}

	queue_head = &uhcip->uhci_qh_pool_addr[index];
	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head: Allocated address 0x%p", queue_head);

	bzero((void *)queue_head, sizeof (queue_head_t));
	SetQH32(uhcip, queue_head->link_ptr, HC_END_OF_LIST);
	SetQH32(uhcip, queue_head->element_ptr, HC_END_OF_LIST);
	queue_head->prev_qh = NULL;
	queue_head->qh_flag = QUEUE_HEAD_FLAG_BUSY;

	/* Park the dummy TD as the queue's tail element */
	bzero((char *)dummy_td, sizeof (uhci_td_t));
	queue_head->td_tailp = dummy_td;
	SetQH32(uhcip, queue_head->element_ptr, TD_PADDR(dummy_td));

	return (queue_head);
}


/*
 * uhci_allocate_bandwidth:
 *	Figure out whether or not this interval may be supported. Return
 *	the index into the lattice if it can be supported. Return
 *	allocation failure if it can not be supported.
 */
int
uhci_allocate_bandwidth(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*pipe_handle,
	uint_t			*node)
{
	int		bandwidth;	/* Requested bandwidth */
	uint_t		min, min_index;
	uint_t		i;
	uint_t		height;		/* Bandwidth's height in the tree */
	uint_t		leftmost;
	uint_t		length;
	uint32_t	paddr;
	queue_head_t	*tmp_qh;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint.
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);

	length = uhci_compute_total_bandwidth(endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If the length in bytes plus the allocated bandwidth exceeds
	 * the maximum, return bandwidth allocation failure.
	 */
	if ((length + uhcip->uhci_bandwidth_intr_min +
	    uhcip->uhci_bandwidth_isoch_sum) > (MAX_PERIODIC_BANDWIDTH)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_allocate_bandwidth: "
		    "Reached maximum bandwidth value and cannot allocate "
		    "bandwidth for a given Interrupt/Isoch endpoint");

		return (USB_NO_BANDWIDTH);
	}

	/*
	 * Isochronous endpoints do not occupy a lattice node; their
	 * bandwidth is simply accumulated in the isoch sum.
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum += length;

		return (USB_SUCCESS);
	}

	/*
	 * This is an interrupt endpoint.
	 * Adjust bandwidth to be a power of 2
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this bandwidth can't be supported,
	 * return allocation failure.
	 */
	if (bandwidth == USB_FAILURE) {

		return (USB_FAILURE);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The new bandwidth is %d", bandwidth);

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The leaf with minimal bandwidth %d, "
	    "The smallest bandwidth %d", min_index, min);

	/*
	 * Find the index into the lattice given the
	 * leaf with the smallest allocated bandwidth.
	 */
	height = uhci_lattice_height(bandwidth);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The height is %d", height);

	*node = uhci_tree_bottom_nodes[min_index];

	/* check if there are isocs TDs scheduled for this frame */
	if (uhcip->uhci_isoc_q_tailp[*node]) {
		paddr = (uhcip->uhci_isoc_q_tailp[*node]->link_ptr &
		    FRAME_LST_PTR_MASK);
	} else {
		paddr = (uhcip->uhci_frame_lst_tablep[*node] &
		    FRAME_LST_PTR_MASK);
	}

	/* Walk up 'height' levels from the leaf to the allocation node */
	tmp_qh = QH_VADDR(paddr);
	*node = tmp_qh->node;
	for (i = 0; i < height; i++) {
		*node = uhci_lattice_parent(*node);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The real node is %d", *node);

	/*
	 * Find the leftmost leaf in the subtree specified by the node.
	 */
	leftmost = uhci_leftmost_leaf(*node, height);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "Leftmost %d", leftmost);

	/* Verify no covered leaf would exceed the per-frame maximum */
	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i ++) {

		if ((length + uhcip->uhci_bandwidth_isoch_sum +
		    uhcip->uhci_bandwidth[i]) > MAX_PERIODIC_BANDWIDTH) {

			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_allocate_bandwidth: "
			    "Reached maximum bandwidth value and cannot "
			    "allocate bandwidth for Interrupt endpoint");

			return (USB_NO_BANDWIDTH);
		}
	}

	/*
	 * All the leaves for this node must be updated with the bandwidth.
	 */
	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i ++) {
		uhcip->uhci_bandwidth[i] += length;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;

	return (USB_SUCCESS);
}


/*
 * uhci_deallocate_bandwidth:
 *	Deallocate bandwidth for the given node in the lattice
 *	and the length of transfer.
 */
void
uhci_deallocate_bandwidth(uhci_state_t *uhcip,
    usba_pipe_handle_data_t *pipe_handle)
{
	uint_t		bandwidth;
	uint_t		height;
	uint_t		leftmost;
	uint_t		i;
	uint_t		min;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;
	uint_t		node, length;
	uhci_pipe_private_t *pp =
	    (uhci_pipe_private_t *)pipe_handle->p_hcd_private;

	/* This routine is protected by the uhci_int_mutex */
	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Obtain the length */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	length = uhci_compute_total_bandwidth(endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this is an isochronous endpoint, just delete endpoint's
	 * bandwidth from the total allocated isochronous bandwidth.
     */
    if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
        uhcip->uhci_bandwidth_isoch_sum -= length;

        return;
    }

    /* Obtain the node (recorded when the pipe was opened) */
    node = pp->pp_node;

    /* Adjust bandwidth to be a power of 2 */
    mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
    bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
        pipe_handle->p_usba_device->usb_port_status);
    mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

    /* Find the height in the tree */
    height = uhci_lattice_height(bandwidth);

    /*
     * Find the leftmost leaf in the subtree specified by the node
     */
    leftmost = uhci_leftmost_leaf(node, height);

    /* Delete the bandwidth from the appropriate lists */
    for (i = leftmost; i < leftmost + (NUM_FRAME_LST_ENTRIES/bandwidth);
        i ++) {
        uhcip->uhci_bandwidth[i] -= length;
    }

    min = uhcip->uhci_bandwidth[0];

    /* Recompute the minimum */
    for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
        if (uhcip->uhci_bandwidth[i] < min) {
            min = uhcip->uhci_bandwidth[i];
        }
    }

    /* Save the minimum for later use */
    uhcip->uhci_bandwidth_intr_min = min;
}


/*
 * uhci_compute_total_bandwidth:
 *
 * Given a periodic endpoint (interrupt or isochronous) determine the total
 * bandwidth for one transaction. The UHCI host controller traverses the
 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
 * services an endpoint, only a single transaction attempt is made. The HC
 * moves to the next Endpoint Descriptor after the first transaction attempt
 * rather than finishing the entire Transfer Descriptor. Therefore, when a
 * Transfer Descriptor is inserted into the lattice, we will only count the
 * number of bytes for one transaction.
 *
 * The following are the formulas used for calculating bandwidth in terms
 * of bytes, for a single USB full speed and low speed transaction
 * respectively. The protocol overheads will be different for each type
 * of USB transfer and all these formulas & protocol overheads are derived
 * from section 5.9.3 of the USB Specification & with the help of the
 * Bandwidth Analysis white paper posted on the USB developer forum.
 *
 * Full-Speed:
 *	Protocol overhead + ((MaxPacketSize * 7)/6) + Host_Delay
 *
 * Low-Speed:
 *	Protocol overhead + Hub LS overhead +
 *	(Low-Speed clock * ((MaxPacketSize * 7)/6)) + Host_Delay
 */
static uint_t
uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
    usb_port_status_t port_status)
{
    uint_t	bandwidth;
    ushort_t	MaxPacketSize = endpoint->wMaxPacketSize;

    /* Add Host Controller specific delay to required bandwidth */
    bandwidth = HOST_CONTROLLER_DELAY;

    /* Add bit-stuffing overhead: worst case is 7 bits on the wire per 6 */
    MaxPacketSize = (ushort_t)((MaxPacketSize * 7) / 6);

    /* Low Speed interrupt transaction */
    if (port_status == USBA_LOW_SPEED_DEV) {
        /* Low Speed interrupt transaction */
        bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
            HUB_LOW_SPEED_PROTO_OVERHEAD +
            (LOW_SPEED_CLOCK * MaxPacketSize));
    } else {
        /* Full Speed transaction */
        bandwidth += MaxPacketSize;

        if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_INTR) {
            /* Full Speed interrupt transaction */
            bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
        } else {
            /* Isochronous and input transaction */
            if (UHCI_XFER_DIR(endpoint) == USB_EP_DIR_IN) {
                bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
            } else {
                /* Isochronous and output transaction */
                bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
            }
        }
    }

    return (bandwidth);
}


/*
 * uhci_bandwidth_adjust:
 *	Round an endpoint's polling interval down to the largest power
 *	of 2 that is <= the requested interval (clamped to 32 ms).
 */
static int
uhci_bandwidth_adjust(
    uhci_state_t	*uhcip,
    usb_ep_descr_t	*endpoint,
    usb_port_status_t	port_status)
{
    int		i = 0;
    uint_t	interval;

    /*
     * Get the polling interval from the endpoint descriptor
     */
    interval = endpoint->bInterval;

    /*
     * The bInterval value in the endpoint descriptor can range
     * from 1 to 255ms. The interrupt lattice has 32 leaf nodes,
     * and the host controller cycles through these nodes every
     * 32ms. The longest polling interval that the controller
     * supports is 32ms.
     */

    /*
     * Return an error if the polling interval is less than 1ms
     * and greater than 255ms
     */
    if ((interval < MIN_POLL_INTERVAL) || (interval > MAX_POLL_INTERVAL)) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_bandwidth_adjust: Endpoint's poll interval must be "
            "between %d and %d ms", MIN_POLL_INTERVAL,
            MAX_POLL_INTERVAL);

        return (USB_FAILURE);
    }

    /*
     * According USB Specifications, a full-speed endpoint can
     * specify a desired polling interval 1ms to 255ms and a low
     * speed endpoints are limited to specifying only 10ms to
     * 255ms. But some old keyboards & mice uses polling interval
     * of 8ms. For compatibility purpose, we are using polling
     * interval between 8ms & 255ms for low speed endpoints.
     */
    if ((port_status == USBA_LOW_SPEED_DEV) &&
        (interval < MIN_LOW_SPEED_POLL_INTERVAL)) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_bandwidth_adjust: Low speed endpoint's poll interval "
            "must be >= %d ms, adjusted",
            MIN_LOW_SPEED_POLL_INTERVAL);

        interval = MIN_LOW_SPEED_POLL_INTERVAL;
    }

    /*
     * If polling interval is greater than 32ms,
     * adjust polling interval equal to 32ms.
     */
    if (interval > 32) {
        interval = 32;
    }

    /*
     * Find the nearest power of 2 that's less
     * than interval.
     */
    while ((pow_2(i)) <= interval) {
        i++;
    }

    /* pow_2(i) overshot; pow_2(i - 1) is the largest power of 2 <= interval */
    return (pow_2((i - 1)));
}


/*
 * uhci_lattice_height:
 *	Given the requested bandwidth, find the height in the tree at
 *	which the nodes for this bandwidth fall.  The height is measured
 *	as the number of nodes from the leaf to the level specified by
 *	bandwidth The root of the tree is at height TREE_HEIGHT.
 */
static uint_t
uhci_lattice_height(uint_t bandwidth)
{
    return (TREE_HEIGHT - (log_2(bandwidth)));
}


/*
 * uhci_lattice_parent:
 *	Return the parent index of the given node in the interrupt lattice.
 */
static uint_t
uhci_lattice_parent(uint_t node)
{
    return (((node % 2) == 0) ? ((node/2) - 1) : (node/2));
}


/*
 * uhci_leftmost_leaf:
 *	Find the leftmost leaf in the subtree specified by the node.
 *	Height refers to number of nodes from the bottom of the tree
 *	to the node, including the node.
 */
static uint_t
uhci_leftmost_leaf(uint_t node, uint_t height)
{
    node = pow_2(height + VIRTUAL_TREE_HEIGHT) * (node+1) -
        NUM_FRAME_LST_ENTRIES;

    return (node);
}


/*
 * uhci_insert_qh:
 *	Add the Queue Head (QH) into the Host Controller's (HC)
 *	appropriate queue head list.
 */
void
uhci_insert_qh(uhci_state_t *uhcip, usba_pipe_handle_data_t *ph)
{
    uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_insert_qh:");

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    /* Dispatch on the endpoint's transfer type */
    switch (UHCI_XFER_TYPE(&ph->p_ep)) {
    case USB_EP_ATTR_CONTROL:
        uhci_insert_ctrl_qh(uhcip, pp);
        break;
    case USB_EP_ATTR_BULK:
        uhci_insert_bulk_qh(uhcip, pp);
        break;
    case USB_EP_ATTR_INTR:
        uhci_insert_intr_qh(uhcip, pp);
        break;
    case USB_EP_ATTR_ISOCH:
        /* Isochronous pipes do not use a QH here; flag the request */
        USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_insert_qh: Illegal request");
        break;
    }
}


/*
 * uhci_insert_ctrl_qh:
 *	Insert a control QH into the Host Controller's (HC) control QH list.
 */
static void
uhci_insert_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
    queue_head_t *qh = pp->pp_qh;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_insert_ctrl_qh:");

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    if (uhcip->uhci_ctrl_xfers_q_head == uhcip->uhci_ctrl_xfers_q_tail) {
        uhcip->uhci_ctrl_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
    }

    /* Link the new QH after the current tail, then make it the tail */
    SetQH32(uhcip, qh->link_ptr,
        GetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr));
    qh->prev_qh = uhcip->uhci_ctrl_xfers_q_tail;
    SetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr,
        QH_PADDR(qh) | HC_QUEUE_HEAD);
    uhcip->uhci_ctrl_xfers_q_tail = qh;

}


/*
 * uhci_insert_bulk_qh:
 *	Insert a bulk QH into the Host Controller's (HC) bulk QH list.
 */
static void
uhci_insert_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
    queue_head_t *qh = pp->pp_qh;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_insert_bulk_qh:");

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    if (uhcip->uhci_bulk_xfers_q_head == uhcip->uhci_bulk_xfers_q_tail) {
        uhcip->uhci_bulk_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
    } else if (uhcip->uhci_bulk_xfers_q_head->link_ptr ==
        uhcip->uhci_bulk_xfers_q_tail->link_ptr) {

        /* If there is already a loop, we should keep the loop. */
        qh->link_ptr = uhcip->uhci_bulk_xfers_q_tail->link_ptr;
    }

    /* Append the new QH at the tail of the bulk list */
    qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
    SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_tail->link_ptr,
        QH_PADDR(qh) | HC_QUEUE_HEAD);
    uhcip->uhci_bulk_xfers_q_tail = qh;
}


/*
 * uhci_insert_intr_qh:
 *	Insert a periodic Queue head i.e Interrupt queue head into the
 *	Host Controller's (HC) interrupt lattice tree.
 */
static void
uhci_insert_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
    uint_t node = pp->pp_node;	/* The appropriate node was */
				/* found during the opening */
				/* of the pipe.
*/ 1474 queue_head_t *qh = pp->pp_qh; 1475 queue_head_t *next_lattice_qh, *lattice_qh; 1476 1477 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1478 "uhci_insert_intr_qh:"); 1479 1480 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 1481 1482 /* Find the lattice queue head */ 1483 lattice_qh = &uhcip->uhci_qh_pool_addr[node]; 1484 next_lattice_qh = 1485 QH_VADDR(GetQH32(uhcip, lattice_qh->link_ptr) & QH_LINK_PTR_MASK); 1486 1487 next_lattice_qh->prev_qh = qh; 1488 qh->link_ptr = lattice_qh->link_ptr; 1489 qh->prev_qh = lattice_qh; 1490 SetQH32(uhcip, lattice_qh->link_ptr, QH_PADDR(qh) | HC_QUEUE_HEAD); 1491 pp->pp_data_toggle = 0; 1492 } 1493 1494 1495 /* 1496 * uhci_insert_intr_td: 1497 * Create a TD and a data buffer for an interrupt endpoint. 1498 */ 1499 int 1500 uhci_insert_intr_td( 1501 uhci_state_t *uhcip, 1502 usba_pipe_handle_data_t *ph, 1503 usb_intr_req_t *req, 1504 usb_flags_t flags) 1505 { 1506 int error, pipe_dir; 1507 uint_t length, mps; 1508 uint32_t buf_addr; 1509 uhci_td_t *tmp_td; 1510 usb_intr_req_t *intr_reqp; 1511 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 1512 uhci_trans_wrapper_t *tw; 1513 1514 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1515 "uhci_insert_intr_td: req: 0x%p", req); 1516 1517 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 1518 1519 /* Get the interrupt pipe direction */ 1520 pipe_dir = UHCI_XFER_DIR(&ph->p_ep); 1521 1522 /* Get the current interrupt request pointer */ 1523 if (req) { 1524 length = req->intr_len; 1525 } else { 1526 ASSERT(pipe_dir == USB_EP_DIR_IN); 1527 length = (pp->pp_client_periodic_in_reqp) ? 
1528 (((usb_intr_req_t *)pp-> 1529 pp_client_periodic_in_reqp)->intr_len) : 1530 ph->p_ep.wMaxPacketSize; 1531 } 1532 1533 /* Check the size of interrupt request */ 1534 if (length > UHCI_MAX_TD_XFER_SIZE) { 1535 1536 /* the length shouldn't exceed 8K */ 1537 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1538 "uhci_insert_intr_td: Intr request size 0x%lx is " 1539 "more than 0x%x", length, UHCI_MAX_TD_XFER_SIZE); 1540 1541 return (USB_INVALID_REQUEST); 1542 } 1543 1544 USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1545 "uhci_insert_intr_td: length: 0x%lx", length); 1546 1547 /* Allocate a transaction wrapper */ 1548 if ((tw = uhci_create_transfer_wrapper(uhcip, pp, length, flags)) == 1549 NULL) { 1550 1551 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1552 "uhci_insert_intr_td: TW allocation failed"); 1553 1554 return (USB_NO_RESOURCES); 1555 } 1556 1557 /* 1558 * Initialize the callback and any callback 1559 * data for when the td completes. 1560 */ 1561 tw->tw_handle_td = uhci_handle_intr_td; 1562 tw->tw_handle_callback_value = NULL; 1563 tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ? 1564 PID_OUT : PID_IN; 1565 tw->tw_curr_xfer_reqp = (usb_opaque_t)req; 1566 1567 /* 1568 * If it is an Interrupt IN request and interrupt request is NULL, 1569 * allocate the usb interrupt request structure for the current 1570 * interrupt polling request. 1571 */ 1572 if (tw->tw_direction == PID_IN) { 1573 if ((error = uhci_allocate_periodic_in_resource(uhcip, 1574 pp, tw, flags)) != USB_SUCCESS) { 1575 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1576 "uhci_insert_intr_td: Interrupt request structure " 1577 "allocation failed"); 1578 1579 return (error); 1580 } 1581 } 1582 1583 intr_reqp = (usb_intr_req_t *)tw->tw_curr_xfer_reqp; 1584 ASSERT(tw->tw_curr_xfer_reqp != NULL); 1585 1586 tw->tw_timeout_cnt = (intr_reqp->intr_attributes & USB_ATTRS_ONE_XFER) ? 
1587 intr_reqp->intr_timeout : 0; 1588 1589 /* DATA IN */ 1590 if (tw->tw_direction == PID_IN) { 1591 /* Insert the td onto the queue head */ 1592 error = uhci_insert_hc_td(uhcip, tw->tw_cookie.dmac_address, 1593 length, pp, tw, PID_IN, intr_reqp->intr_attributes); 1594 1595 if (error != USB_SUCCESS) { 1596 1597 uhci_deallocate_periodic_in_resource(uhcip, pp, tw); 1598 /* free the transfer wrapper */ 1599 uhci_deallocate_tw(uhcip, pp, tw); 1600 1601 return (USB_NO_RESOURCES); 1602 } 1603 tw->tw_bytes_xfered = 0; 1604 1605 return (USB_SUCCESS); 1606 } 1607 1608 /* DATA OUT */ 1609 ASSERT(req->intr_data != NULL); 1610 1611 /* Copy the data into the message */ 1612 ddi_rep_put8(tw->tw_accesshandle, req->intr_data->b_rptr, 1613 (uint8_t *)tw->tw_buf, req->intr_len, DDI_DEV_AUTOINCR); 1614 1615 /* set tw->tw_claim flag, so that nobody else works on this tw. */ 1616 tw->tw_claim = UHCI_INTR_HDLR_CLAIMED; 1617 1618 mps = ph->p_ep.wMaxPacketSize; 1619 buf_addr = tw->tw_cookie.dmac_address; 1620 1621 /* Insert tds onto the queue head */ 1622 while (length > 0) { 1623 1624 error = uhci_insert_hc_td(uhcip, buf_addr, 1625 (length > mps) ? mps : length, 1626 pp, tw, PID_OUT, 1627 intr_reqp->intr_attributes); 1628 1629 if (error != USB_SUCCESS) { 1630 /* no resource. */ 1631 break; 1632 } 1633 1634 if (length <= mps) { 1635 /* inserted all data. 
 */
            length = 0;

        } else {

            buf_addr += mps;
            length -= mps;
        }
    }

    if (error != USB_SUCCESS) {

        USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
            "uhci_insert_intr_td: allocate td failed, free resource");

        /* remove all the tds */
        while (tw->tw_hctd_head != NULL) {
            uhci_delete_td(uhcip, tw->tw_hctd_head);
        }

        tw->tw_claim = UHCI_NOT_CLAIMED;
        uhci_deallocate_tw(uhcip, pp, tw);

        return (error);
    }

    /* allow HC to xfer the tds of this tw */
    tmp_td = tw->tw_hctd_head;
    while (tmp_td != NULL) {

        SetTD_status(uhcip, tmp_td, UHCI_TD_ACTIVE);
        tmp_td = tmp_td->tw_td_next;
    }

    tw->tw_bytes_xfered = 0;
    tw->tw_claim = UHCI_NOT_CLAIMED;

    return (error);
}


/*
 * uhci_create_transfer_wrapper:
 *	Create a Transaction Wrapper (TW).
 *	This involves the allocating of DMA resources.
 */
static uhci_trans_wrapper_t *
uhci_create_transfer_wrapper(
    uhci_state_t	*uhcip,
    uhci_pipe_private_t	*pp,
    size_t		length,
    usb_flags_t		usb_flags)
{
    int			result;
    size_t		real_length;
    uint_t		ccount;
    uhci_trans_wrapper_t	*tw;
    ddi_device_acc_attr_t	dev_attr;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
        length, usb_flags);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    /* Allocate space for the transfer wrapper */
    if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), KM_NOSLEEP)) ==
        NULL) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_create_transfer_wrapper: kmem_alloc failed");

        return (NULL);
    }

    /* Store the transfer length */
    tw->tw_length = length;

    /* Allocate the DMA handle */
    if ((result = ddi_dma_alloc_handle(uhcip->uhci_dip,
        &uhcip->uhci_dma_attr, DDI_DMA_DONTWAIT, 0, &tw->tw_dmahandle)) !=
        DDI_SUCCESS) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_create_transfer_wrapper: Alloc handle failed");
        kmem_free(tw, sizeof (uhci_trans_wrapper_t));

        return (NULL);
    }

    /* Little-endian, strictly-ordered access for the HC-visible buffer */
    dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
    dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
    dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

    /* Allocate the memory */
    if ((result = ddi_dma_mem_alloc(tw->tw_dmahandle, tw->tw_length,
        &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
        (caddr_t *)&tw->tw_buf, &real_length, &tw->tw_accesshandle)) !=
        DDI_SUCCESS) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_create_transfer_wrapper: dma_mem_alloc fail");
        ddi_dma_free_handle(&tw->tw_dmahandle);
        kmem_free(tw, sizeof (uhci_trans_wrapper_t));

        return (NULL);
    }

    ASSERT(real_length >= length);

    /* Bind the handle */
    if ((result = ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
        (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
        DDI_DMA_DONTWAIT, NULL, &tw->tw_cookie, &ccount)) !=
        DDI_DMA_MAPPED) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_create_transfer_wrapper: Bind handle failed");
        ddi_dma_mem_free(&tw->tw_accesshandle);
        ddi_dma_free_handle(&tw->tw_dmahandle);
        kmem_free(tw, sizeof (uhci_trans_wrapper_t));

        return (NULL);
    }

    /* The cookie count should be 1 */
    if (ccount != 1) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "create_transfer_wrapper: More than 1 cookie");
        result = ddi_dma_unbind_handle(tw->tw_dmahandle);
        ASSERT(result == DDI_SUCCESS);
        ddi_dma_mem_free(&tw->tw_accesshandle);
        ddi_dma_free_handle(&tw->tw_dmahandle);
        kmem_free(tw, sizeof (uhci_trans_wrapper_t));

        return (NULL);
    }

    /*
     *
     * Only allow one wrapper to be added at a time. Insert the
     * new transaction wrapper into the list for this pipe.
     */
    if (pp->pp_tw_head == NULL) {
        pp->pp_tw_head = tw;
        pp->pp_tw_tail = tw;
    } else {
        pp->pp_tw_tail->tw_next = tw;
        pp->pp_tw_tail = tw;
        ASSERT(tw->tw_next == NULL);
    }

    /* Store a back pointer to the pipe private structure */
    tw->tw_pipe_private = pp;

    /* Store the transfer type - synchronous or asynchronous */
    tw->tw_flags = usb_flags;

    return (tw);
}


/*
 * uhci_insert_hc_td:
 *	Insert a Transfer Descriptor (TD) on an QH.
 */
int
uhci_insert_hc_td(
    uhci_state_t	*uhcip,
    uint32_t		buffer_address,
    size_t		hcgtd_length,
    uhci_pipe_private_t	*pp,
    uhci_trans_wrapper_t	*tw,
    uchar_t		PID,
    usb_req_attrs_t	attrs)
{
    uhci_td_t	*td, *current_dummy;
    queue_head_t	*qh = pp->pp_qh;

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    if ((td = uhci_allocate_td_from_pool(uhcip)) == NULL) {

        return (USB_NO_RESOURCES);
    }

    current_dummy = qh->td_tailp;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_insert_hc_td: td %p, attrs = 0x%x", td, attrs);

    /*
     * Fill in the current dummy td and
     * add the new dummy to the end.
     */
    uhci_fill_in_td(uhcip, td, current_dummy, buffer_address,
        hcgtd_length, pp, PID, attrs);

    /*
     * Allow HC hardware xfer the td, except interrupt out td.
     */
    if ((tw->tw_handle_td != uhci_handle_intr_td) || (PID != PID_OUT)) {

        SetTD_status(uhcip, current_dummy, UHCI_TD_ACTIVE);
    }

    /* Insert this td onto the tw */

    if (tw->tw_hctd_head == NULL) {
        ASSERT(tw->tw_hctd_tail == NULL);
        tw->tw_hctd_head = current_dummy;
        tw->tw_hctd_tail = current_dummy;
    } else {
        /* Add the td to the end of the list */
        tw->tw_hctd_tail->tw_td_next = current_dummy;
        tw->tw_hctd_tail = current_dummy;
    }

    /*
     * Insert the TD on to the QH. When this occurs,
     * the Host Controller will see the newly filled in TD
     */
    current_dummy->outst_td_next = NULL;
    current_dummy->outst_td_prev = uhcip->uhci_outst_tds_tail;
    if (uhcip->uhci_outst_tds_head == NULL) {
        uhcip->uhci_outst_tds_head = current_dummy;
    } else {
        uhcip->uhci_outst_tds_tail->outst_td_next = current_dummy;
    }
    uhcip->uhci_outst_tds_tail = current_dummy;
    current_dummy->tw = tw;

    return (USB_SUCCESS);
}


/*
 * uhci_fill_in_td:
 *	Fill in the fields of a Transfer Descriptor (TD).
 */
static void
uhci_fill_in_td(
    uhci_state_t	*uhcip,
    uhci_td_t		*td,
    uhci_td_t		*current_dummy,
    uint32_t		buffer_address,
    size_t		length,
    uhci_pipe_private_t	*pp,
    uchar_t		PID,
    usb_req_attrs_t	attrs)
{
    usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_fill_in_td: attrs = 0x%x", attrs);

    /*
     * If this is an isochronous TD, just return
     */
    if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {

        return;
    }

    /* The new td becomes the dummy; the old dummy carries the transfer */
    bzero((char *)td, sizeof (uhci_td_t));	/* Clear the TD */
    SetTD32(uhcip, current_dummy->link_ptr, TD_PADDR(td));

    if (attrs & USB_ATTRS_SHORT_XFER_OK) {
        SetTD_spd(uhcip, current_dummy, 1);
    }

    mutex_enter(&ph->p_usba_device->usb_mutex);
    if (ph->p_usba_device->usb_port_status == USBA_LOW_SPEED_DEV) {
        SetTD_ls(uhcip, current_dummy, LOW_SPEED_DEVICE);
    }

    SetTD_c_err(uhcip, current_dummy, UHCI_MAX_ERR_COUNT);
    /* TD length encoding is (n - 1); 0x7ff encodes a zero-length packet */
    SetTD_mlen(uhcip, current_dummy, (length == 0) ? 0x7ff: (length - 1));
    SetTD_dtogg(uhcip, current_dummy, pp->pp_data_toggle);

    /* Adjust the data toggle bit */
    ADJ_DATA_TOGGLE(pp);

    SetTD_devaddr(uhcip, current_dummy, ph->p_usba_device->usb_addr);
    SetTD_endpt(uhcip, current_dummy,
        ph->p_ep.bEndpointAddress & END_POINT_ADDRESS_MASK);
    SetTD_PID(uhcip, current_dummy, PID);
    SetTD32(uhcip, current_dummy->buffer_address, buffer_address);
    SetTD_ioc(uhcip, current_dummy, INTERRUPT_ON_COMPLETION);

    td->qh_td_prev = current_dummy;
    current_dummy->qh_td_prev = NULL;
    pp->pp_qh->td_tailp = td;
    mutex_exit(&ph->p_usba_device->usb_mutex);
}


/*
 * uhci_modify_td_active_bits:
 *	Sets active bit in all the tds of QH to INACTIVE so that
 *	the HC stops processing the TD's related to the QH.
 */
void
uhci_modify_td_active_bits(
    uhci_state_t	*uhcip,
    uhci_pipe_private_t	*pp)
{
    uhci_td_t		*td_head;
    usb_ep_descr_t	*ept = &pp->pp_pipe_handle->p_ep;
    uhci_trans_wrapper_t	*tw_head = pp->pp_tw_head;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_modify_td_active_bits: tw head %p", (void *)tw_head);

    /* Walk every transfer wrapper on the pipe and all of its TDs */
    while (tw_head != NULL) {
        tw_head->tw_claim = UHCI_MODIFY_TD_BITS_CLAIMED;
        td_head = tw_head->tw_hctd_head;

        while (td_head) {
            if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) {
                SetTD_status(uhcip, td_head,
                    GetTD_status(uhcip, td_head) & TD_INACTIVE);
            } else {
                /* Non-isoc: terminate the TD's link so the HC stops */
                SetTD32(uhcip, td_head->link_ptr,
                    GetTD32(uhcip, td_head->link_ptr) |
                    HC_END_OF_LIST);
            }

            td_head = td_head->tw_td_next;
        }
        tw_head = tw_head->tw_next;
    }
}


/*
 * uhci_insert_ctrl_td:
 *	Create a TD and a data buffer for a control Queue Head.
 */
int
uhci_insert_ctrl_td(
    uhci_state_t		*uhcip,
    usba_pipe_handle_data_t	*ph,
    usb_ctrl_req_t		*ctrl_reqp,
    usb_flags_t			flags)
{
    uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
    uhci_trans_wrapper_t	*tw;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_insert_ctrl_td: timeout: 0x%x", ctrl_reqp->ctrl_timeout);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    /* Allocate a transaction wrapper (setup packet + data stage) */
    if ((tw = uhci_create_transfer_wrapper(uhcip, pp,
        ctrl_reqp->ctrl_wLength + SETUP_SIZE, flags)) == NULL) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_insert_ctrl_td: TW allocation failed");

        return (USB_NO_RESOURCES);
    }

    /* Control transfers always start with data toggle 0 */
    pp->pp_data_toggle = 0;

    tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
    tw->tw_bytes_xfered = 0;
    tw->tw_bytes_pending = ctrl_reqp->ctrl_wLength;
    tw->tw_timeout_cnt = max(UHCI_CTRL_TIMEOUT, ctrl_reqp->ctrl_timeout);

    /*
     * Initialize the callback and any callback
     * data for when the td completes.
     */
    tw->tw_handle_td = uhci_handle_ctrl_td;
    tw->tw_handle_callback_value = NULL;

    if ((uhci_create_setup_pkt(uhcip, pp, tw)) != USB_SUCCESS) {
        tw->tw_ctrl_state = 0;

        /* free the transfer wrapper */
        uhci_deallocate_tw(uhcip, pp, tw);

        return (USB_NO_RESOURCES);
    }

    tw->tw_ctrl_state = SETUP;

    return (USB_SUCCESS);
}


/*
 * uhci_create_setup_pkt:
 *	create a setup packet to initiate a control transfer.
 *
 *	OHCI driver has seen the case where devices fail if there is
 *	more than one control transfer to the device within a frame.
 *	So, the UHCI ensures that only one TD will be put on the control
 *	pipe to one device (to be consistent with OHCI driver).
 */
static int
uhci_create_setup_pkt(
    uhci_state_t	*uhcip,
    uhci_pipe_private_t	*pp,
    uhci_trans_wrapper_t	*tw)
{
    int		sdata;
    usb_ctrl_req_t	*req = (usb_ctrl_req_t *)tw->tw_curr_xfer_reqp;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_create_setup_pkt: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%p",
        req->ctrl_bmRequestType, req->ctrl_bRequest, req->ctrl_wValue,
        req->ctrl_wIndex, req->ctrl_wLength, (void *)req->ctrl_data);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
    ASSERT(tw != NULL);

    /* Create the first four bytes of the setup packet */
    sdata = (req->ctrl_bmRequestType | (req->ctrl_bRequest << 8) |
        (req->ctrl_wValue << 16));
    ddi_put32(tw->tw_accesshandle, (uint_t *)tw->tw_buf, sdata);

    /* Create the second four bytes */
    sdata = (uint32_t)(req->ctrl_wIndex | (req->ctrl_wLength << 16));
    ddi_put32(tw->tw_accesshandle,
        (uint_t *)(tw->tw_buf + sizeof (uint_t)), sdata);

    /*
     * The TD's are placed on the QH one at a time.
     * Once this TD is placed on the done list, the
     * data or status phase TD will be enqueued.
     */
    if ((uhci_insert_hc_td(uhcip, tw->tw_cookie.dmac_address, SETUP_SIZE,
        pp, tw, PID_SETUP, req->ctrl_attributes)) != USB_SUCCESS) {

        return (USB_NO_RESOURCES);
    }

    USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "Create_setup: pp = 0x%p, attrs = 0x%x", pp, req->ctrl_attributes);

    /*
     * If this control transfer has a data phase, record the
     * direction. If the data phase is an OUT transaction,
     * copy the data into the buffer of the transfer wrapper.
     */
    if (req->ctrl_wLength != 0) {
        /* There is a data stage.  Find the direction */
        if (req->ctrl_bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
            tw->tw_direction = PID_IN;
        } else {
            tw->tw_direction = PID_OUT;

            /* Copy the data into the buffer, after the setup packet */
            ddi_rep_put8(tw->tw_accesshandle,
                req->ctrl_data->b_rptr,
                (uint8_t *)(tw->tw_buf + SETUP_SIZE),
                req->ctrl_wLength,
                DDI_DEV_AUTOINCR);
        }
    }

    return (USB_SUCCESS);
}


/*
 * uhci_create_stats:
 *	Allocate and initialize the uhci kstat structures
 */
void
uhci_create_stats(uhci_state_t *uhcip)
{
    int			i;
    char		kstatname[KSTAT_STRLEN];
    char		*usbtypes[USB_N_COUNT_KSTATS] =
        {"ctrl", "isoch", "bulk", "intr"};
    uint_t		instance = uhcip->uhci_instance;
    const char		*dname = ddi_driver_name(uhcip->uhci_dip);
    uhci_intrs_stats_t	*isp;

    /* Interrupt-cause counters: "<driver><inst>,intrs" named kstat */
    if (UHCI_INTRS_STATS(uhcip) == NULL) {
        (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
            dname, instance);
        UHCI_INTRS_STATS(uhcip) = kstat_create("usba", instance,
            kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
            sizeof (uhci_intrs_stats_t) / sizeof (kstat_named_t),
            KSTAT_FLAG_PERSISTENT);

        if (UHCI_INTRS_STATS(uhcip) != NULL) {
            isp = UHCI_INTRS_STATS_DATA(uhcip);
            kstat_named_init(&isp->uhci_intrs_hc_halted,
                "HC Halted", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_hc_process_err,
                "HC Process Errors", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_host_sys_err,
                "Host Sys Errors", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_resume_detected,
                "Resume Detected", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_usb_err_intr,
                "USB Error", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_usb_intr,
                "USB Interrupts", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_total,
                "Total Interrupts", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_not_claimed,
                "Not Claimed", KSTAT_DATA_UINT64);

            UHCI_INTRS_STATS(uhcip)->ks_private = uhcip;
            UHCI_INTRS_STATS(uhcip)->ks_update = nulldev;
            kstat_install(UHCI_INTRS_STATS(uhcip));
        }
    }

    /* Aggregate byte-count kstat: "<driver><inst>,total" */
    if (UHCI_TOTAL_STATS(uhcip) == NULL) {
        (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
            dname, instance);
        UHCI_TOTAL_STATS(uhcip) = kstat_create("usba", instance,
            kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
            KSTAT_FLAG_PERSISTENT);

        if (UHCI_TOTAL_STATS(uhcip) != NULL) {
            kstat_install(UHCI_TOTAL_STATS(uhcip));
        }
    }

    /* Per-transfer-type byte-count kstats: ctrl/isoch/bulk/intr */
    for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
        if (uhcip->uhci_count_stats[i] == NULL) {
            (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
                dname, instance, usbtypes[i]);
            uhcip->uhci_count_stats[i] = kstat_create("usba",
                instance, kstatname, "usb_byte_count",
                KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);

            if (uhcip->uhci_count_stats[i] != NULL) {
                kstat_install(uhcip->uhci_count_stats[i]);
            }
        }
    }
}


/*
 * uhci_destroy_stats:
 *	Clean up uhci kstat structures
 */
void
uhci_destroy_stats(uhci_state_t *uhcip)
{
    int	i;

    if (UHCI_INTRS_STATS(uhcip)) {
kstat_delete(UHCI_INTRS_STATS(uhcip)); 2184 UHCI_INTRS_STATS(uhcip) = NULL; 2185 } 2186 2187 if (UHCI_TOTAL_STATS(uhcip)) { 2188 kstat_delete(UHCI_TOTAL_STATS(uhcip)); 2189 UHCI_TOTAL_STATS(uhcip) = NULL; 2190 } 2191 2192 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 2193 if (uhcip->uhci_count_stats[i]) { 2194 kstat_delete(uhcip->uhci_count_stats[i]); 2195 uhcip->uhci_count_stats[i] = NULL; 2196 } 2197 } 2198 } 2199 2200 2201 void 2202 uhci_do_intrs_stats(uhci_state_t *uhcip, int val) 2203 { 2204 if (UHCI_INTRS_STATS(uhcip) == NULL) { 2205 2206 return; 2207 } 2208 2209 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_total.value.ui64++; 2210 switch (val) { 2211 case USBSTS_REG_HC_HALTED: 2212 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_hc_halted.value.ui64++; 2213 break; 2214 case USBSTS_REG_HC_PROCESS_ERR: 2215 UHCI_INTRS_STATS_DATA(uhcip)-> 2216 uhci_intrs_hc_process_err.value.ui64++; 2217 break; 2218 case USBSTS_REG_HOST_SYS_ERR: 2219 UHCI_INTRS_STATS_DATA(uhcip)-> 2220 uhci_intrs_host_sys_err.value.ui64++; 2221 break; 2222 case USBSTS_REG_RESUME_DETECT: 2223 UHCI_INTRS_STATS_DATA(uhcip)-> 2224 uhci_intrs_resume_detected.value.ui64++; 2225 break; 2226 case USBSTS_REG_USB_ERR_INTR: 2227 UHCI_INTRS_STATS_DATA(uhcip)-> 2228 uhci_intrs_usb_err_intr.value.ui64++; 2229 break; 2230 case USBSTS_REG_USB_INTR: 2231 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_usb_intr.value.ui64++; 2232 break; 2233 default: 2234 UHCI_INTRS_STATS_DATA(uhcip)-> 2235 uhci_intrs_not_claimed.value.ui64++; 2236 break; 2237 } 2238 } 2239 2240 2241 void 2242 uhci_do_byte_stats(uhci_state_t *uhcip, size_t len, uint8_t attr, uint8_t addr) 2243 { 2244 uint8_t type = attr & USB_EP_ATTR_MASK; 2245 uint8_t dir = addr & USB_EP_DIR_MASK; 2246 2247 switch (dir) { 2248 case USB_EP_DIR_IN: 2249 UHCI_TOTAL_STATS_DATA(uhcip)->reads++; 2250 UHCI_TOTAL_STATS_DATA(uhcip)->nread += len; 2251 switch (type) { 2252 case USB_EP_ATTR_CONTROL: 2253 UHCI_CTRL_STATS(uhcip)->reads++; 2254 UHCI_CTRL_STATS(uhcip)->nread += len; 2255 break; 
2256 case USB_EP_ATTR_BULK: 2257 UHCI_BULK_STATS(uhcip)->reads++; 2258 UHCI_BULK_STATS(uhcip)->nread += len; 2259 break; 2260 case USB_EP_ATTR_INTR: 2261 UHCI_INTR_STATS(uhcip)->reads++; 2262 UHCI_INTR_STATS(uhcip)->nread += len; 2263 break; 2264 case USB_EP_ATTR_ISOCH: 2265 UHCI_ISOC_STATS(uhcip)->reads++; 2266 UHCI_ISOC_STATS(uhcip)->nread += len; 2267 break; 2268 } 2269 break; 2270 case USB_EP_DIR_OUT: 2271 UHCI_TOTAL_STATS_DATA(uhcip)->writes++; 2272 UHCI_TOTAL_STATS_DATA(uhcip)->nwritten += len; 2273 switch (type) { 2274 case USB_EP_ATTR_CONTROL: 2275 UHCI_CTRL_STATS(uhcip)->writes++; 2276 UHCI_CTRL_STATS(uhcip)->nwritten += len; 2277 break; 2278 case USB_EP_ATTR_BULK: 2279 UHCI_BULK_STATS(uhcip)->writes++; 2280 UHCI_BULK_STATS(uhcip)->nwritten += len; 2281 break; 2282 case USB_EP_ATTR_INTR: 2283 UHCI_INTR_STATS(uhcip)->writes++; 2284 UHCI_INTR_STATS(uhcip)->nwritten += len; 2285 break; 2286 case USB_EP_ATTR_ISOCH: 2287 UHCI_ISOC_STATS(uhcip)->writes++; 2288 UHCI_ISOC_STATS(uhcip)->nwritten += len; 2289 break; 2290 } 2291 break; 2292 } 2293 } 2294 2295 2296 /* 2297 * uhci_free_tw: 2298 * Free the Transfer Wrapper (TW). 2299 */ 2300 void 2301 uhci_free_tw(uhci_state_t *uhcip, uhci_trans_wrapper_t *tw) 2302 { 2303 int rval; 2304 2305 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, "uhci_free_tw:"); 2306 2307 ASSERT(tw != NULL); 2308 2309 rval = ddi_dma_unbind_handle(tw->tw_dmahandle); 2310 ASSERT(rval == DDI_SUCCESS); 2311 2312 ddi_dma_mem_free(&tw->tw_accesshandle); 2313 ddi_dma_free_handle(&tw->tw_dmahandle); 2314 kmem_free(tw, sizeof (uhci_trans_wrapper_t)); 2315 } 2316 2317 2318 /* 2319 * uhci_deallocate_tw: 2320 * Deallocate of a Transaction Wrapper (TW) and this involves 2321 * the freeing of DMA resources. 
2322 */ 2323 void 2324 uhci_deallocate_tw(uhci_state_t *uhcip, 2325 uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw) 2326 { 2327 uhci_trans_wrapper_t *head; 2328 2329 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2330 "uhci_deallocate_tw:"); 2331 2332 /* 2333 * If the transfer wrapper has no Host Controller (HC) 2334 * Transfer Descriptors (TD) associated with it, then 2335 * remove the transfer wrapper. The transfers are done 2336 * in FIFO order, so this should be the first transfer 2337 * wrapper on the list. 2338 */ 2339 if (tw->tw_hctd_head != NULL) { 2340 ASSERT(tw->tw_hctd_tail != NULL); 2341 2342 return; 2343 } 2344 2345 ASSERT(tw->tw_hctd_tail == NULL); 2346 ASSERT(pp->pp_tw_head != NULL); 2347 2348 /* 2349 * If pp->pp_tw_head is NULL, set the tail also to NULL. 2350 */ 2351 head = pp->pp_tw_head; 2352 2353 if (head == tw) { 2354 pp->pp_tw_head = head->tw_next; 2355 if (pp->pp_tw_head == NULL) { 2356 pp->pp_tw_tail = NULL; 2357 } 2358 } else { 2359 while (head->tw_next != tw) 2360 head = head->tw_next; 2361 head->tw_next = tw->tw_next; 2362 if (tw->tw_next == NULL) { 2363 pp->pp_tw_tail = head; 2364 } 2365 } 2366 uhci_free_tw(uhcip, tw); 2367 } 2368 2369 2370 void 2371 uhci_delete_td(uhci_state_t *uhcip, uhci_td_t *td) 2372 { 2373 uhci_td_t *tmp_td; 2374 uhci_trans_wrapper_t *tw = td->tw; 2375 2376 if ((td->outst_td_next == NULL) && (td->outst_td_prev == NULL)) { 2377 uhcip->uhci_outst_tds_head = NULL; 2378 uhcip->uhci_outst_tds_tail = NULL; 2379 } else if (td->outst_td_next == NULL) { 2380 td->outst_td_prev->outst_td_next = NULL; 2381 uhcip->uhci_outst_tds_tail = td->outst_td_prev; 2382 } else if (td->outst_td_prev == NULL) { 2383 td->outst_td_next->outst_td_prev = NULL; 2384 uhcip->uhci_outst_tds_head = td->outst_td_next; 2385 } else { 2386 td->outst_td_prev->outst_td_next = td->outst_td_next; 2387 td->outst_td_next->outst_td_prev = td->outst_td_prev; 2388 } 2389 2390 tmp_td = tw->tw_hctd_head; 2391 2392 if (tmp_td != td) { 2393 while 
(tmp_td->tw_td_next != td) { 2394 tmp_td = tmp_td->tw_td_next; 2395 } 2396 ASSERT(tmp_td); 2397 tmp_td->tw_td_next = td->tw_td_next; 2398 if (td->tw_td_next == NULL) { 2399 tw->tw_hctd_tail = tmp_td; 2400 } 2401 } else { 2402 tw->tw_hctd_head = tw->tw_hctd_head->tw_td_next; 2403 if (tw->tw_hctd_head == NULL) { 2404 tw->tw_hctd_tail = NULL; 2405 } 2406 } 2407 2408 td->flag = TD_FLAG_FREE; 2409 } 2410 2411 2412 void 2413 uhci_remove_tds_tws( 2414 uhci_state_t *uhcip, 2415 usba_pipe_handle_data_t *ph) 2416 { 2417 usb_opaque_t curr_reqp; 2418 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2419 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep; 2420 uhci_trans_wrapper_t *tw_tmp; 2421 uhci_trans_wrapper_t *tw_head = pp->pp_tw_head; 2422 2423 while (tw_head != NULL) { 2424 tw_tmp = tw_head; 2425 tw_head = tw_head->tw_next; 2426 2427 curr_reqp = tw_tmp->tw_curr_xfer_reqp; 2428 if (curr_reqp) { 2429 /* do this for control/bulk/intr */ 2430 if ((tw_tmp->tw_direction == PID_IN) && 2431 (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_INTR)) { 2432 uhci_deallocate_periodic_in_resource(uhcip, 2433 pp, tw_tmp); 2434 } else { 2435 uhci_hcdi_callback(uhcip, pp, 2436 pp->pp_pipe_handle, tw_tmp, USB_CR_FLUSHED); 2437 } 2438 } /* end of curr_reqp */ 2439 2440 if (tw_tmp->tw_claim != UHCI_MODIFY_TD_BITS_CLAIMED) { 2441 continue; 2442 } 2443 2444 while (tw_tmp->tw_hctd_head != NULL) { 2445 uhci_delete_td(uhcip, tw_tmp->tw_hctd_head); 2446 } 2447 2448 uhci_deallocate_tw(uhcip, pp, tw_tmp); 2449 } 2450 } 2451 2452 2453 /* 2454 * uhci_remove_qh: 2455 * Remove the Queue Head from the Host Controller's 2456 * appropriate QH list. 
2457 */ 2458 void 2459 uhci_remove_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2460 { 2461 uhci_td_t *dummy_td; 2462 2463 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2464 2465 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2466 "uhci_remove_qh:"); 2467 2468 dummy_td = pp->pp_qh->td_tailp; 2469 dummy_td->flag = TD_FLAG_FREE; 2470 2471 switch (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep)) { 2472 case USB_EP_ATTR_CONTROL: 2473 uhci_remove_ctrl_qh(uhcip, pp); 2474 break; 2475 case USB_EP_ATTR_BULK: 2476 uhci_remove_bulk_qh(uhcip, pp); 2477 break; 2478 case USB_EP_ATTR_INTR: 2479 uhci_remove_intr_qh(uhcip, pp); 2480 break; 2481 } 2482 } 2483 2484 2485 static void 2486 uhci_remove_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2487 { 2488 queue_head_t *qh = pp->pp_qh; 2489 queue_head_t *next_lattice_qh = 2490 QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2491 2492 qh->prev_qh->link_ptr = qh->link_ptr; 2493 next_lattice_qh->prev_qh = qh->prev_qh; 2494 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2495 2496 } 2497 2498 /* 2499 * uhci_remove_bulk_qh: 2500 * Remove a bulk QH from the Host Controller's QH list. There may be a 2501 * loop for bulk QHs, we must care about this while removing a bulk QH. 2502 */ 2503 static void 2504 uhci_remove_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2505 { 2506 queue_head_t *qh = pp->pp_qh; 2507 queue_head_t *next_lattice_qh; 2508 uint32_t paddr; 2509 2510 paddr = (GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2511 next_lattice_qh = (qh == uhcip->uhci_bulk_xfers_q_tail) ? 
2512 0 : QH_VADDR(paddr); 2513 2514 if ((qh == uhcip->uhci_bulk_xfers_q_tail) && 2515 (qh->prev_qh == uhcip->uhci_bulk_xfers_q_head)) { 2516 SetQH32(uhcip, qh->prev_qh->link_ptr, HC_END_OF_LIST); 2517 } else { 2518 qh->prev_qh->link_ptr = qh->link_ptr; 2519 } 2520 2521 if (next_lattice_qh == NULL) { 2522 uhcip->uhci_bulk_xfers_q_tail = qh->prev_qh; 2523 } else { 2524 next_lattice_qh->prev_qh = qh->prev_qh; 2525 } 2526 2527 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2528 2529 } 2530 2531 2532 static void 2533 uhci_remove_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2534 { 2535 queue_head_t *qh = pp->pp_qh; 2536 queue_head_t *next_lattice_qh = 2537 QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2538 2539 qh->prev_qh->link_ptr = qh->link_ptr; 2540 if (next_lattice_qh->prev_qh != NULL) { 2541 next_lattice_qh->prev_qh = qh->prev_qh; 2542 } else { 2543 uhcip->uhci_ctrl_xfers_q_tail = qh->prev_qh; 2544 } 2545 2546 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2547 } 2548 2549 2550 /* 2551 * uhci_allocate_td_from_pool: 2552 * Allocate a Transfer Descriptor (TD) from the TD buffer pool. 2553 */ 2554 static uhci_td_t * 2555 uhci_allocate_td_from_pool(uhci_state_t *uhcip) 2556 { 2557 int index; 2558 uhci_td_t *td; 2559 2560 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2561 2562 /* 2563 * Search for a blank Transfer Descriptor (TD) 2564 * in the TD buffer pool. 
2565 */ 2566 for (index = 0; index < uhci_td_pool_size; index ++) { 2567 if (uhcip->uhci_td_pool_addr[index].flag == TD_FLAG_FREE) { 2568 break; 2569 } 2570 } 2571 2572 if (index == uhci_td_pool_size) { 2573 USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2574 "uhci_allocate_td_from_pool: TD exhausted"); 2575 2576 return (NULL); 2577 } 2578 2579 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2580 "uhci_allocate_td_from_pool: Allocated %d", index); 2581 2582 /* Create a new dummy for the end of the TD list */ 2583 td = &uhcip->uhci_td_pool_addr[index]; 2584 2585 /* Mark the newly allocated TD as a dummy */ 2586 td->flag = TD_FLAG_DUMMY; 2587 td->qh_td_prev = NULL; 2588 2589 return (td); 2590 } 2591 2592 2593 /* 2594 * uhci_insert_bulk_td: 2595 */ 2596 int 2597 uhci_insert_bulk_td( 2598 uhci_state_t *uhcip, 2599 usba_pipe_handle_data_t *ph, 2600 usb_bulk_req_t *req, 2601 usb_flags_t flags) 2602 { 2603 size_t length; 2604 uint_t mps; /* MaxPacketSize */ 2605 uint_t num_bulk_tds, i; 2606 uint32_t buf_addr; 2607 uhci_td_t *bulk_td_ptr; 2608 uhci_td_t *current_dummy; 2609 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2610 uhci_trans_wrapper_t *tw; 2611 uhci_bulk_isoc_xfer_t *bulk_xfer_info; 2612 2613 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2614 "uhci_insert_bulk_td: req: 0x%p, flags = 0x%x", req, flags); 2615 2616 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2617 2618 /* 2619 * Create transfer wrapper 2620 */ 2621 if ((tw = uhci_create_transfer_wrapper(uhcip, pp, req->bulk_len, 2622 flags)) == NULL) { 2623 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2624 "uhci_insert_bulk_td: TW allocation failed"); 2625 2626 return (USB_NO_RESOURCES); 2627 } 2628 2629 tw->tw_bytes_xfered = 0; 2630 tw->tw_bytes_pending = req->bulk_len; 2631 tw->tw_handle_td = uhci_handle_bulk_td; 2632 tw->tw_handle_callback_value = (usb_opaque_t)req->bulk_data; 2633 tw->tw_timeout_cnt = req->bulk_timeout; 2634 tw->tw_data = req->bulk_data; 2635 
tw->tw_curr_xfer_reqp = (usb_opaque_t)req; 2636 2637 /* Get the bulk pipe direction */ 2638 tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ? 2639 PID_OUT : PID_IN; 2640 2641 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2642 "uhci_insert_bulk_td: direction: 0x%x", tw->tw_direction); 2643 2644 /* If the DATA OUT, copy the data into transfer buffer. */ 2645 if (tw->tw_direction == PID_OUT) { 2646 ASSERT(req->bulk_data != NULL); 2647 2648 /* Copy the data into the message */ 2649 ddi_rep_put8(tw->tw_accesshandle, req->bulk_data->b_rptr, 2650 (uint8_t *)tw->tw_buf, req->bulk_len, DDI_DEV_AUTOINCR); 2651 } 2652 2653 /* Get the max packet size. */ 2654 length = mps = pp->pp_pipe_handle->p_ep.wMaxPacketSize; 2655 2656 /* 2657 * Calculate number of TD's to insert in the current frame interval. 2658 * Max number TD's allowed (driver implementation) is 128 2659 * in one frame interval. Once all the TD's are completed 2660 * then the remaining TD's will be inserted into the lattice 2661 * in the uhci_handle_bulk_td(). 
2662 */ 2663 if ((tw->tw_bytes_pending / mps) >= MAX_NUM_BULK_TDS_PER_XFER) { 2664 num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER; 2665 } else { 2666 num_bulk_tds = (tw->tw_bytes_pending / mps); 2667 2668 if (tw->tw_bytes_pending % mps) { 2669 num_bulk_tds++; 2670 length = (tw->tw_bytes_pending % mps); 2671 } 2672 } 2673 2674 /* 2675 * Allocate memory for the bulk xfer information structure 2676 */ 2677 if ((bulk_xfer_info = kmem_zalloc( 2678 sizeof (uhci_bulk_isoc_xfer_t), KM_NOSLEEP)) == NULL) { 2679 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2680 "uhci_insert_bulk_td: kmem_zalloc failed"); 2681 2682 /* Free the transfer wrapper */ 2683 uhci_deallocate_tw(uhcip, pp, tw); 2684 2685 return (USB_FAILURE); 2686 } 2687 2688 /* Allocate memory for the bulk TD's */ 2689 if (uhci_alloc_memory_for_tds(uhcip, num_bulk_tds, bulk_xfer_info) != 2690 USB_SUCCESS) { 2691 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2692 "uhci_insert_bulk_td: alloc_memory_for_tds failed"); 2693 2694 kmem_free(bulk_xfer_info, sizeof (uhci_bulk_isoc_xfer_t)); 2695 2696 /* Free the transfer wrapper */ 2697 uhci_deallocate_tw(uhcip, pp, tw); 2698 2699 return (USB_FAILURE); 2700 } 2701 2702 bulk_td_ptr = (uhci_td_t *)bulk_xfer_info->pool_addr; 2703 bulk_td_ptr[0].qh_td_prev = NULL; 2704 current_dummy = pp->pp_qh->td_tailp; 2705 buf_addr = tw->tw_cookie.dmac_address; 2706 pp->pp_qh->bulk_xfer_info = bulk_xfer_info; 2707 2708 /* Fill up all the bulk TD's */ 2709 for (i = 0; i < (num_bulk_tds - 1); i++) { 2710 uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[i], 2711 &bulk_td_ptr[i+1], BULKTD_PADDR(bulk_xfer_info, 2712 &bulk_td_ptr[i+1]), ph, buf_addr, mps, tw); 2713 buf_addr += mps; 2714 } 2715 2716 uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[i], current_dummy, 2717 TD_PADDR(current_dummy), ph, buf_addr, length, tw); 2718 bulk_xfer_info->num_tds = num_bulk_tds; 2719 2720 /* 2721 * Point the end of the lattice tree to the start of the bulk xfers 2722 * queue head. 
This allows the HC to execute the same Queue Head/TD 2723 * in the same frame. There are some bulk devices, which NAKs after 2724 * completing each TD. As a result, the performance on such devices 2725 * is very bad. This loop will provide a chance to execute NAk'ed 2726 * bulk TDs again in the same frame. 2727 */ 2728 if (uhcip->uhci_pending_bulk_cmds++ == 0) { 2729 uhcip->uhci_bulk_xfers_q_tail->link_ptr = 2730 uhcip->uhci_bulk_xfers_q_head->link_ptr; 2731 USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 2732 "uhci_insert_bulk_td: count = %d no tds %d", 2733 uhcip->uhci_pending_bulk_cmds, num_bulk_tds); 2734 } 2735 2736 /* Insert on the bulk queue head for the execution by HC */ 2737 SetQH32(uhcip, pp->pp_qh->element_ptr, 2738 bulk_xfer_info->cookie.dmac_address); 2739 2740 return (USB_SUCCESS); 2741 } 2742 2743 2744 /* 2745 * uhci_fill_in_bulk_isoc_td 2746 * Fills the bulk TD 2747 */ 2748 void 2749 uhci_fill_in_bulk_isoc_td(uhci_state_t *uhcip, uhci_td_t *current_td, 2750 uhci_td_t *next_td, 2751 uint32_t next_td_paddr, 2752 usba_pipe_handle_data_t *ph, 2753 uint_t buffer_address, 2754 uint_t length, 2755 uhci_trans_wrapper_t *tw) 2756 { 2757 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2758 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep; 2759 2760 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2761 "uhci_fill_in_bulk_isoc_td: tw = 0x%p", tw); 2762 2763 bzero((char *)current_td, sizeof (uhci_td_t)); 2764 SetTD32(uhcip, current_td->link_ptr, next_td_paddr | HC_DEPTH_FIRST); 2765 2766 switch (UHCI_XFER_TYPE(ept)) { 2767 case USB_EP_ATTR_ISOCH: 2768 if (((usb_isoc_req_t *)tw->tw_curr_xfer_reqp)->isoc_attributes 2769 & USB_ATTRS_SHORT_XFER_OK) { 2770 SetTD_spd(uhcip, current_td, 1); 2771 } 2772 break; 2773 case USB_EP_ATTR_BULK: 2774 if (((usb_bulk_req_t *)tw->tw_curr_xfer_reqp)->bulk_attributes 2775 & USB_ATTRS_SHORT_XFER_OK) { 2776 SetTD_spd(uhcip, current_td, 1); 2777 } 2778 break; 2779 } 2780 2781 
mutex_enter(&ph->p_usba_device->usb_mutex); 2782 2783 SetTD_c_err(uhcip, current_td, UHCI_MAX_ERR_COUNT); 2784 SetTD_status(uhcip, current_td, UHCI_TD_ACTIVE); 2785 SetTD_ioc(uhcip, current_td, INTERRUPT_ON_COMPLETION); 2786 SetTD_mlen(uhcip, current_td, (length - 1)); 2787 SetTD_dtogg(uhcip, current_td, pp->pp_data_toggle); 2788 SetTD_devaddr(uhcip, current_td, ph->p_usba_device->usb_addr); 2789 SetTD_endpt(uhcip, current_td, ph->p_ep.bEndpointAddress & 2790 END_POINT_ADDRESS_MASK); 2791 SetTD_PID(uhcip, current_td, tw->tw_direction); 2792 SetTD32(uhcip, current_td->buffer_address, buffer_address); 2793 2794 /* 2795 * Adjust the data toggle. 2796 * The data toggle bit must always be 0 for isoc transfers. 2797 * And set the "iso" bit in the TD for isoc transfers. 2798 */ 2799 if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) { 2800 pp->pp_data_toggle = 0; 2801 SetTD_iso(uhcip, current_td, 1); 2802 } else { 2803 ADJ_DATA_TOGGLE(pp); 2804 next_td->qh_td_prev = current_td; 2805 pp->pp_qh->td_tailp = next_td; 2806 } 2807 2808 current_td->outst_td_next = NULL; 2809 current_td->outst_td_prev = uhcip->uhci_outst_tds_tail; 2810 if (uhcip->uhci_outst_tds_head == NULL) { 2811 uhcip->uhci_outst_tds_head = current_td; 2812 } else { 2813 uhcip->uhci_outst_tds_tail->outst_td_next = current_td; 2814 } 2815 uhcip->uhci_outst_tds_tail = current_td; 2816 current_td->tw = tw; 2817 2818 if (tw->tw_hctd_head == NULL) { 2819 ASSERT(tw->tw_hctd_tail == NULL); 2820 tw->tw_hctd_head = current_td; 2821 tw->tw_hctd_tail = current_td; 2822 } else { 2823 /* Add the td to the end of the list */ 2824 tw->tw_hctd_tail->tw_td_next = current_td; 2825 tw->tw_hctd_tail = current_td; 2826 } 2827 2828 mutex_exit(&ph->p_usba_device->usb_mutex); 2829 } 2830 2831 2832 /* 2833 * uhci_alloc_memory_for_tds: 2834 * - Allocates memory for the isoc/bulk td pools. 
2835 */ 2836 static int 2837 uhci_alloc_memory_for_tds( 2838 uhci_state_t *uhcip, 2839 uint_t num_tds, 2840 uhci_bulk_isoc_xfer_t *info) 2841 { 2842 int result; 2843 size_t real_length; 2844 uint_t ccount; 2845 ddi_device_acc_attr_t dev_attr; 2846 2847 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 2848 "uhci_alloc_memory_for_tds: num_tds: 0x%x info: 0x%p", 2849 num_tds, info); 2850 2851 /* The host controller will be little endian */ 2852 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 2853 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 2854 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 2855 2856 /* Allocate the bulk TD pool DMA handle */ 2857 if (ddi_dma_alloc_handle(uhcip->uhci_dip, &uhcip->uhci_dma_attr, 2858 DDI_DMA_SLEEP, 0, &info->dma_handle) != DDI_SUCCESS) { 2859 2860 return (USB_FAILURE); 2861 } 2862 2863 /* Allocate the memory for the bulk TD pool */ 2864 if (ddi_dma_mem_alloc(info->dma_handle, 2865 num_tds * sizeof (uhci_td_t), 2866 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0, 2867 &info->pool_addr, &real_length, &info->mem_handle)) { 2868 2869 return (USB_FAILURE); 2870 } 2871 2872 /* Map the bulk TD pool into the I/O address space */ 2873 result = ddi_dma_addr_bind_handle(info->dma_handle, NULL, 2874 (caddr_t)info->pool_addr, real_length, 2875 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 2876 &info->cookie, &ccount); 2877 2878 /* Process the result */ 2879 if (result == DDI_DMA_MAPPED) { 2880 /* The cookie count should be 1 */ 2881 if (ccount != 1) { 2882 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 2883 "uhci_allocate_pools: More than 1 cookie"); 2884 2885 return (USB_FAILURE); 2886 } 2887 } else { 2888 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 2889 "uhci_allocate_pools: Result = %d", result); 2890 uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result); 2891 2892 return (USB_FAILURE); 2893 } 2894 2895 bzero((void *)info->pool_addr, num_tds * sizeof (uhci_td_t)); 2896 2897 return 
(USB_SUCCESS); 2898 } 2899 2900 2901 /* 2902 * uhci_handle_bulk_td: 2903 * 2904 * Handles the completed bulk transfer descriptors 2905 */ 2906 void 2907 uhci_handle_bulk_td(uhci_state_t *uhcip, uhci_td_t *td) 2908 { 2909 uint_t num_bulk_tds, i; 2910 usb_cr_t error; 2911 uint_t length, bytes_xfered; 2912 ushort_t MaxPacketSize; 2913 uint32_t buf_addr, paddr; 2914 uhci_td_t *bulk_td_ptr, *current_dummy, *td_head; 2915 queue_head_t *qh, *next_qh; 2916 uhci_trans_wrapper_t *tw = td->tw; 2917 uhci_pipe_private_t *pp = tw->tw_pipe_private; 2918 uhci_bulk_isoc_xfer_t *bulk_xfer_info; 2919 usba_pipe_handle_data_t *ph; 2920 2921 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 2922 "uhci_handle_bulk_td: td = 0x%p tw = 0x%p", td, tw); 2923 2924 /* 2925 * Update the tw_bytes_pending, and tw_bytes_xfered 2926 */ 2927 bytes_xfered = ZERO_LENGTH; 2928 2929 /* 2930 * Check whether there are any errors occurred in the xfer. 2931 * If so, update the data_toggle for the queue head and 2932 * return error to the upper layer. 2933 */ 2934 if (GetTD_status(uhcip, td) & TD_STATUS_MASK) { 2935 uhci_handle_bulk_td_errors(uhcip, td); 2936 2937 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2938 "uhci_handle_bulk_td: error; data toggle: 0x%x", 2939 pp->pp_data_toggle); 2940 2941 return; 2942 } 2943 2944 /* 2945 * Update the tw_bytes_pending, and tw_bytes_xfered 2946 */ 2947 bytes_xfered = GetTD_alen(uhcip, td); 2948 if (bytes_xfered != ZERO_LENGTH) { 2949 tw->tw_bytes_pending -= (bytes_xfered + 1); 2950 tw->tw_bytes_xfered += (bytes_xfered + 1); 2951 } 2952 2953 /* 2954 * Get Bulk pipe information and pipe handle 2955 */ 2956 bulk_xfer_info = pp->pp_qh->bulk_xfer_info; 2957 ph = tw->tw_pipe_private->pp_pipe_handle; 2958 2959 /* 2960 * Check whether data underrun occurred. 
2961 * If so, complete the transfer 2962 * Update the data toggle bit 2963 */ 2964 if (bytes_xfered != GetTD_mlen(uhcip, td)) { 2965 bulk_xfer_info->num_tds = 1; 2966 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 2967 "uhci_handle_bulk_td: Data underrun occured"); 2968 2969 pp->pp_data_toggle = GetTD_dtogg(uhcip, td) == 0 ? 1 : 0; 2970 } 2971 2972 /* 2973 * If the TD's in the current frame are completed, then check 2974 * whether we have any more bytes to xfer. If so, insert TD's. 2975 * If no more bytes needs to be transferred, then do callback to the 2976 * upper layer. 2977 * If the TD's in the current frame are not completed, then 2978 * just delete the TD from the linked lists. 2979 */ 2980 USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2981 "uhci_handle_bulk_td: completed TD data toggle: 0x%x", 2982 GetTD_dtogg(uhcip, td)); 2983 2984 if (--bulk_xfer_info->num_tds == 0) { 2985 uhci_delete_td(uhcip, td); 2986 2987 if ((tw->tw_bytes_pending) && 2988 (GetTD_mlen(uhcip, td) - GetTD_alen(uhcip, td) == 0)) { 2989 2990 MaxPacketSize = pp->pp_pipe_handle->p_ep.wMaxPacketSize; 2991 length = MaxPacketSize; 2992 2993 qh = pp->pp_qh; 2994 paddr = GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK; 2995 if (GetQH32(uhcip, qh->link_ptr) != 2996 GetQH32(uhcip, 2997 uhcip->uhci_bulk_xfers_q_head->link_ptr)) { 2998 next_qh = QH_VADDR(paddr); 2999 SetQH32(uhcip, qh->prev_qh->link_ptr, 3000 paddr|(0x2)); 3001 next_qh->prev_qh = qh->prev_qh; 3002 SetQH32(uhcip, qh->link_ptr, 3003 GetQH32(uhcip, 3004 uhcip->uhci_bulk_xfers_q_head->link_ptr)); 3005 qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail; 3006 SetQH32(uhcip, 3007 uhcip->uhci_bulk_xfers_q_tail->link_ptr, 3008 QH_PADDR(qh) | 0x2); 3009 uhcip->uhci_bulk_xfers_q_tail = qh; 3010 } 3011 3012 if ((tw->tw_bytes_pending / MaxPacketSize) >= 3013 MAX_NUM_BULK_TDS_PER_XFER) { 3014 num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER; 3015 } else { 3016 num_bulk_tds = 3017 (tw->tw_bytes_pending / MaxPacketSize); 3018 if (tw->tw_bytes_pending 
% MaxPacketSize) { 3019 num_bulk_tds++; 3020 length = (tw->tw_bytes_pending % 3021 MaxPacketSize); 3022 } 3023 } 3024 3025 current_dummy = pp->pp_qh->td_tailp; 3026 bulk_td_ptr = (uhci_td_t *)bulk_xfer_info->pool_addr; 3027 buf_addr = tw->tw_cookie.dmac_address + 3028 tw->tw_bytes_xfered; 3029 for (i = 0; i < (num_bulk_tds - 1); i++) { 3030 uhci_fill_in_bulk_isoc_td(uhcip, 3031 &bulk_td_ptr[i], &bulk_td_ptr[i + 1], 3032 BULKTD_PADDR(bulk_xfer_info, 3033 &bulk_td_ptr[i+1]), ph, buf_addr, 3034 MaxPacketSize, tw); 3035 3036 buf_addr += MaxPacketSize; 3037 } 3038 3039 uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[i], 3040 current_dummy, TD_PADDR(current_dummy), ph, 3041 buf_addr, length, tw); 3042 3043 pp->pp_qh->bulk_xfer_info = bulk_xfer_info; 3044 bulk_xfer_info->num_tds = num_bulk_tds; 3045 SetQH32(uhcip, pp->pp_qh->element_ptr, 3046 bulk_xfer_info->cookie.dmac_address); 3047 } else { 3048 usba_pipe_handle_data_t *usb_pp = pp->pp_pipe_handle; 3049 3050 pp->pp_qh->bulk_xfer_info = NULL; 3051 3052 if (tw->tw_bytes_pending) { 3053 /* Update the element pointer */ 3054 SetQH32(uhcip, pp->pp_qh->element_ptr, 3055 TD_PADDR(pp->pp_qh->td_tailp)); 3056 3057 /* Remove all the tds */ 3058 td_head = tw->tw_hctd_head; 3059 while (td_head != NULL) { 3060 uhci_delete_td(uhcip, td_head); 3061 td_head = tw->tw_hctd_head; 3062 } 3063 } 3064 3065 if (tw->tw_direction == PID_IN) { 3066 usb_req_attrs_t attrs = ((usb_bulk_req_t *) 3067 tw->tw_curr_xfer_reqp)->bulk_attributes; 3068 3069 error = USB_CR_OK; 3070 3071 /* Data run occurred */ 3072 if (tw->tw_bytes_pending && 3073 (!(attrs & USB_ATTRS_SHORT_XFER_OK))) { 3074 error = USB_CR_DATA_UNDERRUN; 3075 } 3076 3077 uhci_sendup_td_message(uhcip, error, tw); 3078 } else { 3079 uhci_do_byte_stats(uhcip, tw->tw_length, 3080 usb_pp->p_ep.bmAttributes, 3081 usb_pp->p_ep.bEndpointAddress); 3082 3083 /* Data underrun occurred */ 3084 if (tw->tw_bytes_pending) { 3085 3086 tw->tw_data->b_rptr += 3087 tw->tw_bytes_xfered; 3088 3089 
USB_DPRINTF_L2(PRINT_MASK_ATTA, 3090 uhcip->uhci_log_hdl, 3091 "uhci_handle_bulk_td: " 3092 "data underrun occurred"); 3093 3094 uhci_hcdi_callback(uhcip, pp, 3095 tw->tw_pipe_private->pp_pipe_handle, 3096 tw, USB_CR_DATA_UNDERRUN); 3097 } else { 3098 uhci_hcdi_callback(uhcip, pp, 3099 tw->tw_pipe_private->pp_pipe_handle, 3100 tw, USB_CR_OK); 3101 } 3102 } /* direction */ 3103 3104 /* Deallocate DMA memory */ 3105 uhci_deallocate_tw(uhcip, pp, tw); 3106 (void) ddi_dma_unbind_handle( 3107 bulk_xfer_info->dma_handle); 3108 ddi_dma_mem_free(&bulk_xfer_info->mem_handle); 3109 ddi_dma_free_handle(&bulk_xfer_info->dma_handle); 3110 kmem_free(bulk_xfer_info, 3111 sizeof (uhci_bulk_isoc_xfer_t)); 3112 3113 /* 3114 * When there are no pending bulk commands, point the 3115 * end of the lattice tree to NULL. This will make sure 3116 * that the HC control does not loop anymore and PCI 3117 * bus is not affected. 3118 */ 3119 if (--uhcip->uhci_pending_bulk_cmds == 0) { 3120 uhcip->uhci_bulk_xfers_q_tail->link_ptr = 3121 HC_END_OF_LIST; 3122 USB_DPRINTF_L3(PRINT_MASK_ATTA, 3123 uhcip->uhci_log_hdl, 3124 "uhci_handle_bulk_td: count = %d", 3125 uhcip->uhci_pending_bulk_cmds); 3126 } 3127 } 3128 } else { 3129 uhci_delete_td(uhcip, td); 3130 } 3131 } 3132 3133 3134 void 3135 uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td) 3136 { 3137 usb_cr_t usb_err; 3138 uint32_t paddr_tail, element_ptr; 3139 uhci_td_t *next_td; 3140 uhci_pipe_private_t *pp; 3141 uhci_trans_wrapper_t *tw = td->tw; 3142 usba_pipe_handle_data_t *ph; 3143 3144 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3145 "uhci_handle_bulk_td_errors: td = %p", (void *)td); 3146 3147 #ifdef DEBUG 3148 uhci_print_td(uhcip, td); 3149 #endif 3150 3151 tw = td->tw; 3152 ph = tw->tw_pipe_private->pp_pipe_handle; 3153 pp = (uhci_pipe_private_t *)ph->p_hcd_private; 3154 3155 /* 3156 * Find the type of error occurred and return the error 3157 * to the upper layer. And adjust the data toggle. 
3158 */ 3159 element_ptr = GetQH32(uhcip, pp->pp_qh->element_ptr) & 3160 QH_ELEMENT_PTR_MASK; 3161 paddr_tail = TD_PADDR(pp->pp_qh->td_tailp); 3162 3163 /* 3164 * If a timeout occurs before a transfer has completed, 3165 * the timeout handler sets the CRC/Timeout bit and clears the Active 3166 * bit in the link_ptr for each td in the transfer. 3167 * It then waits (at least) 1 ms so that any tds the controller might 3168 * have been executing will have completed. 3169 * So at this point element_ptr will point to either: 3170 * 1) the next td for the transfer (which has not been executed, 3171 * and has the CRC/Timeout status bit set and Active bit cleared), 3172 * 2) the dummy td for this qh. 3173 * So if the element_ptr does not point to the dummy td, we know 3174 * it points to the next td that would have been executed. 3175 * That td has the data toggle we want to save. 3176 * All outstanding tds have been marked as CRC/Timeout, 3177 * so it doesn't matter which td we pass to uhci_parse_td_error 3178 * for the error status. 3179 */ 3180 if (element_ptr != paddr_tail) { 3181 next_td = BULKTD_VADDR(pp->pp_qh->bulk_xfer_info, 3182 (element_ptr & QH_ELEMENT_PTR_MASK)); 3183 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3184 "uhci_handle_bulk_td_errors: next td = %p", 3185 (void *)next_td); 3186 3187 usb_err = uhci_parse_td_error(uhcip, pp, next_td); 3188 } else { 3189 usb_err = uhci_parse_td_error(uhcip, pp, td); 3190 } 3191 3192 /* 3193 * Update the link pointer. 3194 */ 3195 SetQH32(uhcip, pp->pp_qh->element_ptr, TD_PADDR(pp->pp_qh->td_tailp)); 3196 3197 /* 3198 * Send up number of bytes transferred before the error condition. 3199 */ 3200 if ((tw->tw_direction == PID_OUT) && tw->tw_data) { 3201 tw->tw_data->b_rptr += tw->tw_bytes_xfered; 3202 } 3203 3204 uhci_remove_bulk_tds_tws(uhcip, tw->tw_pipe_private, UHCI_IN_ERROR); 3205 3206 /* 3207 * When there are no pending bulk commands, point the end of the 3208 * lattice tree to NULL. 
	 * This will make sure that the HC control
	 * does not loop anymore and PCI bus is not affected.
	 */
	if (--uhcip->uhci_pending_bulk_cmds == 0) {
		uhcip->uhci_bulk_xfers_q_tail->link_ptr = HC_END_OF_LIST;
		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_handle_bulk_td_errors: count = %d",
		    uhcip->uhci_pending_bulk_cmds);
	}

	uhci_hcdi_callback(uhcip, pp, ph, tw, usb_err);
	uhci_deallocate_tw(uhcip, pp, tw);
}


/*
 * uhci_remove_bulk_tds_tws:
 *	Walk the outstanding-TD list and remove every TD (and, on close/
 *	reset, every transfer wrapper) belonging to this pipe's QH, then
 *	free the pipe's bulk DMA resources.  'what' is UHCI_IN_CLOSE,
 *	UHCI_IN_RESET or UHCI_IN_ERROR and controls whether clients are
 *	called back with USB_CR_FLUSHED and whether TWs are freed here.
 */
void
uhci_remove_bulk_tds_tws(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	int			what)
{
	uint_t			rval;
	uhci_td_t		*head;
	uhci_td_t		*head_next;
	usb_opaque_t		curr_reqp;
	uhci_bulk_isoc_xfer_t	*info;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Nothing to do if no bulk transfer is outstanding on this pipe */
	if ((info = pp->pp_qh->bulk_xfer_info) == NULL) {

		return;
	}

	head = uhcip->uhci_outst_tds_head;

	while (head) {
		uhci_trans_wrapper_t *tw_tmp = head->tw;
		/* Save the successor now; uhci_delete_td unlinks 'head' */
		head_next = head->outst_td_next;

		if (pp->pp_qh == tw_tmp->tw_pipe_private->pp_qh) {
			curr_reqp = tw_tmp->tw_curr_xfer_reqp;
			if (curr_reqp &&
			    ((what == UHCI_IN_CLOSE) ||
			    (what == UHCI_IN_RESET))) {
				uhci_hcdi_callback(uhcip, pp,
				    pp->pp_pipe_handle,
				    tw_tmp, USB_CR_FLUSHED);
			} /* end of curr_reqp */

			uhci_delete_td(uhcip, head);

			if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
				ASSERT(info->num_tds > 0);
				if (--info->num_tds == 0) {
					uhci_deallocate_tw(uhcip, pp, tw_tmp);

					/*
					 * This will make sure that the HC
					 * does not loop anymore when there
					 * are no pending bulk commands.
					 */
					if (--uhcip->uhci_pending_bulk_cmds
					    == 0) {
						uhcip->uhci_bulk_xfers_q_tail->
						    link_ptr = HC_END_OF_LIST;
						USB_DPRINTF_L3(PRINT_MASK_ATTA,
						    uhcip->uhci_log_hdl,
						    "uhci_remove_bulk_tds_tws:"
						    " count = %d",
						    uhcip->
						    uhci_pending_bulk_cmds);
					}
				}
			}
		}

		head = head_next;
	}

	if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
		ASSERT(info->num_tds == 0);
	}

	/* Release the pipe's bulk DMA binding, memory and handle */
	rval = ddi_dma_unbind_handle(info->dma_handle);
	ASSERT(rval == DDI_SUCCESS);
	ddi_dma_mem_free(&info->mem_handle);
	ddi_dma_free_handle(&info->dma_handle);
	kmem_free(info, sizeof (uhci_bulk_isoc_xfer_t));
	pp->pp_qh->bulk_xfer_info = NULL;
}


/*
 * uhci_save_data_toggle ()
 *	Save the data toggle in the usba_device structure
 */
void
uhci_save_data_toggle(uhci_pipe_private_t *pp)
{
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	/* Save the data toggle in the usb devices structure.
	 */
	mutex_enter(&ph->p_mutex);
	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
	    pp->pp_data_toggle);
	mutex_exit(&ph->p_mutex);
}


/*
 * uhci_insert_isoc_td:
 *	- Create transfer wrapper
 *	- Allocate memory for the isoc td's
 *	- Fill up all the TD's and submit to the HC
 *	- Update all the linked lists
 */
int
uhci_insert_isoc_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_isoc_req_t		*isoc_req,
	size_t			length,
	usb_flags_t		flags)
{
	int			rval = USB_SUCCESS;
	int			error;
	uint_t			ddic;
	uint32_t		i, buffer_address;
	uint32_t		bytes_to_xfer;
	uint32_t		expired_frames = 0;
	usb_frame_number_t	start_frame, end_frame, current_frame;
	uhci_td_t		*td_ptr;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: ph = 0x%p, isoc req = %p length = %lu",
	    ph, (void *)isoc_req, length);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Allocate a transfer wrapper */
	if ((tw = uhci_create_transfer_wrapper(uhcip, pp, length, flags)) ==
	    NULL) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/* Save current isochronous request pointer */
	tw->tw_curr_xfer_reqp = (usb_opaque_t)isoc_req;

	/*
	 * Initialize the transfer wrapper. These values are useful
	 * for sending back the reply.
	 */
	tw->tw_handle_td = uhci_handle_isoc_td;
	tw->tw_handle_callback_value = NULL;
	tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
	    PID_OUT : PID_IN;

	/*
	 * If the transfer isoc send, then copy the data from the request
	 * to the transfer wrapper.
	 */
	if ((tw->tw_direction == PID_OUT) && length) {
		ASSERT(isoc_req->isoc_data != NULL);

		/* Copy the data into the message */
		ddi_rep_put8(tw->tw_accesshandle, isoc_req->isoc_data->b_rptr,
		    (uint8_t *)tw->tw_buf, length, DDI_DEV_AUTOINCR);
	}

	if (tw->tw_direction == PID_IN) {
		if ((rval = uhci_allocate_periodic_in_resource(uhcip, pp, tw,
		    flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
			    "uhci_insert_isoc_td: isoc_req_t alloc failed");

			/*
			 * NOTE(review): tw is not deallocated on this failure
			 * path (the allocation-failure path below does call
			 * uhci_deallocate_tw) — looks like a possible TW
			 * leak; confirm against pipe close handling.
			 */
			return (rval);
		}

		/* Use the dup'ed request saved by the periodic-in alloc */
		isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;
	}

	tw->tw_isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;

	/* Get the pointer to the isoc_xfer_info structure */
	isoc_xfer_info = (uhci_bulk_isoc_xfer_t *)&tw->tw_xfer_info;
	isoc_xfer_info->num_tds = isoc_req->isoc_pkts_count;

	/*
	 * Allocate memory for isoc tds
	 */
	if ((rval = uhci_alloc_memory_for_tds(uhcip, isoc_req->isoc_pkts_count,
	    isoc_xfer_info)) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: Memory allocation failure");
		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	/*
	 * Get the isoc td pool address, buffer address and
	 * max packet size that the device supports.
	 */
	td_ptr = (uhci_td_t *)isoc_xfer_info->pool_addr;
	buffer_address = tw->tw_cookie.dmac_address;

	/*
	 * Fill up the isoc tds
	 */
	USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td : isoc pkts %d", isoc_req->isoc_pkts_count);

	/* One TD per isoc packet; buffer_address walks the DMA buffer */
	for (i = 0; i < isoc_req->isoc_pkts_count; i++) {
		td_ptr[i].isoc_pkt_index = i;
		bytes_to_xfer = isoc_req->isoc_pkt_descr[i].isoc_pkt_length;

		uhci_fill_in_bulk_isoc_td(uhcip, &td_ptr[i], (uhci_td_t *)NULL,
		    HC_END_OF_LIST, ph, buffer_address, bytes_to_xfer, tw);
		buffer_address += isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
	}

	/*
	 * Get the starting frame number.
	 * The client drivers sets the flag USB_ATTRS_ISOC_XFER_ASAP to inform
	 * the HCD to care of starting frame number.
	 *
	 * Following code is very time critical. So, perform atomic execution.
	 *
	 * NOTE(review): if the request carries neither
	 * USB_ATTRS_ISOC_START_FRAME nor USB_ATTRS_ISOC_XFER_ASAP,
	 * start_frame/end_frame are used below without being set —
	 * presumably the framework guarantees one of the two attributes
	 * is always present; confirm against usba request validation.
	 */
	ddic = ddi_enter_critical();
	current_frame = uhci_get_sw_frame_number(uhcip);

	if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_START_FRAME) {
		/* Client specified the starting frame explicitly */
		start_frame = isoc_req->isoc_frame_no;
		end_frame = start_frame + isoc_req->isoc_pkts_count;

		/* Check available frames */
		if ((end_frame - current_frame) < UHCI_MAX_ISOC_FRAMES) {
			if (current_frame > start_frame) {
				if ((current_frame + FRNUM_OFFSET) <
				    end_frame) {
					/*
					 * Start is already in the past:
					 * skip the expired packets and
					 * begin just ahead of the HC.
					 */
					expired_frames = current_frame +
					    FRNUM_OFFSET - start_frame;
					start_frame = current_frame +
					    FRNUM_OFFSET;
				} else {
					rval = USB_INVALID_START_FRAME;
				}
			}
		} else {
			rval = USB_INVALID_START_FRAME;
		}

	} else if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_XFER_ASAP) {
		/* HCD picks the start frame: continue from the pipe's last */
		start_frame = pp->pp_frame_num;

		if (start_frame == INVALID_FRNUM) {
			start_frame = current_frame + FRNUM_OFFSET;
		} else if (current_frame > start_frame) {
			start_frame = current_frame + FRNUM_OFFSET;
		}

		end_frame = start_frame +
		    isoc_req->isoc_pkts_count;
		isoc_req->isoc_frame_no = start_frame;

	}

	if (rval != USB_SUCCESS) {

		/* Exit the critical */
		ddi_exit_critical(ddic);

		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: Invalid starting frame number");

		/* Undo everything built so far: TDs, DMA, TW */
		while (tw->tw_hctd_head) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		error = ddi_dma_unbind_handle(isoc_xfer_info->dma_handle);
		ASSERT(error == DDI_SUCCESS);

		ddi_dma_mem_free(&isoc_xfer_info->mem_handle);
		ddi_dma_free_handle(&isoc_xfer_info->dma_handle);

		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	/* Complete the packets whose frames have already passed */
	for (i = 0; i < expired_frames; i++) {
		isoc_req->isoc_pkt_descr[i].isoc_pkt_status =
		    USB_CR_NOT_ACCESSED;
		isoc_req->isoc_pkt_descr[i].isoc_pkt_actual_length =
		    isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		uhci_delete_td(uhcip, &td_ptr[i]);
		--isoc_xfer_info->num_tds;
	}

	/*
	 * Add the TD's to the HC list
	 */
	start_frame = (start_frame & 0x3ff);
	for (; i < isoc_req->isoc_pkts_count; i++) {
		if (uhcip->uhci_isoc_q_tailp[start_frame]) {
			/* Append to this frame's existing isoc queue */
			td_ptr[i].isoc_prev =
			    uhcip->uhci_isoc_q_tailp[start_frame];
			td_ptr[i].isoc_next = NULL;
			td_ptr[i].link_ptr =
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr;
			uhcip->uhci_isoc_q_tailp[start_frame]->isoc_next =
			    &td_ptr[i];
			SetTD32(uhcip,
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr,
			    ISOCTD_PADDR(isoc_xfer_info, &td_ptr[i]));
			uhcip->uhci_isoc_q_tailp[start_frame] = &td_ptr[i];
		} else {
			/* First isoc TD in this frame: splice ahead of the
			 * existing frame-list entry */
			uhcip->uhci_isoc_q_tailp[start_frame] = &td_ptr[i];
			td_ptr[i].isoc_next = NULL;
			td_ptr[i].isoc_prev = NULL;
			SetTD32(uhcip, td_ptr[i].link_ptr,
			    GetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame]));
			SetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame],
			    ISOCTD_PADDR(isoc_xfer_info, &td_ptr[i]));
3540 } 3541 td_ptr[i].starting_frame = start_frame; 3542 3543 if (++start_frame == NUM_FRAME_LST_ENTRIES) 3544 start_frame = 0; 3545 } 3546 3547 ddi_exit_critical(ddic); 3548 pp->pp_frame_num = end_frame; 3549 3550 USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 3551 "uhci_insert_isoc_td: current frame number 0x%llx, pipe frame num" 3552 " 0x%llx", current_frame, pp->pp_frame_num); 3553 3554 return (rval); 3555 } 3556 3557 3558 /* 3559 * uhci_handle_isoc_td: 3560 * Handles the completed isoc tds 3561 */ 3562 void 3563 uhci_handle_isoc_td(uhci_state_t *uhcip, uhci_td_t *td) 3564 { 3565 uint_t rval; 3566 uint32_t pkt_index = td->isoc_pkt_index; 3567 usb_cr_t cr; 3568 uhci_trans_wrapper_t *tw = td->tw; 3569 usb_isoc_req_t *isoc_req = (usb_isoc_req_t *)tw->tw_isoc_req; 3570 uhci_pipe_private_t *pp = tw->tw_pipe_private; 3571 uhci_bulk_isoc_xfer_t *isoc_xfer_info = &tw->tw_xfer_info; 3572 usba_pipe_handle_data_t *usb_pp; 3573 3574 USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 3575 "uhci_handle_isoc_td: td = 0x%p, pp = 0x%p, tw = 0x%p, req = 0x%p, " 3576 "index = %x", td, pp, tw, isoc_req, pkt_index); 3577 3578 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 3579 3580 usb_pp = pp->pp_pipe_handle; 3581 3582 /* 3583 * Check whether there are any errors occurred. If so, update error 3584 * count and return it to the upper.But never return a non zero 3585 * completion reason. 3586 */ 3587 cr = USB_CR_OK; 3588 if (GetTD_status(uhcip, td) & TD_STATUS_MASK) { 3589 USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 3590 "uhci_handle_isoc_td: Error Occurred: TD Status = %x", 3591 GetTD_status(uhcip, td)); 3592 isoc_req->isoc_error_count++; 3593 } 3594 3595 if (isoc_req != NULL) { 3596 isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_status = cr; 3597 isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_actual_length = 3598 (GetTD_alen(uhcip, td) == ZERO_LENGTH) ? 
0 : 3599 GetTD_alen(uhcip, td) + 1; 3600 } 3601 3602 uhci_delete_isoc_td(uhcip, td); 3603 3604 if (--isoc_xfer_info->num_tds != 0) { 3605 USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 3606 "uhci_handle_isoc_td: Number of TDs %d", 3607 isoc_xfer_info->num_tds); 3608 3609 return; 3610 } 3611 3612 tw->tw_claim = UHCI_INTR_HDLR_CLAIMED; 3613 if (tw->tw_direction == PID_IN) { 3614 uhci_sendup_td_message(uhcip, cr, tw); 3615 3616 if ((uhci_handle_isoc_receive(uhcip, pp, tw)) != USB_SUCCESS) { 3617 USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 3618 "uhci_handle_isoc_td: Drop message"); 3619 } 3620 3621 } else { 3622 /* update kstats only for OUT. sendup_td_msg() does it for IN */ 3623 uhci_do_byte_stats(uhcip, tw->tw_length, 3624 usb_pp->p_ep.bmAttributes, usb_pp->p_ep.bEndpointAddress); 3625 3626 uhci_hcdi_callback(uhcip, pp, usb_pp, tw, USB_CR_OK); 3627 } 3628 3629 rval = ddi_dma_unbind_handle(isoc_xfer_info->dma_handle); 3630 ASSERT(rval == DDI_SUCCESS); 3631 ddi_dma_mem_free(&isoc_xfer_info->mem_handle); 3632 ddi_dma_free_handle(&isoc_xfer_info->dma_handle); 3633 uhci_deallocate_tw(uhcip, pp, tw); 3634 } 3635 3636 3637 /* 3638 * uhci_handle_isoc_receive: 3639 * - Sends the isoc data to the client 3640 * - Inserts another isoc receive request 3641 */ 3642 static int 3643 uhci_handle_isoc_receive( 3644 uhci_state_t *uhcip, 3645 uhci_pipe_private_t *pp, 3646 uhci_trans_wrapper_t *tw) 3647 { 3648 USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 3649 "uhci_handle_isoc_receive: tw = 0x%p", tw); 3650 3651 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 3652 3653 /* 3654 * -- check for pipe state being polling before 3655 * inserting a new request. Check when is TD 3656 * de-allocation being done? 
	 *	(so we can reuse the same TD)
	 */
	if (uhci_start_isoc_receive_polling(uhcip,
	    pp->pp_pipe_handle, (usb_isoc_req_t *)tw->tw_curr_xfer_reqp,
	    0) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_handle_isoc_receive: receive polling failed");

		return (USB_FAILURE);
	}

	return (USB_SUCCESS);
}


/*
 * uhci_delete_isoc_td:
 *	- Delete from the outstanding command queue
 *	- Delete from the tw queue
 *	- Delete from the isoc queue
 *	- Delete from the HOST CONTROLLER list
 */
static void
uhci_delete_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint32_t	starting_frame = td->starting_frame;

	/*
	 * Unlink the TD from its per-frame doubly-linked isoc queue.
	 * Four cases: only TD in the frame, tail TD, head TD, middle TD.
	 */
	if ((td->isoc_next == NULL) && (td->isoc_prev == NULL)) {
		/* Only TD: restore the frame list entry and clear the tail */
		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
		    GetTD32(uhcip, td->link_ptr));
		uhcip->uhci_isoc_q_tailp[starting_frame] = 0;
	} else if (td->isoc_next == NULL) {
		/* Tail TD: predecessor becomes the new tail */
		td->isoc_prev->link_ptr = td->link_ptr;
		td->isoc_prev->isoc_next = NULL;
		uhcip->uhci_isoc_q_tailp[starting_frame] = td->isoc_prev;
	} else if (td->isoc_prev == NULL) {
		/* Head TD: frame list entry points past it */
		td->isoc_next->isoc_prev = NULL;
		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
		    GetTD32(uhcip, td->link_ptr));
	} else {
		/* Middle TD: bridge neighbours in both lists */
		td->isoc_prev->isoc_next = td->isoc_next;
		td->isoc_next->isoc_prev = td->isoc_prev;
		td->isoc_prev->link_ptr = td->link_ptr;
	}

	uhci_delete_td(uhcip, td);
}


/*
 * uhci_send_isoc_receive
 *	- Allocates usb_isoc_request
 *	- Updates the isoc request
 *	- Inserts the isoc td's into the HC processing list.
 *
 * (Defined below as uhci_start_isoc_receive_polling.)
 */
int
uhci_start_isoc_receive_polling(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_isoc_req_t		*isoc_req,
	usb_flags_t		usb_flags)
{
	int			ii, error;
	size_t			max_isoc_xfer_size, length;
	ushort_t		isoc_pkt_count;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	usb_isoc_pkt_descr_t	*isoc_pkt_descr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_start_isoc_receive_polling: usb_flags = %x", usb_flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	max_isoc_xfer_size = ph->p_ep.wMaxPacketSize * UHCI_MAX_ISOC_PKTS;

	/*
	 * isoc_req is NULL when restarting polling internally; fall back
	 * to the saved client periodic-in request.
	 */
	if (isoc_req) {
		isoc_pkt_descr = isoc_req->isoc_pkt_descr;
		isoc_pkt_count = isoc_req->isoc_pkts_count;
	} else {
		isoc_pkt_descr = ((usb_isoc_req_t *)
		    pp->pp_client_periodic_in_reqp)->isoc_pkt_descr;
		isoc_pkt_count = ((usb_isoc_req_t *)
		    pp->pp_client_periodic_in_reqp)->isoc_pkts_count;
	}

	/* Total transfer length is the sum of all packet lengths */
	for (ii = 0, length = 0; ii < isoc_pkt_count; ii++) {
		length += isoc_pkt_descr->isoc_pkt_length;
		isoc_pkt_descr++;
	}

	/* Check the size of isochronous request */
	if (length > max_isoc_xfer_size) {
		USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_start_isoc_receive_polling: "
		    "Max isoc request size = %lx, Given isoc req size = %lx",
		    max_isoc_xfer_size, length);

		return (USB_FAILURE);
	}

	/*
	 * Add the TD into the Host Controller's isoc list.
	 * NOTE(review): on failure usb_free_isoc_req() may be handed a
	 * NULL isoc_req on the internal-restart path — verify that
	 * usb_free_isoc_req() tolerates NULL.
	 */
	if ((error = uhci_insert_isoc_td(uhcip, ph, isoc_req,
	    length, usb_flags)) != USB_SUCCESS) {
		usb_free_isoc_req(isoc_req);
	}

	return (error);
}


/*
 * uhci_remove_isoc_tds_tws
 *	This routine scans the pipe and removes all the td's
 *	and transfer wrappers and deallocates the memory
 *	associated with those td's and tw's.
 */
void
uhci_remove_isoc_tds_tws(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uint_t			rval;
	uhci_td_t		*tmp_td, *td_head;
	usb_isoc_req_t		*isoc_req;
	uhci_trans_wrapper_t	*tmp_tw, *tw_head;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_remove_isoc_tds_tws: pp = %p", (void *)pp);

	tw_head = pp->pp_tw_head;
	while (tw_head) {
		tmp_tw = tw_head;
		tw_head = tw_head->tw_next;
		td_head = tmp_tw->tw_hctd_head;
		/* Return the pending request to the client first */
		if (tmp_tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp,
			    tmp_tw);
		} else if (tmp_tw->tw_direction == PID_OUT) {
			uhci_hcdi_callback(uhcip, pp, pp->pp_pipe_handle,
			    tmp_tw, USB_CR_FLUSHED);
		}

		/* Unlink and free every TD of this transfer wrapper */
		while (td_head) {
			tmp_td = td_head;
			td_head = td_head->tw_td_next;
			uhci_delete_isoc_td(uhcip, tmp_td);
		}

		isoc_req = (usb_isoc_req_t *)tmp_tw->tw_isoc_req;
		if (isoc_req) {
			usb_free_isoc_req(isoc_req);
		}

		ASSERT(tmp_tw->tw_hctd_head == NULL);

		/* Release DMA resources if this TW had its own binding */
		if (tmp_tw->tw_xfer_info.dma_handle) {
			rval = ddi_dma_unbind_handle(tmp_tw->tw_xfer_info.
			    dma_handle);
			ASSERT(rval == DDI_SUCCESS);
			ddi_dma_mem_free(&tmp_tw->tw_xfer_info.mem_handle);
			ddi_dma_free_handle(&tmp_tw->tw_xfer_info.dma_handle);
		}

		uhci_deallocate_tw(uhcip, pp, tmp_tw);
	}
}


/*
 * uhci_isoc_update_sw_frame_number()
 *	to avoid code duplication, call uhci_get_sw_frame_number()
 */
void
uhci_isoc_update_sw_frame_number(uhci_state_t *uhcip)
{
	(void) uhci_get_sw_frame_number(uhcip);
}


/*
 * uhci_get_sw_frame_number:
 *	Hold the uhci_int_mutex before calling this routine.
 */
uint64_t
uhci_get_sw_frame_number(uhci_state_t *uhcip)
{
	uint64_t sw_frnum, hw_frnum, current_frnum;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	sw_frnum = uhcip->uhci_sw_frnum;
	hw_frnum = Get_OpReg16(FRNUM);

	/*
	 * Check bit 10 in the software counter and hardware frame counter.
	 * If both are same, then don't increment the software frame counter
	 * (Bit 10 of hw frame counter toggle for every 1024 frames)
	 * The lower 11 bits of software counter contains the hardware frame
	 * counter value. The MSB (bit 10) of software counter is incremented
	 * for every 1024 frames either here or in get frame number routine.
	 */
	if ((sw_frnum & UHCI_BIT_10_MASK) == (hw_frnum & UHCI_BIT_10_MASK)) {
		/* The MSB of hw counter did not toggle */
		current_frnum = ((sw_frnum & (SW_FRNUM_MASK)) | hw_frnum);
	} else {
		/*
		 * The hw counter wrapped around. And the interrupt handler
		 * did not get a chance to update the sw frame counter.
		 * So, update the sw frame counter and return correct frame no.
		 */
		sw_frnum >>= UHCI_SIZE_OF_HW_FRNUM - 1;
		current_frnum =
		    ((++sw_frnum << (UHCI_SIZE_OF_HW_FRNUM - 1)) | hw_frnum);
	}
	uhcip->uhci_sw_frnum = current_frnum;

	/*
	 * NOTE(review): %ld with uint64_t arguments mismatches on 32-bit
	 * kernels — %llu (or "%" PRIu64) would be correct; confirm against
	 * the driver's other frame-number printfs.
	 */
	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_get_sw_frame_number: sw=%ld hd=%ld",
	    uhcip->uhci_sw_frnum, hw_frnum);

	return (current_frnum);
}


/*
 * uhci_cmd_timeout_hdlr:
 *	This routine will get called for every second. It checks for
 *	timed out control commands/bulk commands. Timeout any commands
 *	that exceeds the time out period specified by the pipe policy.
 */
void
uhci_cmd_timeout_hdlr(void *arg)
{
	uint_t			flag = B_FALSE;
	uhci_td_t		*head, *tmp_td;
	uhci_state_t		*uhcip = (uhci_state_t *)arg;
	uhci_pipe_private_t	*pp;

	/*
	 * Check whether any of the control xfers are timed out.
	 * If so, complete those commands with time out as reason.
	 */
	mutex_enter(&uhcip->uhci_int_mutex);
	head = uhcip->uhci_outst_tds_head;

	/* Pass 1: find timed-out TDs and stop the HC from executing them */
	while (head) {
		/*
		 * If timeout out is zero, then dont timeout command.
		 */
		if (head->tw->tw_timeout_cnt == 0) {
			head = head->outst_td_next;
			continue;
		}

		/* Decrement the count only once per TW per tick */
		if (!(head->tw->tw_flags & TW_TIMEOUT_FLAG)) {
			head->tw->tw_flags |= TW_TIMEOUT_FLAG;
			--head->tw->tw_timeout_cnt;
		}

		/* only do it for bulk and control TDs */
		if ((head->tw->tw_timeout_cnt == 0) &&
		    (head->tw->tw_handle_td != uhci_handle_isoc_td)) {

			USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "Command timed out: td = %p", (void *)head);

			head->tw->tw_claim = UHCI_TIMEOUT_HDLR_CLAIMED;

			/*
			 * Check finally whether the command completed
			 */
			if (GetTD_status(uhcip, head) & UHCI_TD_ACTIVE) {
				/* Terminate both the TD chain and the QH */
				SetTD32(uhcip, head->link_ptr,
				    GetTD32(uhcip, head->link_ptr) |
				    HC_END_OF_LIST);
				pp = head->tw->tw_pipe_private;
				SetQH32(uhcip, pp->pp_qh->element_ptr,
				    GetQH32(uhcip, pp->pp_qh->element_ptr) |
				    HC_END_OF_LIST);
			}

			flag = B_TRUE;
		}

		head = head->outst_td_next;
	}

	/*
	 * Wait a frame so the controller is guaranteed to have finished
	 * with any TD it was executing when we marked it.
	 */
	if (flag) {
		(void) uhci_wait_for_sof(uhcip);
	}

	/* Pass 2: clear per-tick flags and mark claimed TDs as timed out */
	head = uhcip->uhci_outst_tds_head;
	while (head) {
		if (head->tw->tw_flags & TW_TIMEOUT_FLAG) {
			head->tw->tw_flags &= ~TW_TIMEOUT_FLAG;
		}
		if (head->tw->tw_claim == UHCI_TIMEOUT_HDLR_CLAIMED) {
			head->tw->tw_claim = UHCI_NOT_CLAIMED;
			tmp_td = head->tw->tw_hctd_head;
			while (tmp_td) {
				SetTD_status(uhcip, tmp_td,
				    UHCI_TD_CRC_TIMEOUT);
				tmp_td = tmp_td->tw_td_next;
			}
		}
		head = head->outst_td_next;
	}

	/*
	 * Process the td which was completed before shifting from normal
	 * mode to polled mode
	 */
	if (uhcip->uhci_polled_flag == UHCI_POLLED_FLAG_TRUE) {
		uhci_process_submitted_td_queue(uhcip);
		uhcip->uhci_polled_flag = UHCI_POLLED_FLAG_FALSE;
	} else if (flag) {
		/* Process the completed/timed out commands */
		uhci_process_submitted_td_queue(uhcip);
	}

	/* Re-register the control/bulk/intr commands' timeout handler */
	if (uhcip->uhci_cmd_timeout_id) {
		uhcip->uhci_cmd_timeout_id = timeout(uhci_cmd_timeout_hdlr,
		    (void *)uhcip, UHCI_ONE_SECOND);
	}

	mutex_exit(&uhcip->uhci_int_mutex);
}


/*
 * uhci_wait_for_sof:
 *	Wait for the start of the next frame (implying any changes made in the
 *	lattice have now taken effect).
 *	To be sure this is the case, we wait for the completion of the current
 *	frame (which might have already been pending), then another complete
 *	frame to ensure everything has taken effect.
3991 */ 3992 int 3993 uhci_wait_for_sof(uhci_state_t *uhcip) 3994 { 3995 int n; 3996 ushort_t cmd_reg; 3997 usb_frame_number_t before_frame_number, after_frame_number; 3998 clock_t time, rval; 3999 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 4000 "uhci_wait_for_sof: uhcip = %p", uhcip); 4001 4002 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 4003 before_frame_number = uhci_get_sw_frame_number(uhcip); 4004 for (n = 0; n < MAX_SOF_WAIT_COUNT; n++) { 4005 SetTD_ioc(uhcip, uhcip->uhci_sof_td, 1); 4006 uhcip->uhci_cv_signal = B_TRUE; 4007 4008 time = ddi_get_lbolt() + UHCI_ONE_SECOND; 4009 rval = cv_timedwait(&uhcip->uhci_cv_SOF, 4010 &uhcip->uhci_int_mutex, time); 4011 4012 after_frame_number = uhci_get_sw_frame_number(uhcip); 4013 if ((rval == -1) && 4014 (after_frame_number <= before_frame_number)) { 4015 cmd_reg = Get_OpReg16(USBCMD); 4016 Set_OpReg16(USBCMD, (cmd_reg | USBCMD_REG_HC_RUN)); 4017 Set_OpReg16(USBINTR, ENABLE_ALL_INTRS); 4018 after_frame_number = uhci_get_sw_frame_number(uhcip); 4019 } 4020 before_frame_number = after_frame_number; 4021 } 4022 4023 SetTD_ioc(uhcip, uhcip->uhci_sof_td, 0); 4024 4025 return (uhcip->uhci_cv_signal ? USB_FAILURE : USB_SUCCESS); 4026 4027 } 4028 4029 /* 4030 * uhci_allocate_periodic_in_resource: 4031 * Allocate interrupt/isochronous request structure for the 4032 * interrupt/isochronous IN transfer. 
 */
int
uhci_allocate_periodic_in_resource(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw,
	usb_flags_t		flags)
{
	size_t			length = 0;
	usb_opaque_t		client_periodic_in_reqp;
	usb_intr_req_t		*cur_intr_req;
	usb_isoc_req_t		*curr_isoc_reqp;
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_allocate_periodic_in_resource:\n\t"
	    "ph = 0x%p, pp = 0x%p, tw = 0x%p, flags = 0x%x", ph, pp, tw, flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Check the current periodic in request pointer.
	 * NOTE(review): the message says "allocation failed" but this path
	 * deliberately returns USB_SUCCESS — the request already exists, so
	 * nothing needs allocating.
	 */
	if (tw->tw_curr_xfer_reqp) {
		USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
		    "uhci_allocate_periodic_in_resource: Interrupt "
		    "request structure already exists: "
		    "allocation failed");

		return (USB_SUCCESS);
	}

	/* Get the client periodic in request pointer */
	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;

	/*
	 * If it a periodic IN request and periodic request is NULL,
	 * allocate corresponding usb periodic IN request for the
	 * current periodic polling request and copy the information
	 * from the saved periodic request structure.
	 */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_INTR) {
		/* Get the interrupt transfer length */
		length = ((usb_intr_req_t *)client_periodic_in_reqp)->
		    intr_len;

		cur_intr_req = usba_hcdi_dup_intr_req(ph->p_dip,
		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
		if (cur_intr_req == NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Interrupt "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/* Check and save the timeout value */
		tw->tw_timeout_cnt = (cur_intr_req->intr_attributes &
		    USB_ATTRS_ONE_XFER) ? cur_intr_req->intr_timeout: 0;
		tw->tw_curr_xfer_reqp = (usb_opaque_t)cur_intr_req;
		tw->tw_length = cur_intr_req->intr_len;
	} else {
		ASSERT(client_periodic_in_reqp != NULL);

		if ((curr_isoc_reqp = usba_hcdi_dup_isoc_req(ph->p_dip,
		    (usb_isoc_req_t *)client_periodic_in_reqp, flags)) ==
		    NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Isochronous "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/*
		 * Save the client's isochronous request pointer and
		 * length of isochronous transfer in transfer wrapper.
		 * The dup'ed request is saved in pp_client_periodic_in_reqp
		 */
		tw->tw_curr_xfer_reqp =
		    (usb_opaque_t)pp->pp_client_periodic_in_reqp;
		pp->pp_client_periodic_in_reqp = (usb_opaque_t)curr_isoc_reqp;
		tw->tw_length = curr_isoc_reqp->isoc_pkts_length;
	}

	/* Account for the new outstanding request on this pipe */
	mutex_enter(&ph->p_mutex);
	ph->p_req_count++;
	mutex_exit(&ph->p_mutex);

	return (USB_SUCCESS);
}


/*
 * uhci_deallocate_periodic_in_resource:
 *	Deallocate interrupt/isochronous request structure for the
 *	interrupt/isochronous IN transfer.
 */
void
uhci_deallocate_periodic_in_resource(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw)
{
	usb_opaque_t		curr_xfer_reqp;
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_deallocate_periodic_in_resource: "
	    "pp = 0x%p tw = 0x%p", pp, tw);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	curr_xfer_reqp = tw->tw_curr_xfer_reqp;
	if (curr_xfer_reqp) {
		/*
		 * Reset periodic in request usb isoch
		 * packet request pointers to null.
		 */
		tw->tw_curr_xfer_reqp = NULL;
		tw->tw_isoc_req = NULL;

		mutex_enter(&ph->p_mutex);
		ph->p_req_count--;
		mutex_exit(&ph->p_mutex);

		/*
		 * Free pre-allocated interrupt or isochronous requests.
		 */
		switch (UHCI_XFER_TYPE(&ph->p_ep)) {
		case USB_EP_ATTR_INTR:
			usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
			break;
		case USB_EP_ATTR_ISOCH:
			usb_free_isoc_req((usb_isoc_req_t *)curr_xfer_reqp);
			break;
		}
	}
}


/*
 * uhci_hcdi_callback()
 *	convenience wrapper around usba_hcdi_callback()
 */
void
uhci_hcdi_callback(uhci_state_t *uhcip, uhci_pipe_private_t *pp,
    usba_pipe_handle_data_t *ph, uhci_trans_wrapper_t *tw, usb_cr_t cr)
{
	usb_opaque_t	curr_xfer_reqp;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x", ph, tw, cr);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Take ownership of the request being completed: either the TW's
	 * current request, or the saved client periodic-in request.
	 */
	if (tw && tw->tw_curr_xfer_reqp) {
		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
		tw->tw_curr_xfer_reqp = NULL;
		tw->tw_isoc_req = NULL;
	} else {
		ASSERT(pp->pp_client_periodic_in_reqp != NULL);

		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
		pp->pp_client_periodic_in_reqp = NULL;
	}

	ASSERT(curr_xfer_reqp != NULL);

	/* Drop the HC mutex across the client callback to avoid deadlock */
	mutex_exit(&uhcip->uhci_int_mutex);
	usba_hcdi_cb(ph, curr_xfer_reqp, cr);
	mutex_enter(&uhcip->uhci_int_mutex);
}


#ifdef DEBUG
/*
 * uhci_print_td:
 *	Debug-only: dump the four TD dwords, the transfer-wrapper byte
 *	counts and the associated queue head.
 */
static void
uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint_t	*ptr = (uint_t *)td;

#ifndef lint
	_NOTE(NO_COMPETING_THREADS_NOW);
#endif
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tDWORD 1 0x%x\t DWORD 2 0x%x", ptr[0], ptr[1]);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tDWORD 3 0x%x\t DWORD 4 0x%x", ptr[2], ptr[3]);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tBytes xfered = %d", td->tw->tw_bytes_xfered);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tBytes Pending = %d", td->tw->tw_bytes_pending);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "Queue Head Details:");
	uhci_print_qh(uhcip, td->tw->tw_pipe_private->pp_qh);

#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif
}


/*
 * uhci_print_qh:
 *	Debug-only: dump the queue head's link and element pointers.
 */
static void
uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh)
{
	uint_t	*ptr = (uint_t *)qh;

	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tLink Ptr = %x Element Ptr = %x", ptr[0], ptr[1]);
}
#endif