/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Universal Host Controller Driver (UHCI)
 *
 * The UHCI driver is a driver which interfaces to the Universal
 * Serial Bus Architecture (USBA) and the Host Controller (HC). The
 * interface to the Host Controller is defined by the UHCI specification.
 * This file contains miscellaneous functions.
 */
#include <sys/usb/hcd/uhci/uhcid.h>
#include <sys/usb/hcd/uhci/uhciutil.h>
#include <sys/usb/hcd/uhci/uhcipolled.h>

#include <sys/disp.h>

/* Globals */
extern uint_t	uhci_td_pool_size;	/* Num TDs */
extern uint_t	uhci_qh_pool_size;	/* Num QHs */
extern ushort_t	uhci_tree_bottom_nodes[];
extern void	*uhci_statep;

/* function prototypes */
static void	uhci_build_interrupt_lattice(uhci_state_t *uhcip);
static int	uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip);

static uint_t	uhci_lattice_height(uint_t bandwidth);
static uint_t	uhci_lattice_parent(uint_t node);
static uint_t	uhci_leftmost_leaf(uint_t node, uint_t height);
static uint_t	uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
		    usb_port_status_t port_status);

static int	uhci_bandwidth_adjust(uhci_state_t *uhcip,
		    usb_ep_descr_t *endpoint, usb_port_status_t port_status);

static uhci_td_t *uhci_allocate_td_from_pool(uhci_state_t *uhcip);
static void	uhci_fill_in_td(uhci_state_t *uhcip,
		    uhci_td_t *td, uhci_td_t *current_dummy,
		    uint32_t buffer_offset, size_t length,
		    uhci_pipe_private_t *pp, uchar_t PID,
		    usb_req_attrs_t attrs, uhci_trans_wrapper_t *tw);
static uint32_t	uhci_get_tw_paddr_by_offs(uhci_state_t *uhcip,
		    uint32_t buffer_offset, size_t length,
		    uhci_trans_wrapper_t *tw);
static uhci_trans_wrapper_t *uhci_create_transfer_wrapper(
		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
		    size_t length, usb_flags_t usb_flags);
static uhci_trans_wrapper_t *uhci_create_isoc_transfer_wrapper(
		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
		    usb_isoc_req_t *req, size_t length,
		    usb_flags_t usb_flags);

static int	uhci_create_setup_pkt(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
static void	uhci_insert_ctrl_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_ctrl_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_insert_intr_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_intr_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_bulk_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_insert_bulk_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td);
static int	uhci_alloc_memory_for_tds(uhci_state_t *uhcip, uint_t num_tds,
		    uhci_bulk_isoc_xfer_t *info);
static int	uhci_alloc_bulk_isoc_tds(uhci_state_t *uhcip, uint_t num_tds,
		    uhci_bulk_isoc_xfer_t *info);
static void	uhci_get_isoc_td_by_index(uhci_state_t *uhcip,
		    uhci_bulk_isoc_xfer_t *info, uint_t index,
		    uhci_td_t **tdpp, uhci_bulk_isoc_td_pool_t **td_pool_pp);
static void	uhci_get_bulk_td_by_paddr(uhci_state_t *uhcip,
		    uhci_bulk_isoc_xfer_t *info, uint32_t paddr,
		    uhci_bulk_isoc_td_pool_t **td_pool_pp);

static int	uhci_handle_isoc_receive(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
static void	uhci_delete_isoc_td(uhci_state_t *uhcip,
		    uhci_td_t *td);
#ifdef DEBUG
static void	uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td);
static void	uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh);
#endif


/*
 * uhci_build_interrupt_lattice:
 *
 * Construct the interrupt lattice tree using static Queue Head pointers.
 * The lattice tree has a total of 63 queue heads, and the Host
 * Controller (HC) processes these queue heads every frame.
 */
static void
uhci_build_interrupt_lattice(uhci_state_t *uhcip)
{
	int			half_list = NUM_INTR_QH_LISTS / 2;
	uint16_t		i, j, k;
	uhci_td_t		*sof_td, *isoc_td;
	uintptr_t		addr;
	queue_head_t		*list_array = uhcip->uhci_qh_pool_addr;
	queue_head_t		*tmp_qh;
	frame_lst_table_t	*frame_lst_tablep =
				    uhcip->uhci_frame_lst_tablep;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_build_interrupt_lattice:");

	/*
	 * Reserve the first 63 queue head structures in the pool as static
	 * queue heads; these are required for constructing the interrupt
	 * lattice tree.
	 */
	for (i = 0; i < NUM_INTR_QH_LISTS; i++) {
		SetQH32(uhcip, list_array[i].link_ptr, HC_END_OF_LIST);
		SetQH32(uhcip, list_array[i].element_ptr, HC_END_OF_LIST);
		list_array[i].qh_flag = QUEUE_HEAD_FLAG_STATIC;
		list_array[i].node = i;
	}

	/* Build the interrupt lattice tree */
	for (i = 0; i < half_list - 1; i++) {
		/*
		 * The next pointer in the host controller queue head
		 * descriptor must contain an iommu address. Calculate
		 * the offset into the cpu address and add this to the
		 * starting iommu address.
		 */
		addr = QH_PADDR(&list_array[i]) | HC_QUEUE_HEAD;

		SetQH32(uhcip, list_array[2*i + 1].link_ptr, addr);
		SetQH32(uhcip, list_array[2*i + 2].link_ptr, addr);
	}

	/*
	 * Initialize the interrupt list in the Frame list Table
	 * so that it points to the bottom of the tree.
	 */
	for (i = 0, j = 0; i < pow_2(TREE_HEIGHT); i++) {
		addr = QH_PADDR(&list_array[half_list + i - 1]);
		for (k = 0; k < pow_2(VIRTUAL_TREE_HEIGHT); k++) {
			SetFL32(uhcip,
			    frame_lst_tablep[uhci_tree_bottom_nodes[j++]],
			    addr | HC_QUEUE_HEAD);
		}
	}

	/*
	 * Create the control and bulk queue heads.
	 */
	uhcip->uhci_ctrl_xfers_q_head = uhci_alloc_queue_head(uhcip);
	tmp_qh = uhcip->uhci_ctrl_xfers_q_tail = uhcip->uhci_ctrl_xfers_q_head;

	SetQH32(uhcip, list_array[0].link_ptr,
	    (QH_PADDR(tmp_qh) | HC_QUEUE_HEAD));

	uhcip->uhci_bulk_xfers_q_head = uhci_alloc_queue_head(uhcip);
	uhcip->uhci_bulk_xfers_q_tail = uhcip->uhci_bulk_xfers_q_head;
	SetQH32(uhcip, tmp_qh->link_ptr,
	    (QH_PADDR(uhcip->uhci_bulk_xfers_q_head)|HC_QUEUE_HEAD));

	SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_head->link_ptr, HC_END_OF_LIST);

	/*
	 * Add a dummy TD to static queue head 0. This is used
	 * to generate an interrupt at the end of a frame.
	 */
	sof_td = uhci_allocate_td_from_pool(uhcip);

	SetQH32(uhcip, list_array[0].element_ptr,
	    TD_PADDR(sof_td) | HC_TD_HEAD);
	SetTD32(uhcip, sof_td->link_ptr, HC_END_OF_LIST);
	uhcip->uhci_sof_td = sof_td;

	/*
	 * Add a dummy td that is used to generate an interrupt for
	 * every 1024 frames.
	 */
	isoc_td = uhci_allocate_td_from_pool(uhcip);
	SetTD32(uhcip, isoc_td->link_ptr, HC_END_OF_LIST);
	uhcip->uhci_isoc_td = isoc_td;

	uhcip->uhci_isoc_qh = uhci_alloc_queue_head(uhcip);
	SetQH32(uhcip, uhcip->uhci_isoc_qh->link_ptr,
	    GetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM]));
	SetQH32(uhcip, uhcip->uhci_isoc_qh->element_ptr, TD_PADDR(isoc_td));
	SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM],
	    QH_PADDR(uhcip->uhci_isoc_qh) | HC_QUEUE_HEAD);
}
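

/*
 * Note on the lattice numbering (illustrative, not from the original
 * source): the 63 static queue heads form a complete binary tree laid
 * out in array order, so the loop above links the children of node i
 * (nodes 2i + 1 and 2i + 2) up to their parent:
 *
 *	parent(n) = (n - 1) / 2		children(n) = 2n + 1, 2n + 2
 *
 * For example, nodes 1 and 2 both point at node 0. A QH hung off node
 * 0 is therefore reached every frame, one hung off node 1 or 2 every
 * second frame, and so on down to the 32 leaves, each of which is
 * reached every 32nd frame.
 */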


/*
 * uhci_allocate_pools:
 *	Allocate the system memory for the Queue Head (QH) and for the
 *	Transfer Descriptor (TD) pools. Both QH and TD structures
 *	must be aligned to a 16 byte boundary.
 */
int
uhci_allocate_pools(uhci_state_t *uhcip)
{
	dev_info_t		*dip = uhcip->uhci_dip;
	size_t			real_length;
	int			i, result;
	uint_t			ccount;
	ddi_device_acc_attr_t	dev_attr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_allocate_pools:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Allocate the TD pool DMA handle */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
	    &uhcip->uhci_td_pool_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Allocate the memory for the TD pool */
	if (ddi_dma_mem_alloc(uhcip->uhci_td_pool_dma_handle,
	    uhci_td_pool_size * sizeof (uhci_td_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    (caddr_t *)&uhcip->uhci_td_pool_addr, &real_length,
	    &uhcip->uhci_td_pool_mem_handle)) {

		return (USB_FAILURE);
	}

	/* Map the TD pool into the I/O address space */
	result = ddi_dma_addr_bind_handle(uhcip->uhci_td_pool_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_td_pool_addr, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &uhcip->uhci_td_pool_cookie, &ccount);

	bzero((void *)uhcip->uhci_td_pool_addr,
	    uhci_td_pool_size * sizeof (uhci_td_t));

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_allocate_pools: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_allocate_pools: Result = %d", result);

		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_TD_POOL_BOUND;

	/* Initialize the TD pool */
	for (i = 0; i < uhci_td_pool_size; i++) {
		uhcip->uhci_td_pool_addr[i].flag = TD_FLAG_FREE;
	}

	/* Allocate the QH pool DMA handle */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
	    0, &uhcip->uhci_qh_pool_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Allocate the memory for the QH pool */
	if (ddi_dma_mem_alloc(uhcip->uhci_qh_pool_dma_handle,
	    uhci_qh_pool_size * sizeof (queue_head_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    (caddr_t *)&uhcip->uhci_qh_pool_addr, &real_length,
	    &uhcip->uhci_qh_pool_mem_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	result = ddi_dma_addr_bind_handle(uhcip->uhci_qh_pool_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_qh_pool_addr, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &uhcip->uhci_qh_pool_cookie, &ccount);

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_allocate_pools: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_QH_POOL_BOUND;

	bzero((void *)uhcip->uhci_qh_pool_addr,
	    uhci_qh_pool_size * sizeof (queue_head_t));

	/* Initialize the QH pool */
	for (i = 0; i < uhci_qh_pool_size; i++) {
		uhcip->uhci_qh_pool_addr[i].qh_flag = QUEUE_HEAD_FLAG_FREE;
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_allocate_pools: Completed");

	return (USB_SUCCESS);
}


/*
 * uhci_free_pools:
 *	Cleanup on attach failure or detach
 */
void
uhci_free_pools(uhci_state_t *uhcip)
{
	int			i, flag, rval;
	uhci_td_t		*td;
	uhci_trans_wrapper_t	*tw;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_free_pools:");

	if (uhcip->uhci_td_pool_addr && uhcip->uhci_td_pool_mem_handle) {
		for (i = 0; i < uhci_td_pool_size; i++) {
			td = &uhcip->uhci_td_pool_addr[i];

			flag = uhcip->uhci_td_pool_addr[i].flag;
			if ((flag != TD_FLAG_FREE) &&
			    (flag != TD_FLAG_DUMMY) && (td->tw != NULL)) {
				tw = td->tw;
				uhci_free_tw(uhcip, tw);
			}
		}

		if (uhcip->uhci_dma_addr_bind_flag & UHCI_TD_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_td_pool_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&uhcip->uhci_td_pool_mem_handle);
	}

	/* Free the TD pool */
	if (uhcip->uhci_td_pool_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_td_pool_dma_handle);
	}

	if (uhcip->uhci_qh_pool_addr && uhcip->uhci_qh_pool_mem_handle) {
		if (uhcip->uhci_dma_addr_bind_flag & UHCI_QH_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_qh_pool_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&uhcip->uhci_qh_pool_mem_handle);
	}

	/* Free the QH pool */
	if (uhcip->uhci_qh_pool_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_qh_pool_dma_handle);
	}

	/* Free the Frame list Table area */
	if (uhcip->uhci_frame_lst_tablep && uhcip->uhci_flt_mem_handle) {
		if (uhcip->uhci_dma_addr_bind_flag & UHCI_FLA_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_flt_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&uhcip->uhci_flt_mem_handle);
	}

	if (uhcip->uhci_flt_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_flt_dma_handle);
	}
}


/*
 * uhci_decode_ddi_dma_addr_bind_handle_result:
 *	Process the return values of ddi_dma_addr_bind_handle()
 */
void
uhci_decode_ddi_dma_addr_bind_handle_result(uhci_state_t *uhcip, int result)
{
	char *msg;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_decode_ddi_dma_addr_bind_handle_result:");

	switch (result) {
	case DDI_DMA_PARTIAL_MAP:
		msg = "Partial transfers not allowed";
		break;
	case DDI_DMA_INUSE:
		msg = "Handle is in use";
		break;
	case DDI_DMA_NORESOURCES:
		msg = "No resources";
		break;
	case DDI_DMA_NOMAPPING:
		msg = "No mapping";
		break;
	case DDI_DMA_TOOBIG:
		msg = "Object is too big";
		break;
	default:
		msg = "Unknown dma error";
	}

	USB_DPRINTF_L4(PRINT_MASK_ALL, uhcip->uhci_log_hdl, "%s", msg);
}
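

/*
 * Illustrative note (not from the original source): the pool setup and
 * teardown above follow the usual DDI DMA pairing, a sketch:
 *
 *	ddi_dma_alloc_handle()			ddi_dma_free_handle()
 *	    ddi_dma_mem_alloc()			    ddi_dma_mem_free()
 *		ddi_dma_addr_bind_handle()	ddi_dma_unbind_handle()
 *
 * Each step is undone in reverse order, and the UHCI_*_POOL_BOUND flags
 * record which bindings exist so that a partially completed
 * uhci_allocate_pools() can still be cleaned up by uhci_free_pools().
 */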


/*
 * uhci_init_ctlr:
 *	Initialize the Host Controller (HC).
 */
int
uhci_init_ctlr(uhci_state_t *uhcip)
{
	dev_info_t	*dip = uhcip->uhci_dip;
	uint_t		cmd_reg;
	uint_t		frame_base_addr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_init_ctlr:");

	/*
	 * When USB legacy mode is enabled, the BIOS manages the USB keyboard
	 * attached to the UHCI controller. It has been observed that
	 * sometimes the BIOS does not clear the interrupts in the legacy
	 * mode register in the PCI configuration space. So, disable the
	 * SMI interrupts and route the interrupts to PIRQD here.
	 */
	pci_config_put16(uhcip->uhci_config_handle,
	    LEGACYMODE_REG_OFFSET, LEGACYMODE_REG_INIT_VALUE);

	/*
	 * Disable all the interrupts.
	 */
	Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

	mutex_enter(&uhcip->uhci_int_mutex);
	cmd_reg = Get_OpReg16(USBCMD);
	cmd_reg &= (~USBCMD_REG_HC_RUN);

	/* Stop the controller */
	Set_OpReg16(USBCMD, cmd_reg);

	/* Reset the host controller */
	Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

	/* Wait 10ms for reset to complete */
	mutex_exit(&uhcip->uhci_int_mutex);
	delay(drv_usectohz(UHCI_RESET_DELAY));
	mutex_enter(&uhcip->uhci_int_mutex);

	Set_OpReg16(USBCMD, 0);

	/* Set the frame number to zero */
	Set_OpReg16(FRNUM, 0);

	if (uhcip->uhci_hc_soft_state == UHCI_CTLR_INIT_STATE) {
		/* Initialize the Frame list base address area */
		if (uhci_init_frame_lst_table(dip, uhcip) != USB_SUCCESS) {
			mutex_exit(&uhcip->uhci_int_mutex);

			return (USB_FAILURE);
		}
	}

	/* Save the contents of the Frame Interval Register */
	uhcip->uhci_frame_interval = Get_OpReg8(SOFMOD);

	frame_base_addr = uhcip->uhci_flt_cookie.dmac_address;

	/* Set the Frame list base address */
	Set_OpReg32(FRBASEADD, frame_base_addr);

	/*
	 * Begin sending SOFs.
	 * Set the Host Controller Functional State to Operational.
	 */
	cmd_reg = Get_OpReg16(USBCMD);
	cmd_reg |= (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
	    USBCMD_REG_CONFIG_FLAG);

	Set_OpReg16(USBCMD, cmd_reg);

	/*
	 * Verify the Command and interrupt enable registers,
	 * a sanity check of whether the controller was actually
	 * initialized or not.
	 */
	cmd_reg = Get_OpReg16(USBCMD);

	if (!(cmd_reg & (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
	    USBCMD_REG_CONFIG_FLAG))) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_init_ctlr: Controller initialization failed");
		mutex_exit(&uhcip->uhci_int_mutex);

		return (USB_FAILURE);
	}

	/*
	 * Set the ioc bit of the isoc intr td. This enables
	 * the generation of an interrupt for every 1024 frames.
	 */
	SetTD_ioc(uhcip, uhcip->uhci_isoc_td, 1);

	/* Set host controller soft state to operational */
	uhcip->uhci_hc_soft_state = UHCI_CTLR_OPERATIONAL_STATE;
	mutex_exit(&uhcip->uhci_int_mutex);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_init_ctlr: Completed");

	return (USB_SUCCESS);
}


/*
 * uhci_uninit_ctlr:
 *	Uninitialize the Host Controller (HC).
 */
void
uhci_uninit_ctlr(uhci_state_t *uhcip)
{
	if (uhcip->uhci_regs_handle) {
		/* Disable all the interrupts. */
		Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

		/* Complete the current transaction and then halt. */
		Set_OpReg16(USBCMD, 0);

		/* Wait for some time */
		mutex_exit(&uhcip->uhci_int_mutex);
		delay(drv_usectohz(UHCI_TIMEWAIT));
		mutex_enter(&uhcip->uhci_int_mutex);
	}
}


/*
 * uhci_map_regs:
 *	The Host Controller (HC) contains a set of on-chip operational
 *	registers which should be mapped into a non-cacheable
 *	portion of the system addressable space.
 */
int
uhci_map_regs(uhci_state_t *uhcip)
{
	dev_info_t		*dip = uhcip->uhci_dip;
	int			index;
	uint32_t		regs_prop_len;
	int32_t			*regs_list;
	uint16_t		command_reg;
	ddi_device_acc_attr_t	attr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_map_regs:");

	/* The host controller will be little endian */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, uhcip->uhci_dip,
	    DDI_PROP_DONTPASS, "reg", &regs_list, &regs_prop_len) !=
	    DDI_PROP_SUCCESS) {

		return (USB_FAILURE);
	}

	for (index = 0; index * 5 < regs_prop_len; index++) {
		if (regs_list[index * 5] & UHCI_PROP_MASK) {
			break;
		}
	}

	/*
	 * Deallocate the memory allocated by ddi_prop_lookup_int_array().
	 */
	ddi_prop_free(regs_list);

	if (index * 5 >= regs_prop_len) {

		return (USB_FAILURE);
	}

	/* Map in operational registers */
	if (ddi_regs_map_setup(dip, index, (caddr_t *)&uhcip->uhci_regsp,
	    0, sizeof (hc_regs_t), &attr, &uhcip->uhci_regs_handle) !=
	    DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "ddi_regs_map_setup: failed");

		return (USB_FAILURE);
	}

	if (pci_config_setup(dip, &uhcip->uhci_config_handle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_map_regs: Config error");

		return (USB_FAILURE);
	}

	/* Make sure Memory Access Enable and Master Enable are set */
	command_reg = pci_config_get16(uhcip->uhci_config_handle,
	    PCI_CONF_COMM);
	if (!(command_reg & (PCI_COMM_MAE | PCI_COMM_ME))) {
		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_map_regs: No MAE/ME");
	}

	command_reg |= PCI_COMM_MAE | PCI_COMM_ME;
	pci_config_put16(uhcip->uhci_config_handle, PCI_CONF_COMM, command_reg);

	/*
	 * Check whether the I/O base address is configured and enabled.
	 */
	if (!(command_reg & PCI_COMM_IO)) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "I/O Base address access disabled");

		return (USB_FAILURE);
	}

	/*
	 * Get the IO base address of the controller.
	 */
	uhcip->uhci_iobase = (pci_config_get16(uhcip->uhci_config_handle,
	    PCI_CONF_IOBASE) & PCI_CONF_IOBASE_MASK);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_map_regs: Completed");

	return (USB_SUCCESS);
}
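

/*
 * Illustrative note (not from the original source): the "index * 5"
 * stride in the loop above reflects the PCI binding's "reg" property
 * layout, where each mappable range is described by five 32-bit cells:
 *
 *	phys.hi phys.mid phys.lo size.hi size.lo
 *
 * phys.hi encodes the address space type (config, I/O, or memory),
 * which is what the UHCI_PROP_MASK test selects on, so the loop is
 * simply searching the ranges for the register set of interest.
 */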


void
uhci_unmap_regs(uhci_state_t *uhcip)
{
	/* Unmap the UHCI registers */
	if (uhcip->uhci_regs_handle) {
		/* Reset the host controller */
		Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

		ddi_regs_map_free(&uhcip->uhci_regs_handle);
	}

	if (uhcip->uhci_config_handle) {
		pci_config_teardown(&uhcip->uhci_config_handle);
	}
}


/*
 * uhci_set_dma_attributes:
 *	Set the limits in the DMA attributes structure. Most of the values
 *	used in the DMA limit structures are the default values as specified
 *	in the Writing PCI Device Drivers document.
 */
void
uhci_set_dma_attributes(uhci_state_t *uhcip)
{
	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_set_dma_attributes:");

	/* Initialize the DMA attributes */
	uhcip->uhci_dma_attr.dma_attr_version = DMA_ATTR_V0;
	uhcip->uhci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
	uhcip->uhci_dma_attr.dma_attr_addr_hi = 0xfffffff0ull;

	/* 32 bit addressing */
	uhcip->uhci_dma_attr.dma_attr_count_max = 0xffffffull;

	/*
	 * Setting dma_attr_align to 512 sometimes makes the bind of the
	 * handle fail; the reason is not known. Setting it to 16 is
	 * correct for our case (16 byte alignment is required per the
	 * UHCI spec for TD descriptors).
	 */

	/* 16 byte alignment */
	uhcip->uhci_dma_attr.dma_attr_align = 0x10;

	/*
	 * Since the PCI specification allows byte alignment, the
	 * burstsize field should be set to 1 for PCI devices.
	 */
	uhcip->uhci_dma_attr.dma_attr_burstsizes = 0x1;

	uhcip->uhci_dma_attr.dma_attr_minxfer = 0x1;
	uhcip->uhci_dma_attr.dma_attr_maxxfer = 0xffffffull;
	uhcip->uhci_dma_attr.dma_attr_seg = 0xffffffffull;
	uhcip->uhci_dma_attr.dma_attr_sgllen = 1;
	uhcip->uhci_dma_attr.dma_attr_granular = 1;
	uhcip->uhci_dma_attr.dma_attr_flags = 0;
}


uint_t
pow_2(uint_t x)
{
	return ((x == 0) ? 1 : (1 << x));
}


uint_t
log_2(uint_t x)
{
	int ret_val = 0;

	while (x != 1) {
		ret_val++;
		x = x >> 1;
	}

	return (ret_val);
}
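

/*
 * Illustrative note (not from the original source): pow_2() and log_2()
 * are inverses only for exact powers of two; for other inputs log_2()
 * yields the floor of the base-2 logarithm:
 *
 *	pow_2(0) = 1	pow_2(3) = 8	pow_2(5) = 32
 *	log_2(32) = 5	log_2(10) = 3	(10 -> 5 -> 2 -> 1)
 *
 * The bandwidth code below always rounds an interval down to a power
 * of two (uhci_bandwidth_adjust()) before log_2() is applied to it
 * (uhci_lattice_height()), so the floor behavior is never hit there.
 */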


/*
 * uhci_obtain_state:
 */
uhci_state_t *
uhci_obtain_state(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	uhci_state_t *state = ddi_get_soft_state(uhci_statep, instance);

	ASSERT(state != NULL);

	return (state);
}


/*
 * uhci_alloc_hcdi_ops:
 *	The HCDI interfaces or entry points are the software interfaces used
 *	by the Universal Serial Bus Architecture (USBA) to access the
 *	services of the Host Controller Driver (HCD). During HCD
 *	initialization, inform USBA about all available HCDI interfaces or
 *	entry points.
 */
usba_hcdi_ops_t *
uhci_alloc_hcdi_ops(uhci_state_t *uhcip)
{
	usba_hcdi_ops_t	*hcdi_ops;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_alloc_hcdi_ops:");

	hcdi_ops = usba_alloc_hcdi_ops();

	hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION_1;

	hcdi_ops->usba_hcdi_pipe_open = uhci_hcdi_pipe_open;
	hcdi_ops->usba_hcdi_pipe_close = uhci_hcdi_pipe_close;
	hcdi_ops->usba_hcdi_pipe_reset = uhci_hcdi_pipe_reset;

	hcdi_ops->usba_hcdi_pipe_ctrl_xfer = uhci_hcdi_pipe_ctrl_xfer;
	hcdi_ops->usba_hcdi_pipe_bulk_xfer = uhci_hcdi_pipe_bulk_xfer;
	hcdi_ops->usba_hcdi_pipe_intr_xfer = uhci_hcdi_pipe_intr_xfer;
	hcdi_ops->usba_hcdi_pipe_isoc_xfer = uhci_hcdi_pipe_isoc_xfer;

	hcdi_ops->usba_hcdi_bulk_transfer_size = uhci_hcdi_bulk_transfer_size;
	hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
	    uhci_hcdi_pipe_stop_intr_polling;
	hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
	    uhci_hcdi_pipe_stop_isoc_polling;

	hcdi_ops->usba_hcdi_get_current_frame_number =
	    uhci_hcdi_get_current_frame_number;
	hcdi_ops->usba_hcdi_get_max_isoc_pkts = uhci_hcdi_get_max_isoc_pkts;

	hcdi_ops->usba_hcdi_console_input_init = uhci_hcdi_polled_input_init;
	hcdi_ops->usba_hcdi_console_input_enter = uhci_hcdi_polled_input_enter;
	hcdi_ops->usba_hcdi_console_read = uhci_hcdi_polled_read;
	hcdi_ops->usba_hcdi_console_input_exit = uhci_hcdi_polled_input_exit;
	hcdi_ops->usba_hcdi_console_input_fini = uhci_hcdi_polled_input_fini;

	hcdi_ops->usba_hcdi_console_output_init = uhci_hcdi_polled_output_init;
	hcdi_ops->usba_hcdi_console_output_enter =
	    uhci_hcdi_polled_output_enter;
	hcdi_ops->usba_hcdi_console_write = uhci_hcdi_polled_write;
	hcdi_ops->usba_hcdi_console_output_exit = uhci_hcdi_polled_output_exit;
	hcdi_ops->usba_hcdi_console_output_fini = uhci_hcdi_polled_output_fini;

	return (hcdi_ops);
}


/*
 * uhci_init_frame_lst_table:
 *	Allocate the system memory and initialize the Host Controller Frame
 *	List Table area. The start of the Frame List Table area must be
 *	4096 byte aligned.
 */
static int
uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip)
{
	int			result;
	uint_t			ccount;
	size_t			real_length;
	ddi_device_acc_attr_t	dev_attr;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_init_frame_lst_table:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* 4K alignment required */
	uhcip->uhci_dma_attr.dma_attr_align = 0x1000;

	/* Create space for the frame list table */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
	    0, &uhcip->uhci_flt_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Reset to the default 16 bytes */
	uhcip->uhci_dma_attr.dma_attr_align = 0x10;

	if (ddi_dma_mem_alloc(uhcip->uhci_flt_dma_handle,
	    SIZE_OF_FRAME_LST_TABLE, &dev_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, 0, (caddr_t *)&uhcip->uhci_frame_lst_tablep,
	    &real_length, &uhcip->uhci_flt_mem_handle)) {

		return (USB_FAILURE);
	}

	/* Map the whole Frame list base area into the I/O address space */
	result = ddi_dma_addr_bind_handle(uhcip->uhci_flt_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_frame_lst_tablep, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &uhcip->uhci_flt_cookie, &ccount);

	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_init_frame_lst_table: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_FLA_POOL_BOUND;

	bzero((void *)uhcip->uhci_frame_lst_tablep, real_length);

	/* Initialize the interrupt lists */
	uhci_build_interrupt_lattice(uhcip);

	return (USB_SUCCESS);
}


/*
 * uhci_alloc_queue_head:
 *	Allocate a queue head
 */
queue_head_t *
uhci_alloc_queue_head(uhci_state_t *uhcip)
{
	int		index;
	uhci_td_t	*dummy_td;
	queue_head_t	*queue_head;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Allocate a dummy td first. */
	if ((dummy_td = uhci_allocate_td_from_pool(uhcip)) == NULL) {

		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_queue_head: allocate td from pool failed");

		return (NULL);
	}

	/*
	 * The first 63 queue heads in the Queue Head (QH)
	 * buffer pool are reserved for building the interrupt
	 * lattice tree. Search for a free queue head in the QH
	 * buffer pool.
	 */
	for (index = NUM_STATIC_NODES; index < uhci_qh_pool_size; index++) {
		if (uhcip->uhci_qh_pool_addr[index].qh_flag ==
		    QUEUE_HEAD_FLAG_FREE) {
			break;
		}
	}

	USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head: Allocated %d", index);

	if (index == uhci_qh_pool_size) {
		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_queue_head: All QH exhausted");

		/* Free the dummy td allocated for this qh. */
		dummy_td->flag = TD_FLAG_FREE;

		return (NULL);
	}

	queue_head = &uhcip->uhci_qh_pool_addr[index];
	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head: Allocated address 0x%p", queue_head);

	bzero((void *)queue_head, sizeof (queue_head_t));
	SetQH32(uhcip, queue_head->link_ptr, HC_END_OF_LIST);
	SetQH32(uhcip, queue_head->element_ptr, HC_END_OF_LIST);
	queue_head->prev_qh = NULL;
	queue_head->qh_flag = QUEUE_HEAD_FLAG_BUSY;

	bzero((char *)dummy_td, sizeof (uhci_td_t));
	queue_head->td_tailp = dummy_td;
	SetQH32(uhcip, queue_head->element_ptr, TD_PADDR(dummy_td));

	return (queue_head);
}


/*
 * uhci_allocate_bandwidth:
 *	Figure out whether or not this interval may be supported. Return
 *	the index into the lattice if it can be supported. Return
 *	allocation failure if it can not be supported.
 */
int
uhci_allocate_bandwidth(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*pipe_handle,
	uint_t			*node)
{
	int		bandwidth;	/* Requested bandwidth */
	uint_t		min, min_index;
	uint_t		i;
	uint_t		height;		/* Bandwidth's height in the tree */
	uint_t		leftmost;
	uint_t		length;
	uint32_t	paddr;
	queue_head_t	*tmp_qh;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint.
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);

	length = uhci_compute_total_bandwidth(endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If the length in bytes plus the allocated bandwidth exceeds
	 * the maximum, return bandwidth allocation failure.
	 */
	if ((length + uhcip->uhci_bandwidth_intr_min +
	    uhcip->uhci_bandwidth_isoch_sum) > (MAX_PERIODIC_BANDWIDTH)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_allocate_bandwidth: "
		    "Reached maximum bandwidth value and cannot allocate "
		    "bandwidth for a given Interrupt/Isoch endpoint");

		return (USB_NO_BANDWIDTH);
	}

	/*
	 * Isochronous endpoints are not scheduled through the lattice;
	 * just account for their bandwidth in the isochronous sum.
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum += length;

		return (USB_SUCCESS);
	}

	/*
	 * This is an interrupt endpoint.
	 * Adjust the bandwidth to be a power of 2.
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this bandwidth can't be supported,
	 * return allocation failure.
	 */
	if (bandwidth == USB_FAILURE) {

		return (USB_FAILURE);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The new bandwidth is %d", bandwidth);

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The leaf with minimal bandwidth %d, "
	    "The smallest bandwidth %d", min_index, min);

	/*
	 * Find the index into the lattice given the
	 * leaf with the smallest allocated bandwidth.
	 */
	height = uhci_lattice_height(bandwidth);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The height is %d", height);

	*node = uhci_tree_bottom_nodes[min_index];

	/* Check whether isoc TDs are scheduled for this frame */
	if (uhcip->uhci_isoc_q_tailp[*node]) {
		paddr = (uhcip->uhci_isoc_q_tailp[*node]->link_ptr &
		    FRAME_LST_PTR_MASK);
	} else {
		paddr = (uhcip->uhci_frame_lst_tablep[*node] &
		    FRAME_LST_PTR_MASK);
	}

	tmp_qh = QH_VADDR(paddr);
	*node = tmp_qh->node;
	for (i = 0; i < height; i++) {
		*node = uhci_lattice_parent(*node);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The real node is %d", *node);

	/*
	 * Find the leftmost leaf in the subtree specified by the node.
	 */
	leftmost = uhci_leftmost_leaf(*node, height);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "Leftmost %d", leftmost);

	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i++) {

		if ((length + uhcip->uhci_bandwidth_isoch_sum +
		    uhcip->uhci_bandwidth[i]) > MAX_PERIODIC_BANDWIDTH) {

			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_allocate_bandwidth: "
			    "Reached maximum bandwidth value and cannot "
			    "allocate bandwidth for Interrupt endpoint");

			return (USB_NO_BANDWIDTH);
		}
	}

	/*
	 * All the leaves for this node must be updated with the bandwidth.
	 */
	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i++) {
		uhcip->uhci_bandwidth[i] += length;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;

	return (USB_SUCCESS);
}
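

/*
 * Worked example (illustrative, not from the original source): suppose
 * uhci_bandwidth_adjust() returns 8, i.e. the pipe is polled every 8th
 * frame. With NUM_FRAME_LST_ENTRIES at 1024 (the UHCI frame list size),
 * the code above picks the least-loaded leaf, walks
 * uhci_lattice_height(8) levels up to the QH that is traversed every 8
 * frames, and then charges the transaction length to all
 * 1024 / 8 = 128 frame-list entries under that subtree, so each
 * affected frame's byte budget reflects the new endpoint.
 */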


/*
 * uhci_deallocate_bandwidth:
 *	Deallocate bandwidth for the given node in the lattice
 *	and the length of transfer.
 */
void
uhci_deallocate_bandwidth(uhci_state_t *uhcip,
    usba_pipe_handle_data_t *pipe_handle)
{
	uint_t		bandwidth;
	uint_t		height;
	uint_t		leftmost;
	uint_t		i;
	uint_t		min;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;
	uint_t		node, length;
	uhci_pipe_private_t *pp =
	    (uhci_pipe_private_t *)pipe_handle->p_hcd_private;

	/* This routine is protected by the uhci_int_mutex */
	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Obtain the length */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	length = uhci_compute_total_bandwidth(endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this is an isochronous endpoint, just delete the endpoint's
	 * bandwidth from the total allocated isochronous bandwidth.
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum -= length;

		return;
	}

	/* Obtain the node */
	node = pp->pp_node;

	/* Adjust the bandwidth to be a power of 2 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/* Find the height in the tree */
	height = uhci_lattice_height(bandwidth);

	/*
	 * Find the leftmost leaf in the subtree specified by the node.
	 */
	leftmost = uhci_leftmost_leaf(node, height);

	/* Delete the bandwidth from the appropriate lists */
	for (i = leftmost; i < leftmost + (NUM_FRAME_LST_ENTRIES/bandwidth);
	    i++) {
		uhcip->uhci_bandwidth[i] -= length;
	}

	min = uhcip->uhci_bandwidth[0];

	/* Recompute the minimum */
	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;
}


/*
 * uhci_compute_total_bandwidth:
 *
 * Given a periodic endpoint (interrupt or isochronous) determine the total
 * bandwidth for one transaction. The UHCI host controller traverses the
 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
 * services an endpoint, only a single transaction attempt is made. The HC
 * moves to the next Endpoint Descriptor after the first transaction attempt
 * rather than finishing the entire Transfer Descriptor. Therefore, when a
 * Transfer Descriptor is inserted into the lattice, we will only count the
 * number of bytes for one transaction.
 *
 * The following formulas are used for calculating bandwidth in terms of
 * bytes for a single USB full-speed and low-speed transaction,
 * respectively. The protocol overhead differs for each type of USB
 * transfer; the formulas and protocol overheads are derived from section
 * 5.9.3 of the USB Specification and from the Bandwidth Analysis white
 * paper posted on the USB developer forum.
 *
 * Full-Speed:
 *	Protocol overhead + ((MaxPacketSize * 7)/6) + Host_Delay
 *
 * Low-Speed:
 *	Protocol overhead + Hub LS overhead +
 *	(Low-Speed clock * ((MaxPacketSize * 7)/6)) + Host_Delay
 */
static uint_t
uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
    usb_port_status_t port_status)
{
	uint_t		bandwidth;
	ushort_t	MaxPacketSize = endpoint->wMaxPacketSize;

	/* Add the Host Controller specific delay to the required bandwidth */
	bandwidth = HOST_CONTROLLER_DELAY;

	/* Add the bit-stuffing overhead */
	MaxPacketSize = (ushort_t)((MaxPacketSize * 7) / 6);

	if (port_status == USBA_LOW_SPEED_DEV) {
		/* Low Speed interrupt transaction */
		bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
		    HUB_LOW_SPEED_PROTO_OVERHEAD +
		    (LOW_SPEED_CLOCK * MaxPacketSize));
	} else {
		/* Full Speed transaction */
		bandwidth += MaxPacketSize;

		if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_INTR) {
			/* Full Speed interrupt transaction */
			bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
		} else {
			/* Isochronous input transaction */
			if (UHCI_XFER_DIR(endpoint) == USB_EP_DIR_IN) {
				bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
			} else {
				/* Isochronous output transaction */
				bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
			}
		}
	}

	return (bandwidth);
}
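

/*
 * Worked example (illustrative, not from the original source): for a
 * full-speed interrupt endpoint with wMaxPacketSize = 8, the
 * bit-stuffing adjustment gives (8 * 7) / 6 = 9 bytes, so the charge is
 *
 *	HOST_CONTROLLER_DELAY + 9 + FS_NON_ISOC_PROTO_OVERHEAD
 *
 * bytes per frame in which the endpoint is polled. The actual values of
 * the overhead constants live in the UHCI driver headers.
 */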


/*
 * uhci_bandwidth_adjust:
 */
static int
uhci_bandwidth_adjust(
	uhci_state_t		*uhcip,
	usb_ep_descr_t		*endpoint,
	usb_port_status_t	port_status)
{
	int	i = 0;
	uint_t	interval;

	/*
	 * Get the polling interval from the endpoint descriptor
	 */
	interval = endpoint->bInterval;

	/*
	 * The bInterval value in the endpoint descriptor can range
	 * from 1 to 255ms. The interrupt lattice has 32 leaf nodes,
	 * and the host controller cycles through these nodes every
	 * 32ms. The longest polling interval that the controller
	 * supports is 32ms.
	 */

	/*
	 * Return an error if the polling interval is less than 1ms
	 * or greater than 255ms.
	 */
	if ((interval < MIN_POLL_INTERVAL) || (interval > MAX_POLL_INTERVAL)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_bandwidth_adjust: Endpoint's poll interval must be "
		    "between %d and %d ms", MIN_POLL_INTERVAL,
		    MAX_POLL_INTERVAL);

		return (USB_FAILURE);
	}

	/*
	 * According to the USB Specification, a full-speed endpoint can
	 * specify a desired polling interval of 1ms to 255ms, while a
	 * low-speed endpoint is limited to specifying only 10ms to 255ms.
	 * But some old keyboards and mice use a polling interval of 8ms.
	 * For compatibility purposes, we use a polling interval between
	 * 8ms and 255ms for low-speed endpoints.
	 */
	if ((port_status == USBA_LOW_SPEED_DEV) &&
	    (interval < MIN_LOW_SPEED_POLL_INTERVAL)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_bandwidth_adjust: Low speed endpoint's poll interval "
		    "must be >= %d ms, adjusted",
		    MIN_LOW_SPEED_POLL_INTERVAL);

		interval = MIN_LOW_SPEED_POLL_INTERVAL;
	}

	/*
	 * If the polling interval is greater than 32ms,
	 * adjust it to 32ms.
	 */
	if (interval > 32) {
		interval = 32;
	}

	/*
	 * Find the nearest power of 2 that is less than
	 * or equal to the interval.
	 */
	while ((pow_2(i)) <= interval) {
		i++;
	}

	return (pow_2((i - 1)));
}


/*
 * uhci_lattice_height:
 *	Given the requested bandwidth, find the height in the tree at
 *	which the nodes for this bandwidth fall. The height is measured
 *	as the number of nodes from the leaf to the level specified by
 *	the bandwidth. The root of the tree is at height TREE_HEIGHT.
 */
static uint_t
uhci_lattice_height(uint_t bandwidth)
{
	return (TREE_HEIGHT - (log_2(bandwidth)));
}


static uint_t
uhci_lattice_parent(uint_t node)
{
	return (((node % 2) == 0) ? ((node/2) - 1) : (node/2));
}


/*
 * uhci_leftmost_leaf:
 *	Find the leftmost leaf in the subtree specified by the node.
 *	Height refers to the number of nodes from the bottom of the tree
 *	to the node, including the node.
 */
static uint_t
uhci_leftmost_leaf(uint_t node, uint_t height)
{
	node = pow_2(height + VIRTUAL_TREE_HEIGHT) * (node+1) -
	    NUM_FRAME_LST_ENTRIES;

	return (node);
}
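

/*
 * Worked example (illustrative, not from the original source): for a
 * power-of-two interval of 8ms, uhci_lattice_height(8) returns
 * TREE_HEIGHT - 3, the level of the lattice whose queue heads are
 * traversed every 8th frame. uhci_lattice_parent() is the usual array
 * heap rule parent(n) = (n - 1) / 2, written out for the even and odd
 * cases, and uhci_leftmost_leaf() maps a node and its height back to
 * the first of the 1024 frame-list entries served by that node's
 * subtree. An adjusted bInterval of, say, 10ms would already have been
 * rounded down to 8 by uhci_bandwidth_adjust() above.
 */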


/*
 * uhci_insert_qh:
 *	Add the Queue Head (QH) into the Host Controller's (HC)
 *	appropriate queue head list.
 */
void
uhci_insert_qh(uhci_state_t *uhcip, usba_pipe_handle_data_t *ph)
{
	uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	switch (UHCI_XFER_TYPE(&ph->p_ep)) {
	case USB_EP_ATTR_CONTROL:
		uhci_insert_ctrl_qh(uhcip, pp);
		break;
	case USB_EP_ATTR_BULK:
		uhci_insert_bulk_qh(uhcip, pp);
		break;
	case USB_EP_ATTR_INTR:
		uhci_insert_intr_qh(uhcip, pp);
		break;
	case USB_EP_ATTR_ISOCH:
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_insert_qh: Illegal request");
		break;
	}
}


/*
 * uhci_insert_ctrl_qh:
 *	Insert a control QH into the Host Controller's (HC) control QH list.
 */
static void
uhci_insert_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t *qh = pp->pp_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_ctrl_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (uhcip->uhci_ctrl_xfers_q_head == uhcip->uhci_ctrl_xfers_q_tail) {
		uhcip->uhci_ctrl_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
	}

	SetQH32(uhcip, qh->link_ptr,
	    GetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr));
	qh->prev_qh = uhcip->uhci_ctrl_xfers_q_tail;
	SetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr,
	    QH_PADDR(qh) | HC_QUEUE_HEAD);
	uhcip->uhci_ctrl_xfers_q_tail = qh;
}


/*
 * uhci_insert_bulk_qh:
 *	Insert a bulk QH into the Host Controller's (HC) bulk QH list.
 */
static void
uhci_insert_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t *qh = pp->pp_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_bulk_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (uhcip->uhci_bulk_xfers_q_head == uhcip->uhci_bulk_xfers_q_tail) {
		uhcip->uhci_bulk_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
	} else if (uhcip->uhci_bulk_xfers_q_head->link_ptr ==
	    uhcip->uhci_bulk_xfers_q_tail->link_ptr) {

		/* If there is already a loop, we should keep the loop. */
		qh->link_ptr = uhcip->uhci_bulk_xfers_q_tail->link_ptr;
	}

	qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
	SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_tail->link_ptr,
	    QH_PADDR(qh) | HC_QUEUE_HEAD);
	uhcip->uhci_bulk_xfers_q_tail = qh;
}


/*
 * uhci_insert_intr_qh:
 *	Insert a periodic queue head, i.e. an interrupt queue head, into
 *	the Host Controller's (HC) interrupt lattice tree.
 */
static void
uhci_insert_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uint_t		node = pp->pp_node;	/* The appropriate node was */
						/* found during the opening */
						/* of the pipe. */
	queue_head_t	*qh = pp->pp_qh;
	queue_head_t	*next_lattice_qh, *lattice_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Find the lattice queue head */
	lattice_qh = &uhcip->uhci_qh_pool_addr[node];
	next_lattice_qh =
	    QH_VADDR(GetQH32(uhcip, lattice_qh->link_ptr) & QH_LINK_PTR_MASK);

	next_lattice_qh->prev_qh = qh;
	qh->link_ptr = lattice_qh->link_ptr;
	qh->prev_qh = lattice_qh;
	SetQH32(uhcip, lattice_qh->link_ptr, QH_PADDR(qh) | HC_QUEUE_HEAD);
	pp->pp_data_toggle = 0;
}


/*
 * uhci_insert_intr_td:
 *	Create a TD and a data buffer for an interrupt endpoint.
 */
int
uhci_insert_intr_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_intr_req_t		*req,
	usb_flags_t		flags)
{
	int			error, pipe_dir;
	uint_t			length, mps;
	uint32_t		buf_offs;
	uhci_td_t		*tmp_td;
	usb_intr_req_t		*intr_reqp;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_td: req: 0x%p", req);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Get the interrupt pipe direction */
	pipe_dir = UHCI_XFER_DIR(&ph->p_ep);

	/* Get the current interrupt request pointer */
	if (req) {
		length = req->intr_len;
	} else {
		ASSERT(pipe_dir == USB_EP_DIR_IN);
		length = (pp->pp_client_periodic_in_reqp) ?
		    (((usb_intr_req_t *)pp->
		    pp_client_periodic_in_reqp)->intr_len) :
		    ph->p_ep.wMaxPacketSize;
	}

	/* Check the size of the interrupt request */
	if (length > UHCI_MAX_TD_XFER_SIZE) {

		/* the length shouldn't exceed 8K */
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: Intr request size 0x%lx is "
		    "more than 0x%x", length, UHCI_MAX_TD_XFER_SIZE);

		return (USB_INVALID_REQUEST);
	}

	USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_td: length: 0x%lx", length);

	/* Allocate a transaction wrapper */
	if ((tw = uhci_create_transfer_wrapper(uhcip, pp, length, flags)) ==
	    NULL) {

		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/*
	 * Initialize the callback and any callback
	 * data for when the td completes.
	 */
	tw->tw_handle_td = uhci_handle_intr_td;
	tw->tw_handle_callback_value = NULL;
	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
	    PID_OUT : PID_IN;
	tw->tw_curr_xfer_reqp = (usb_opaque_t)req;

	/*
	 * If it is an Interrupt IN request and the interrupt request is
	 * NULL, allocate the usb interrupt request structure for the
	 * current interrupt polling request.
	 */
	if (tw->tw_direction == PID_IN) {
		if ((error = uhci_allocate_periodic_in_resource(uhcip,
		    pp, tw, flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_insert_intr_td: Interrupt request structure "
			    "allocation failed");

			/* free the transfer wrapper */
			uhci_deallocate_tw(uhcip, pp, tw);

			return (error);
		}
	}

	intr_reqp = (usb_intr_req_t *)tw->tw_curr_xfer_reqp;
	ASSERT(tw->tw_curr_xfer_reqp != NULL);

	tw->tw_timeout_cnt = (intr_reqp->intr_attributes & USB_ATTRS_ONE_XFER) ?
	    intr_reqp->intr_timeout : 0;

	/* DATA IN */
	if (tw->tw_direction == PID_IN) {
		/* Insert the td onto the queue head */
		error = uhci_insert_hc_td(uhcip, 0,
		    length, pp, tw, PID_IN, intr_reqp->intr_attributes);

		if (error != USB_SUCCESS) {

			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
			/* free the transfer wrapper */
			uhci_deallocate_tw(uhcip, pp, tw);

			return (USB_NO_RESOURCES);
		}
		tw->tw_bytes_xfered = 0;

		return (USB_SUCCESS);
	}

	if (req->intr_len) {
		/* DATA OUT */
		ASSERT(req->intr_data != NULL);

		/* Copy the data into the message */
		ddi_rep_put8(tw->tw_accesshandle, req->intr_data->b_rptr,
		    (uint8_t *)tw->tw_buf, req->intr_len, DDI_DEV_AUTOINCR);
	}

	/* Set the tw->tw_claim flag, so that nobody else works on this tw. */
	tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;

	mps = ph->p_ep.wMaxPacketSize;
	buf_offs = 0;

	/* Insert tds onto the queue head */
	while (length > 0) {

		error = uhci_insert_hc_td(uhcip, buf_offs,
		    (length > mps) ? mps : length,
		    pp, tw, PID_OUT,
		    intr_reqp->intr_attributes);

		if (error != USB_SUCCESS) {
			/* no resource. */
			break;
		}

		if (length <= mps) {
			/* inserted all data. */
			length = 0;

		} else {

			buf_offs += mps;
			length -= mps;
		}
	}

	if (error != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: allocate td failed, free resource");

		/* remove all the tds */
		while (tw->tw_hctd_head != NULL) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		tw->tw_claim = UHCI_NOT_CLAIMED;
		uhci_deallocate_tw(uhcip, pp, tw);

		return (error);
	}

	/* allow the HC to xfer the tds of this tw */
	tmp_td = tw->tw_hctd_head;
	while (tmp_td != NULL) {

		SetTD_status(uhcip, tmp_td, UHCI_TD_ACTIVE);
		tmp_td = tmp_td->tw_td_next;
	}

	tw->tw_bytes_xfered = 0;
	tw->tw_claim = UHCI_NOT_CLAIMED;

	return (error);
}


/*
 * uhci_create_transfer_wrapper:
 *	Create a Transaction Wrapper (TW) for non-isoc transfer types.
 *	This involves the allocation of DMA resources.
 *
 *	For non-isoc transfers, one DMA handle and one DMA buffer are
 *	allocated per transfer. The DMA buffer may contain multiple
 *	DMA cookies and the cookies should meet certain alignment
 *	requirements to be able to fit in the multiple TDs. The alignment
 *	needs to ensure:
 *	1. the size of a cookie is larger than the max TD length (0x500)
 *	2. the size of a cookie is a multiple of wMaxPacketSize of the
 *	ctrl/bulk pipes
 *
 *	wMaxPacketSize for ctrl and bulk pipes may be 8, 16, 32 or 64 bytes.
 *	So the alignment should be a multiple of 64. wMaxPacketSize for intr
 *	pipes is a little different since it only specifies the max to be
 *	64 bytes, but as long as an intr transfer is limited to the max TD
 *	length, any alignment can work if the cookie size is larger than
 *	the max TD length.
 *
 *	Considering the above conditions, 2K alignment is used. 4K alignment
 *	should also be fine.
 */
static uhci_trans_wrapper_t *
uhci_create_transfer_wrapper(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	size_t			length,
	usb_flags_t		usb_flags)
{
	size_t			real_length;
	uhci_trans_wrapper_t	*tw;
	ddi_device_acc_attr_t	dev_attr;
	ddi_dma_attr_t		dma_attr;
	int			kmem_flag;
	int			(*dmamem_wait)(caddr_t);
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
	    length, usb_flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* isochronous pipes should not call into this function */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {

		return (NULL);
	}

	/* the SLEEP flag should not be used in interrupt context */
	if (servicing_interrupt()) {
		kmem_flag = KM_NOSLEEP;
		dmamem_wait = DDI_DMA_DONTWAIT;
	} else {
		kmem_flag = KM_SLEEP;
		dmamem_wait = DDI_DMA_SLEEP;
	}

	/* Allocate space for the transfer wrapper */
	if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
	    NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: kmem_alloc failed");

		return (NULL);
	}

	/* a zero-length packet doesn't need to allocate dma memory */
	if (length == 0) {

		goto dmadone;
	}

	/* allow sg lists for transfer wrapper dma memory */
	bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
	dma_attr.dma_attr_sgllen = UHCI_DMA_ATTR_SGLLEN;
	dma_attr.dma_attr_align = UHCI_DMA_ATTR_ALIGN;

	/* Store the transfer length */
	tw->tw_length = length;

	/* Allocate the DMA handle */
	if (ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr, dmamem_wait,
	    0, &tw->tw_dmahandle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: Alloc handle failed");
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Allocate the memory */
	if (ddi_dma_mem_alloc(tw->tw_dmahandle, tw->tw_length, &dev_attr,
	    DDI_DMA_CONSISTENT, dmamem_wait, NULL, (caddr_t *)&tw->tw_buf,
	    &real_length, &tw->tw_accesshandle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: dma_mem_alloc fail");
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	ASSERT(real_length >= length);

	/* Bind the handle */
	if (ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies) !=
	    DDI_DMA_MAPPED) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: Bind handle failed");
		ddi_dma_mem_free(&tw->tw_accesshandle);
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	tw->tw_cookie_idx = 0;
	tw->tw_dma_offs = 0;

dmadone:
    /*
     * Only allow one wrapper to be added at a time. Insert the
     * new transaction wrapper into the list for this pipe.
     */
    if (pp->pp_tw_head == NULL) {
        pp->pp_tw_head = tw;
        pp->pp_tw_tail = tw;
    } else {
        pp->pp_tw_tail->tw_next = tw;
        pp->pp_tw_tail = tw;
        ASSERT(tw->tw_next == NULL);
    }

    /* Store a back pointer to the pipe private structure */
    tw->tw_pipe_private = pp;

    /* Store the transfer type - synchronous or asynchronous */
    tw->tw_flags = usb_flags;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
        tw, tw->tw_ncookies);

    return (tw);
}


/*
 * uhci_insert_hc_td:
 *	Insert a Transfer Descriptor (TD) on a QH.
 */
int
uhci_insert_hc_td(
    uhci_state_t            *uhcip,
    uint32_t                buffer_offset,
    size_t                  hcgtd_length,
    uhci_pipe_private_t     *pp,
    uhci_trans_wrapper_t    *tw,
    uchar_t                 PID,
    usb_req_attrs_t         attrs)
{
    uhci_td_t       *td, *current_dummy;
    queue_head_t    *qh = pp->pp_qh;

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    if ((td = uhci_allocate_td_from_pool(uhcip)) == NULL) {

        return (USB_NO_RESOURCES);
    }

    current_dummy = qh->td_tailp;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_insert_hc_td: td %p, attrs = 0x%x", td, attrs);

    /*
     * Fill in the current dummy td and
     * add the new dummy to the end.
     */
    uhci_fill_in_td(uhcip, td, current_dummy, buffer_offset,
        hcgtd_length, pp, PID, attrs, tw);

    /*
     * Allow the HC hardware to transfer the td, except for
     * an interrupt out td.
     */
    if ((tw->tw_handle_td != uhci_handle_intr_td) || (PID != PID_OUT)) {

        SetTD_status(uhcip, current_dummy, UHCI_TD_ACTIVE);
    }

    /* Insert this td onto the tw */

    if (tw->tw_hctd_head == NULL) {
        ASSERT(tw->tw_hctd_tail == NULL);
        tw->tw_hctd_head = current_dummy;
        tw->tw_hctd_tail = current_dummy;
    } else {
        /* Add the td to the end of the list */
        tw->tw_hctd_tail->tw_td_next = current_dummy;
        tw->tw_hctd_tail = current_dummy;
    }

    /*
     * Insert the TD on to the QH. When this occurs,
     * the Host Controller will see the newly filled in TD.
     */
    current_dummy->outst_td_next = NULL;
    current_dummy->outst_td_prev = uhcip->uhci_outst_tds_tail;
    if (uhcip->uhci_outst_tds_head == NULL) {
        uhcip->uhci_outst_tds_head = current_dummy;
    } else {
        uhcip->uhci_outst_tds_tail->outst_td_next = current_dummy;
    }
    uhcip->uhci_outst_tds_tail = current_dummy;
    current_dummy->tw = tw;

    return (USB_SUCCESS);
}
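/*
 * Illustration (not from the original source) of the dummy TD scheme
 * used above. The QH always ends in an inactive dummy TD; a new
 * transfer is started by filling in that dummy and appending a fresh
 * dummy, so the HC never sees a half-built TD:
 *
 *	before:  QH -> ... -> dummy (inactive)
 *	after:   QH -> ... -> old dummy (now filled in) -> new dummy
 *
 * This is why uhci_fill_in_td() writes into current_dummy rather than
 * into the newly allocated td, which becomes the next tail dummy.
 */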
/*
 * uhci_fill_in_td:
 *	Fill in the fields of a Transfer Descriptor (TD).
 */
static void
uhci_fill_in_td(
    uhci_state_t            *uhcip,
    uhci_td_t               *td,
    uhci_td_t               *current_dummy,
    uint32_t                buffer_offset,
    size_t                  length,
    uhci_pipe_private_t     *pp,
    uchar_t                 PID,
    usb_req_attrs_t         attrs,
    uhci_trans_wrapper_t    *tw)
{
    usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;
    uint32_t                buf_addr;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_fill_in_td: td 0x%p buf_offs 0x%x len 0x%lx "
        "attrs 0x%x", td, buffer_offset, length, attrs);

    /*
     * If this is an isochronous TD, just return.
     */
    if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {

        return;
    }

    /* The maximum transfer length for UHCI cannot exceed 0x500 bytes */
    ASSERT(length <= UHCI_MAX_TD_XFER_SIZE);

    bzero((char *)td, sizeof (uhci_td_t));  /* Clear the TD */
    SetTD32(uhcip, current_dummy->link_ptr, TD_PADDR(td));

    if (attrs & USB_ATTRS_SHORT_XFER_OK) {
        SetTD_spd(uhcip, current_dummy, 1);
    }

    mutex_enter(&ph->p_usba_device->usb_mutex);
    if (ph->p_usba_device->usb_port_status == USBA_LOW_SPEED_DEV) {
        SetTD_ls(uhcip, current_dummy, LOW_SPEED_DEVICE);
    }

    SetTD_c_err(uhcip, current_dummy, UHCI_MAX_ERR_COUNT);
    SetTD_mlen(uhcip, current_dummy,
        (length == 0) ? ZERO_LENGTH : (length - 1));
    SetTD_dtogg(uhcip, current_dummy, pp->pp_data_toggle);

    /* Adjust the data toggle bit */
    ADJ_DATA_TOGGLE(pp);

    SetTD_devaddr(uhcip, current_dummy, ph->p_usba_device->usb_addr);
    SetTD_endpt(uhcip, current_dummy,
        ph->p_ep.bEndpointAddress & END_POINT_ADDRESS_MASK);
    SetTD_PID(uhcip, current_dummy, PID);
    SetTD_ioc(uhcip, current_dummy, INTERRUPT_ON_COMPLETION);

    buf_addr = uhci_get_tw_paddr_by_offs(uhcip, buffer_offset, length, tw);
    SetTD32(uhcip, current_dummy->buffer_address, buf_addr);

    td->qh_td_prev = current_dummy;
    current_dummy->qh_td_prev = NULL;
    pp->pp_qh->td_tailp = td;
    mutex_exit(&ph->p_usba_device->usb_mutex);
}

/*
 * uhci_get_tw_paddr_by_offs:
 *	Walk through the DMA cookies of a TW buffer to retrieve
 *	the device address to be used for a TD.
 *
 *	buffer_offset - the starting offset into the TW buffer, where the
 *			TD should transfer from. When a TW has more than
 *			one TD, the TDs must be filled in increasing order.
 */
static uint32_t
uhci_get_tw_paddr_by_offs(
    uhci_state_t            *uhcip,
    uint32_t                buffer_offset,
    size_t                  length,
    uhci_trans_wrapper_t    *tw)
{
    uint32_t    buf_addr;
    int         rem_len;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_get_tw_paddr_by_offs: buf_offs 0x%x len 0x%lx",
        buffer_offset, length);

    /*
     * TDs must be filled in increasing DMA offset order.
     * tw_dma_offs is initialized to 0 at TW creation and
     * is only increased in this function.
     */
    ASSERT(length == 0 || buffer_offset >= tw->tw_dma_offs);

    if (length == 0) {
        buf_addr = 0;

        return (buf_addr);
    }

    /*
     * Advance to the next DMA cookie until finding the cookie
     * that buffer_offset falls in.
     * It is very likely this loop will never repeat more than
     * once. It is here just to accommodate the case where
     * buffer_offset is increased by multiple cookies during two
     * consecutive calls into this function. In that case, the
     * interim DMA buffer is allowed to be skipped.
     */
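    /*
     * Worked example (illustrative, not from the original source):
     * suppose the bind produced two cookies of 0x800 bytes each. A call
     * with buffer_offset = 0x900 while tw_dma_offs is still 0 takes one
     * trip through the loop below: tw_dma_offs becomes 0x800, the second
     * cookie is fetched, and the returned address is that cookie's
     * dmac_address + (0x900 - 0x800) = dmac_address + 0x100.
     */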
    while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
        buffer_offset) {
        /*
         * tw_dma_offs always points to the starting offset
         * of a cookie
         */
        tw->tw_dma_offs += tw->tw_cookie.dmac_size;
        ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
        tw->tw_cookie_idx++;
        ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
    }

    /*
     * Count the remaining buffer length to be filled in the
     * TDs for the current DMA cookie.
     */
    rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
        buffer_offset;

    /* Calculate the beginning address of the buffer */
    ASSERT(length <= rem_len);
    buf_addr = (buffer_offset - tw->tw_dma_offs) +
        tw->tw_cookie.dmac_address;

    USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_get_tw_paddr_by_offs: dmac_addr 0x%p dmac_size "
        "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
        tw->tw_cookie_idx);

    return (buf_addr);
}


/*
 * uhci_modify_td_active_bits:
 *	Set the active bit in all the TDs of the QH to INACTIVE so
 *	that the HC stops processing the TDs related to the QH.
 */
void
uhci_modify_td_active_bits(
    uhci_state_t        *uhcip,
    uhci_pipe_private_t *pp)
{
    uhci_td_t               *td_head;
    usb_ep_descr_t          *ept = &pp->pp_pipe_handle->p_ep;
    uhci_trans_wrapper_t    *tw_head = pp->pp_tw_head;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_modify_td_active_bits: tw head %p", (void *)tw_head);

    while (tw_head != NULL) {
        tw_head->tw_claim = UHCI_MODIFY_TD_BITS_CLAIMED;
        td_head = tw_head->tw_hctd_head;

        while (td_head) {
            if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) {
                SetTD_status(uhcip, td_head,
                    GetTD_status(uhcip, td_head) & TD_INACTIVE);
            } else {
                SetTD32(uhcip, td_head->link_ptr,
                    GetTD32(uhcip, td_head->link_ptr) |
                    HC_END_OF_LIST);
            }

            td_head = td_head->tw_td_next;
        }
        tw_head = tw_head->tw_next;
    }
}


/*
 * uhci_insert_ctrl_td:
 *	Create a TD and a data buffer for a control Queue Head.
 */
int
uhci_insert_ctrl_td(
    uhci_state_t            *uhcip,
    usba_pipe_handle_data_t *ph,
    usb_ctrl_req_t          *ctrl_reqp,
    usb_flags_t             flags)
{
    uhci_pipe_private_t     *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
    uhci_trans_wrapper_t    *tw;
    size_t                  ctrl_buf_size;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_insert_ctrl_td: timeout: 0x%x", ctrl_reqp->ctrl_timeout);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    /*
     * If we have a control data phase, make the data buffer start
     * on the next 64-byte boundary so as to ensure the DMA cookie
     * can fit in the multiple TDs. The buffer in the range of
     * [SETUP_SIZE, UHCI_CTRL_EPT_MAX_SIZE) is just for padding
     * and not to be transferred.
     */
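    /*
     * Illustrative buffer layout for a control-IN transfer with
     * wLength = 18 (not from the original source; assumes SETUP_SIZE
     * is 8 and UHCI_CTRL_EPT_MAX_SIZE is 64):
     *
     *	offset  0 ..  7 : 8-byte SETUP packet
     *	offset  8 .. 63 : padding up to UHCI_CTRL_EPT_MAX_SIZE
     *	offset 64 .. 81 : 18 bytes of data phase
     *
     * so ctrl_buf_size below becomes 64 + 18 = 82 bytes.
     */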
    if (ctrl_reqp->ctrl_wLength) {
        ctrl_buf_size = UHCI_CTRL_EPT_MAX_SIZE +
            ctrl_reqp->ctrl_wLength;
    } else {
        ctrl_buf_size = SETUP_SIZE;
    }

    /* Allocate a transaction wrapper */
    if ((tw = uhci_create_transfer_wrapper(uhcip, pp,
        ctrl_buf_size, flags)) == NULL) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_insert_ctrl_td: TW allocation failed");

        return (USB_NO_RESOURCES);
    }

    pp->pp_data_toggle = 0;

    tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
    tw->tw_bytes_xfered = 0;
    tw->tw_bytes_pending = ctrl_reqp->ctrl_wLength;
    tw->tw_timeout_cnt = max(UHCI_CTRL_TIMEOUT, ctrl_reqp->ctrl_timeout);

    /*
     * Initialize the callback and any callback
     * data for when the td completes.
     */
    tw->tw_handle_td = uhci_handle_ctrl_td;
    tw->tw_handle_callback_value = NULL;

    if ((uhci_create_setup_pkt(uhcip, pp, tw)) != USB_SUCCESS) {
        tw->tw_ctrl_state = 0;

        /* free the transfer wrapper */
        uhci_deallocate_tw(uhcip, pp, tw);

        return (USB_NO_RESOURCES);
    }

    tw->tw_ctrl_state = SETUP;

    return (USB_SUCCESS);
}


/*
 * uhci_create_setup_pkt:
 *	Create a setup packet to initiate a control transfer.
 *
 *	The OHCI driver has seen cases where devices fail if there is
 *	more than one control transfer to the device within a frame.
 *	So the UHCI driver ensures that only one TD is put on the
 *	control pipe to one device (to be consistent with the OHCI
 *	driver).
 */
static int
uhci_create_setup_pkt(
    uhci_state_t            *uhcip,
    uhci_pipe_private_t     *pp,
    uhci_trans_wrapper_t    *tw)
{
    int             sdata;
    usb_ctrl_req_t  *req = (usb_ctrl_req_t *)tw->tw_curr_xfer_reqp;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_create_setup_pkt: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%p",
        req->ctrl_bmRequestType, req->ctrl_bRequest, req->ctrl_wValue,
        req->ctrl_wIndex, req->ctrl_wLength, (void *)req->ctrl_data);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
    ASSERT(tw != NULL);

    /* Create the first four bytes of the setup packet */
    sdata = (req->ctrl_bmRequestType | (req->ctrl_bRequest << 8) |
        (req->ctrl_wValue << 16));
    ddi_put32(tw->tw_accesshandle, (uint_t *)tw->tw_buf, sdata);

    /* Create the second four bytes */
    sdata = (uint32_t)(req->ctrl_wIndex | (req->ctrl_wLength << 16));
    ddi_put32(tw->tw_accesshandle,
        (uint_t *)(tw->tw_buf + sizeof (uint_t)), sdata);

    /*
     * The TDs are placed on the QH one at a time.
     * Once this TD is placed on the done list, the
     * data or status phase TD will be enqueued.
     */
    if ((uhci_insert_hc_td(uhcip, 0, SETUP_SIZE,
        pp, tw, PID_SETUP, req->ctrl_attributes)) != USB_SUCCESS) {

        return (USB_NO_RESOURCES);
    }

    USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "Create_setup: pp = 0x%p, attrs = 0x%x", pp, req->ctrl_attributes);
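    /*
     * Worked example of the packing above (illustrative only): a
     * standard GET_DESCRIPTOR(DEVICE) request with bmRequestType =
     * 0x80, bRequest = 0x06, wValue = 0x0100, wIndex = 0 and
     * wLength = 18 yields the two 32-bit words 0x01000680 and
     * 0x00120000, which land in memory (little endian) as the usual
     * SETUP byte sequence 80 06 00 01 00 00 12 00.
     */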
    /*
     * If this control transfer has a data phase, record the
     * direction. If the data phase is an OUT transaction,
     * copy the data into the buffer of the transfer wrapper.
     */
    if (req->ctrl_wLength != 0) {
        /* There is a data stage. Find the direction */
        if (req->ctrl_bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
            tw->tw_direction = PID_IN;
        } else {
            tw->tw_direction = PID_OUT;

            /* Copy the data into the buffer */
            ddi_rep_put8(tw->tw_accesshandle,
                req->ctrl_data->b_rptr,
                (uint8_t *)(tw->tw_buf + UHCI_CTRL_EPT_MAX_SIZE),
                req->ctrl_wLength,
                DDI_DEV_AUTOINCR);
        }
    }

    return (USB_SUCCESS);
}


/*
 * uhci_create_stats:
 *	Allocate and initialize the uhci kstat structures.
 */
void
uhci_create_stats(uhci_state_t *uhcip)
{
    int                 i;
    char                kstatname[KSTAT_STRLEN];
    char                *usbtypes[USB_N_COUNT_KSTATS] =
                            {"ctrl", "isoch", "bulk", "intr"};
    uint_t              instance = uhcip->uhci_instance;
    const char          *dname = ddi_driver_name(uhcip->uhci_dip);
    uhci_intrs_stats_t  *isp;

    if (UHCI_INTRS_STATS(uhcip) == NULL) {
        (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
            dname, instance);
        UHCI_INTRS_STATS(uhcip) = kstat_create("usba", instance,
            kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
            sizeof (uhci_intrs_stats_t) / sizeof (kstat_named_t),
            KSTAT_FLAG_PERSISTENT);

        if (UHCI_INTRS_STATS(uhcip) != NULL) {
            isp = UHCI_INTRS_STATS_DATA(uhcip);
            kstat_named_init(&isp->uhci_intrs_hc_halted,
                "HC Halted", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_hc_process_err,
                "HC Process Errors", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_host_sys_err,
                "Host Sys Errors", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_resume_detected,
                "Resume Detected", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_usb_err_intr,
                "USB Error", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_usb_intr,
                "USB Interrupts", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_total,
                "Total Interrupts", KSTAT_DATA_UINT64);
            kstat_named_init(&isp->uhci_intrs_not_claimed,
                "Not Claimed", KSTAT_DATA_UINT64);

            UHCI_INTRS_STATS(uhcip)->ks_private = uhcip;
            UHCI_INTRS_STATS(uhcip)->ks_update = nulldev;
            kstat_install(UHCI_INTRS_STATS(uhcip));
        }
    }

    if (UHCI_TOTAL_STATS(uhcip) == NULL) {
        (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
            dname, instance);
        UHCI_TOTAL_STATS(uhcip) = kstat_create("usba", instance,
            kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
            KSTAT_FLAG_PERSISTENT);

        if (UHCI_TOTAL_STATS(uhcip) != NULL) {
            kstat_install(UHCI_TOTAL_STATS(uhcip));
        }
    }

    for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
        if (uhcip->uhci_count_stats[i] == NULL) {
            (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
                dname, instance, usbtypes[i]);
            uhcip->uhci_count_stats[i] = kstat_create("usba",
                instance, kstatname, "usb_byte_count",
                KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);

            if (uhcip->uhci_count_stats[i] != NULL) {
                kstat_install(uhcip->uhci_count_stats[i]);
            }
        }
    }
}
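/*
 * Example of inspecting these kstats from userland (illustrative; the
 * instance number depends on the system):
 *
 *	# kstat -p usba:0:uhci0,intrs
 *	# kstat -p usba:0:uhci0,total
 *
 * using the module:instance:name triple built above from "usba", the
 * driver name and the instance number.
 */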
/*
 * uhci_destroy_stats:
 *	Clean up the uhci kstat structures.
 */
void
uhci_destroy_stats(uhci_state_t *uhcip)
{
    int i;

    if (UHCI_INTRS_STATS(uhcip)) {
        kstat_delete(UHCI_INTRS_STATS(uhcip));
        UHCI_INTRS_STATS(uhcip) = NULL;
    }

    if (UHCI_TOTAL_STATS(uhcip)) {
        kstat_delete(UHCI_TOTAL_STATS(uhcip));
        UHCI_TOTAL_STATS(uhcip) = NULL;
    }

    for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
        if (uhcip->uhci_count_stats[i]) {
            kstat_delete(uhcip->uhci_count_stats[i]);
            uhcip->uhci_count_stats[i] = NULL;
        }
    }
}


void
uhci_do_intrs_stats(uhci_state_t *uhcip, int val)
{
    if (UHCI_INTRS_STATS(uhcip) == NULL) {

        return;
    }

    UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_total.value.ui64++;
    switch (val) {
    case USBSTS_REG_HC_HALTED:
        UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_hc_halted.value.ui64++;
        break;
    case USBSTS_REG_HC_PROCESS_ERR:
        UHCI_INTRS_STATS_DATA(uhcip)->
            uhci_intrs_hc_process_err.value.ui64++;
        break;
    case USBSTS_REG_HOST_SYS_ERR:
        UHCI_INTRS_STATS_DATA(uhcip)->
            uhci_intrs_host_sys_err.value.ui64++;
        break;
    case USBSTS_REG_RESUME_DETECT:
        UHCI_INTRS_STATS_DATA(uhcip)->
            uhci_intrs_resume_detected.value.ui64++;
        break;
    case USBSTS_REG_USB_ERR_INTR:
        UHCI_INTRS_STATS_DATA(uhcip)->
            uhci_intrs_usb_err_intr.value.ui64++;
        break;
    case USBSTS_REG_USB_INTR:
        UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_usb_intr.value.ui64++;
        break;
    default:
        UHCI_INTRS_STATS_DATA(uhcip)->
            uhci_intrs_not_claimed.value.ui64++;
        break;
    }
}


void
uhci_do_byte_stats(uhci_state_t *uhcip, size_t len, uint8_t attr, uint8_t addr)
{
    uint8_t type = attr & USB_EP_ATTR_MASK;
    uint8_t dir = addr & USB_EP_DIR_MASK;

    switch (dir) {
    case USB_EP_DIR_IN:
        UHCI_TOTAL_STATS_DATA(uhcip)->reads++;
        UHCI_TOTAL_STATS_DATA(uhcip)->nread += len;
        switch (type) {
        case USB_EP_ATTR_CONTROL:
            UHCI_CTRL_STATS(uhcip)->reads++;
            UHCI_CTRL_STATS(uhcip)->nread += len;
            break;
        case USB_EP_ATTR_BULK:
            UHCI_BULK_STATS(uhcip)->reads++;
            UHCI_BULK_STATS(uhcip)->nread += len;
            break;
        case USB_EP_ATTR_INTR:
            UHCI_INTR_STATS(uhcip)->reads++;
            UHCI_INTR_STATS(uhcip)->nread += len;
            break;
        case USB_EP_ATTR_ISOCH:
            UHCI_ISOC_STATS(uhcip)->reads++;
            UHCI_ISOC_STATS(uhcip)->nread += len;
            break;
        }
        break;
    case USB_EP_DIR_OUT:
        UHCI_TOTAL_STATS_DATA(uhcip)->writes++;
        UHCI_TOTAL_STATS_DATA(uhcip)->nwritten += len;
        switch (type) {
        case USB_EP_ATTR_CONTROL:
            UHCI_CTRL_STATS(uhcip)->writes++;
            UHCI_CTRL_STATS(uhcip)->nwritten += len;
            break;
        case USB_EP_ATTR_BULK:
            UHCI_BULK_STATS(uhcip)->writes++;
            UHCI_BULK_STATS(uhcip)->nwritten += len;
            break;
        case USB_EP_ATTR_INTR:
            UHCI_INTR_STATS(uhcip)->writes++;
            UHCI_INTR_STATS(uhcip)->nwritten += len;
            break;
        case USB_EP_ATTR_ISOCH:
            UHCI_ISOC_STATS(uhcip)->writes++;
            UHCI_ISOC_STATS(uhcip)->nwritten += len;
            break;
        }
        break;
    }
}


/*
 * uhci_free_tw:
 *	Free the Transfer Wrapper (TW).
 */
void
uhci_free_tw(uhci_state_t *uhcip, uhci_trans_wrapper_t *tw)
{
    int rval, i;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, "uhci_free_tw:");

    ASSERT(tw != NULL);

    if (tw->tw_isoc_strtlen > 0) {
        ASSERT(tw->tw_isoc_bufs != NULL);
        for (i = 0; i < tw->tw_ncookies; i++) {
            rval = ddi_dma_unbind_handle(
                tw->tw_isoc_bufs[i].dma_handle);
            ASSERT(rval == DDI_SUCCESS);
            ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
            ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
        }
        kmem_free(tw->tw_isoc_bufs, tw->tw_isoc_strtlen);
    } else if (tw->tw_dmahandle != NULL) {
        rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
        ASSERT(rval == DDI_SUCCESS);

        ddi_dma_mem_free(&tw->tw_accesshandle);
        ddi_dma_free_handle(&tw->tw_dmahandle);
    }

    kmem_free(tw, sizeof (uhci_trans_wrapper_t));
}


/*
 * uhci_deallocate_tw:
 *	Deallocate a Transaction Wrapper (TW); this involves
 *	freeing the associated DMA resources.
 */
void
uhci_deallocate_tw(uhci_state_t *uhcip,
    uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw)
{
    uhci_trans_wrapper_t *head;

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_deallocate_tw:");

    /*
     * If the transfer wrapper has no Host Controller (HC)
     * Transfer Descriptors (TD) associated with it, then
     * remove the transfer wrapper. The transfers are done
     * in FIFO order, so this should be the first transfer
     * wrapper on the list.
     */
    if (tw->tw_hctd_head != NULL) {
        ASSERT(tw->tw_hctd_tail != NULL);

        return;
    }

    ASSERT(tw->tw_hctd_tail == NULL);
    ASSERT(pp->pp_tw_head != NULL);

    /*
     * Unlink the tw; if pp->pp_tw_head becomes NULL, set the
     * tail to NULL as well.
     */
    head = pp->pp_tw_head;

    if (head == tw) {
        pp->pp_tw_head = head->tw_next;
        if (pp->pp_tw_head == NULL) {
            pp->pp_tw_tail = NULL;
        }
    } else {
        while (head->tw_next != tw)
            head = head->tw_next;
        head->tw_next = tw->tw_next;
        if (tw->tw_next == NULL) {
            pp->pp_tw_tail = head;
        }
    }
    uhci_free_tw(uhcip, tw);
}


void
uhci_delete_td(uhci_state_t *uhcip, uhci_td_t *td)
{
    uhci_td_t               *tmp_td;
    uhci_trans_wrapper_t    *tw = td->tw;

    if ((td->outst_td_next == NULL) && (td->outst_td_prev == NULL)) {
        uhcip->uhci_outst_tds_head = NULL;
        uhcip->uhci_outst_tds_tail = NULL;
    } else if (td->outst_td_next == NULL) {
        td->outst_td_prev->outst_td_next = NULL;
        uhcip->uhci_outst_tds_tail = td->outst_td_prev;
    } else if (td->outst_td_prev == NULL) {
        td->outst_td_next->outst_td_prev = NULL;
        uhcip->uhci_outst_tds_head = td->outst_td_next;
    } else {
        td->outst_td_prev->outst_td_next = td->outst_td_next;
        td->outst_td_next->outst_td_prev = td->outst_td_prev;
    }

    tmp_td = tw->tw_hctd_head;

    if (tmp_td != td) {
        while (tmp_td->tw_td_next != td) {
            tmp_td = tmp_td->tw_td_next;
        }
        ASSERT(tmp_td);
        tmp_td->tw_td_next = td->tw_td_next;
        if (td->tw_td_next == NULL) {
            tw->tw_hctd_tail = tmp_td;
        }
    } else {
        tw->tw_hctd_head = tw->tw_hctd_head->tw_td_next;
        if (tw->tw_hctd_head == NULL) {
            tw->tw_hctd_tail = NULL;
        }
    }

    td->flag = TD_FLAG_FREE;
}


void
uhci_remove_tds_tws(
    uhci_state_t            *uhcip,
    usba_pipe_handle_data_t *ph)
{
    usb_opaque_t            curr_reqp;
    uhci_pipe_private_t     *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
    usb_ep_descr_t          *ept = &pp->pp_pipe_handle->p_ep;
    uhci_trans_wrapper_t    *tw_tmp;
    uhci_trans_wrapper_t    *tw_head = pp->pp_tw_head;

    while (tw_head != NULL) {
        tw_tmp = tw_head;
        tw_head = tw_head->tw_next;

        curr_reqp = tw_tmp->tw_curr_xfer_reqp;
        if (curr_reqp) {
            /* do this for control/bulk/intr */
            if ((tw_tmp->tw_direction == PID_IN) &&
                (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_INTR)) {
                uhci_deallocate_periodic_in_resource(uhcip,
                    pp, tw_tmp);
            } else {
                uhci_hcdi_callback(uhcip, pp,
                    pp->pp_pipe_handle, tw_tmp, USB_CR_FLUSHED);
            }
        } /* end of curr_reqp */

        if (tw_tmp->tw_claim != UHCI_MODIFY_TD_BITS_CLAIMED) {
            continue;
        }

        while (tw_tmp->tw_hctd_head != NULL) {
            uhci_delete_td(uhcip, tw_tmp->tw_hctd_head);
        }

        uhci_deallocate_tw(uhcip, pp, tw_tmp);
    }
}


/*
 * uhci_remove_qh:
 *	Remove the Queue Head from the Host Controller's
 *	appropriate QH list.
 */
void
uhci_remove_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
    uhci_td_t *dummy_td;

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_remove_qh:");

    dummy_td = pp->pp_qh->td_tailp;
    dummy_td->flag = TD_FLAG_FREE;

    switch (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep)) {
    case USB_EP_ATTR_CONTROL:
        uhci_remove_ctrl_qh(uhcip, pp);
        break;
    case USB_EP_ATTR_BULK:
        uhci_remove_bulk_qh(uhcip, pp);
        break;
    case USB_EP_ATTR_INTR:
        uhci_remove_intr_qh(uhcip, pp);
        break;
    }
}


static void
uhci_remove_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
    queue_head_t *qh = pp->pp_qh;
    queue_head_t *next_lattice_qh =
        QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);

    qh->prev_qh->link_ptr = qh->link_ptr;
    next_lattice_qh->prev_qh = qh->prev_qh;
    qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
}


/*
 * uhci_remove_bulk_qh:
 *	Remove a bulk QH from the Host Controller's QH list. The bulk
 *	QHs may form a loop, so this must be taken care of when
 *	removing a bulk QH.
 */
static void
uhci_remove_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
    queue_head_t    *qh = pp->pp_qh;
    queue_head_t    *next_lattice_qh;
    uint32_t        paddr;

    paddr = (GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);
    next_lattice_qh = (qh == uhcip->uhci_bulk_xfers_q_tail) ?
        0 : QH_VADDR(paddr);

    if ((qh == uhcip->uhci_bulk_xfers_q_tail) &&
        (qh->prev_qh == uhcip->uhci_bulk_xfers_q_head)) {
        SetQH32(uhcip, qh->prev_qh->link_ptr, HC_END_OF_LIST);
    } else {
        qh->prev_qh->link_ptr = qh->link_ptr;
    }

    if (next_lattice_qh == NULL) {
        uhcip->uhci_bulk_xfers_q_tail = qh->prev_qh;
    } else {
        next_lattice_qh->prev_qh = qh->prev_qh;
    }

    qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
}


static void
uhci_remove_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
    queue_head_t *qh = pp->pp_qh;
    queue_head_t *next_lattice_qh =
        QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);

    qh->prev_qh->link_ptr = qh->link_ptr;
    if (next_lattice_qh->prev_qh != NULL) {
        next_lattice_qh->prev_qh = qh->prev_qh;
    } else {
        uhcip->uhci_ctrl_xfers_q_tail = qh->prev_qh;
    }

    qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
}


/*
 * uhci_allocate_td_from_pool:
 *	Allocate a Transfer Descriptor (TD) from the TD buffer pool.
 */
static uhci_td_t *
uhci_allocate_td_from_pool(uhci_state_t *uhcip)
{
    int         index;
    uhci_td_t   *td;

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    /*
     * Search for a blank Transfer Descriptor (TD)
     * in the TD buffer pool.
     */
    for (index = 0; index < uhci_td_pool_size; index++) {
        if (uhcip->uhci_td_pool_addr[index].flag == TD_FLAG_FREE) {
            break;
        }
    }

    if (index == uhci_td_pool_size) {
        USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
            "uhci_allocate_td_from_pool: TD exhausted");

        return (NULL);
    }

    USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
        "uhci_allocate_td_from_pool: Allocated %d", index);

    /* Create a new dummy for the end of the TD list */
    td = &uhcip->uhci_td_pool_addr[index];

    /* Mark the newly allocated TD as a dummy */
    td->flag = TD_FLAG_DUMMY;
    td->qh_td_prev = NULL;

    return (td);
}


/*
 * uhci_insert_bulk_td:
 */
int
uhci_insert_bulk_td(
    uhci_state_t            *uhcip,
    usba_pipe_handle_data_t *ph,
    usb_bulk_req_t          *req,
    usb_flags_t             flags)
{
    size_t                  length;
    uint_t                  mps;    /* MaxPacketSize */
    uint_t                  num_bulk_tds, i, j;
    uint32_t                buf_offs;
    uhci_td_t               *bulk_td_ptr;
    uhci_td_t               *current_dummy, *tmp_td;
    uhci_pipe_private_t     *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
    uhci_trans_wrapper_t    *tw;
    uhci_bulk_isoc_xfer_t   *bulk_xfer_info;
    uhci_bulk_isoc_td_pool_t *td_pool_ptr;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_insert_bulk_td: req: 0x%p, flags = 0x%x", req, flags);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    /*
     * Create the transfer wrapper.
     */
    if ((tw = uhci_create_transfer_wrapper(uhcip, pp, req->bulk_len,
        flags)) == NULL) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_insert_bulk_td: TW allocation failed");

        return (USB_NO_RESOURCES);
    }

    tw->tw_bytes_xfered = 0;
    tw->tw_bytes_pending = req->bulk_len;
    tw->tw_handle_td = uhci_handle_bulk_td;
    tw->tw_handle_callback_value = (usb_opaque_t)req->bulk_data;
    tw->tw_timeout_cnt = req->bulk_timeout;
    tw->tw_data = req->bulk_data;
    tw->tw_curr_xfer_reqp = (usb_opaque_t)req;

    /* Get the bulk pipe direction */
    tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
        PID_OUT : PID_IN;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_insert_bulk_td: direction: 0x%x", tw->tw_direction);

    /* If this is a DATA OUT, copy the data into the transfer buffer. */
    if (tw->tw_direction == PID_OUT) {
        if (req->bulk_len) {
            ASSERT(req->bulk_data != NULL);

            /* Copy the data into the message */
            ddi_rep_put8(tw->tw_accesshandle,
                req->bulk_data->b_rptr,
                (uint8_t *)tw->tw_buf,
                req->bulk_len, DDI_DEV_AUTOINCR);
        }
    }

    /* Get the max packet size. */
    length = mps = pp->pp_pipe_handle->p_ep.wMaxPacketSize;

    /*
     * Calculate the number of TDs to insert in the current frame
     * interval. The max number of TDs allowed (per this driver's
     * implementation) is 128 in one frame interval. Once all these
     * TDs are completed, the remaining TDs will be inserted into
     * the lattice in uhci_handle_bulk_td().
     */
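    /*
     * Worked example (illustrative only): with bulk_len = 1000 and
     * mps = 64, tw_bytes_pending / mps = 15 full-size TDs with a
     * 40-byte remainder, so num_bulk_tds becomes 16 and the last TD
     * carries length = 1000 % 64 = 40 bytes. A 1 MB request at
     * mps = 64 would need 16384 TDs, so it is capped at
     * MAX_NUM_BULK_TDS_PER_XFER per round instead.
     */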
    if ((tw->tw_bytes_pending / mps) >= MAX_NUM_BULK_TDS_PER_XFER) {
        num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
    } else {
        num_bulk_tds = (tw->tw_bytes_pending / mps);

        if (tw->tw_bytes_pending % mps || tw->tw_bytes_pending == 0) {
            num_bulk_tds++;
            length = (tw->tw_bytes_pending % mps);
        }
    }

    /*
     * Allocate memory for the bulk xfer information structure.
     */
    if ((bulk_xfer_info = kmem_zalloc(
        sizeof (uhci_bulk_isoc_xfer_t), KM_NOSLEEP)) == NULL) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_insert_bulk_td: kmem_zalloc failed");

        /* Free the transfer wrapper */
        uhci_deallocate_tw(uhcip, pp, tw);

        return (USB_FAILURE);
    }

    /* Allocate memory for the bulk TDs */
    if (uhci_alloc_bulk_isoc_tds(uhcip, num_bulk_tds, bulk_xfer_info) !=
        USB_SUCCESS) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_insert_bulk_td: alloc_bulk_isoc_tds failed");

        kmem_free(bulk_xfer_info, sizeof (uhci_bulk_isoc_xfer_t));

        /* Free the transfer wrapper */
        uhci_deallocate_tw(uhcip, pp, tw);

        return (USB_FAILURE);
    }

    td_pool_ptr = &bulk_xfer_info->td_pools[0];
    bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
    bulk_td_ptr[0].qh_td_prev = NULL;
    current_dummy = pp->pp_qh->td_tailp;
    buf_offs = 0;
    pp->pp_qh->bulk_xfer_info = bulk_xfer_info;

    /* Fill up all the bulk TDs */
    for (i = 0; i < bulk_xfer_info->num_pools; i++) {
        for (j = 0; j < (td_pool_ptr->num_tds - 1); j++) {
            uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
                &bulk_td_ptr[j+1], BULKTD_PADDR(td_pool_ptr,
                &bulk_td_ptr[j+1]), ph, buf_offs, mps, tw);
            buf_offs += mps;
        }

        /* fill in the last TD */
        if (i == (bulk_xfer_info->num_pools - 1)) {
            uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
                current_dummy, TD_PADDR(current_dummy),
                ph, buf_offs, length, tw);
        } else {
            /* fill in the TD at the tail of a pool */
            tmp_td = &bulk_td_ptr[j];
            td_pool_ptr = &bulk_xfer_info->td_pools[i + 1];
            bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
            uhci_fill_in_bulk_isoc_td(uhcip, tmp_td,
                &bulk_td_ptr[0], BULKTD_PADDR(td_pool_ptr,
                &bulk_td_ptr[0]), ph, buf_offs, mps, tw);
            buf_offs += mps;
        }
    }

    bulk_xfer_info->num_tds = num_bulk_tds;

    /*
     * Point the end of the lattice tree to the start of the bulk
     * xfers queue head. This allows the HC to execute the same
     * Queue Head/TD in the same frame. There are some bulk devices
     * which NAK after completing each TD; as a result, the
     * performance on such devices is very bad. This loop provides
     * a chance to execute NAKed bulk TDs again in the same frame.
     */
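    /*
     * Illustration (not from the original source) of the loop created
     * below when the first bulk command becomes pending:
     *
     *	bulk_xfers_q_head -> QH1 -> QH2 -> ... -> bulk_xfers_q_tail
     *	                      ^                          |
     *	                      +--------------------------+
     *
     * The tail's link_ptr is copied from the head's link_ptr, so the
     * HC keeps re-walking the bulk QHs within one frame, giving NAKed
     * TDs another chance until the frame time is exhausted.
     */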
    if (uhcip->uhci_pending_bulk_cmds++ == 0) {
        uhcip->uhci_bulk_xfers_q_tail->link_ptr =
            uhcip->uhci_bulk_xfers_q_head->link_ptr;
        USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_insert_bulk_td: count = %d no tds %d",
            uhcip->uhci_pending_bulk_cmds, num_bulk_tds);
    }

    /* Insert on the bulk queue head for execution by the HC */
    SetQH32(uhcip, pp->pp_qh->element_ptr,
        bulk_xfer_info->td_pools[0].cookie.dmac_address);

    return (USB_SUCCESS);
}


/*
 * uhci_fill_in_bulk_isoc_td:
 *	Fill in a bulk/isoc TD.
 *
 *	offset - has different meanings for bulk and isoc TDs:
 *		 the starting offset into the TW buffer for a bulk TD,
 *		 and the index into the isoc packet list for an isoc TD
 */
void
uhci_fill_in_bulk_isoc_td(uhci_state_t *uhcip, uhci_td_t *current_td,
    uhci_td_t               *next_td,
    uint32_t                next_td_paddr,
    usba_pipe_handle_data_t *ph,
    uint_t                  offset,
    uint_t                  length,
    uhci_trans_wrapper_t    *tw)
{
    uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
    usb_ep_descr_t      *ept = &pp->pp_pipe_handle->p_ep;
    uint32_t            buf_addr;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_fill_in_bulk_isoc_td: tw 0x%p offs 0x%x length 0x%x",
        tw, offset, length);

    bzero((char *)current_td, sizeof (uhci_td_t));
    SetTD32(uhcip, current_td->link_ptr, next_td_paddr | HC_DEPTH_FIRST);

    switch (UHCI_XFER_TYPE(ept)) {
    case USB_EP_ATTR_ISOCH:
        if (((usb_isoc_req_t *)tw->tw_curr_xfer_reqp)->isoc_attributes
            & USB_ATTRS_SHORT_XFER_OK) {
            SetTD_spd(uhcip, current_td, 1);
        }
        break;
    case USB_EP_ATTR_BULK:
        if (((usb_bulk_req_t *)tw->tw_curr_xfer_reqp)->bulk_attributes
            & USB_ATTRS_SHORT_XFER_OK) {
            SetTD_spd(uhcip, current_td, 1);
        }
        break;
    }

    mutex_enter(&ph->p_usba_device->usb_mutex);

    SetTD_c_err(uhcip, current_td, UHCI_MAX_ERR_COUNT);
    SetTD_status(uhcip, current_td, UHCI_TD_ACTIVE);
    SetTD_ioc(uhcip, current_td, INTERRUPT_ON_COMPLETION);
    SetTD_mlen(uhcip, current_td,
        (length == 0) ? ZERO_LENGTH : (length - 1));
    SetTD_dtogg(uhcip, current_td, pp->pp_data_toggle);
    SetTD_devaddr(uhcip, current_td, ph->p_usba_device->usb_addr);
    SetTD_endpt(uhcip, current_td, ph->p_ep.bEndpointAddress &
        END_POINT_ADDRESS_MASK);
    SetTD_PID(uhcip, current_td, tw->tw_direction);

    /* Get the right buffer address for the current TD */
    switch (UHCI_XFER_TYPE(ept)) {
    case USB_EP_ATTR_ISOCH:
        buf_addr = tw->tw_isoc_bufs[offset].cookie.dmac_address;
        break;
    case USB_EP_ATTR_BULK:
        buf_addr = uhci_get_tw_paddr_by_offs(uhcip, offset,
            length, tw);
        break;
    }
    SetTD32(uhcip, current_td->buffer_address, buf_addr);

    /*
     * Adjust the data toggle.
     * The data toggle bit must always be 0 for isoc transfers,
     * and the "iso" bit in the TD is set for isoc transfers.
     */
    if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) {
        pp->pp_data_toggle = 0;
        SetTD_iso(uhcip, current_td, 1);
    } else {
        ADJ_DATA_TOGGLE(pp);
        next_td->qh_td_prev = current_td;
        pp->pp_qh->td_tailp = next_td;
    }

    current_td->outst_td_next = NULL;
    current_td->outst_td_prev = uhcip->uhci_outst_tds_tail;
    if (uhcip->uhci_outst_tds_head == NULL) {
        uhcip->uhci_outst_tds_head = current_td;
    } else {
        uhcip->uhci_outst_tds_tail->outst_td_next = current_td;
    }
    uhcip->uhci_outst_tds_tail = current_td;
    current_td->tw = tw;

    if (tw->tw_hctd_head == NULL) {
        ASSERT(tw->tw_hctd_tail == NULL);
        tw->tw_hctd_head = current_td;
        tw->tw_hctd_tail = current_td;
    } else {
        /* Add the td to the end of the list */
        tw->tw_hctd_tail->tw_td_next = current_td;
        tw->tw_hctd_tail = current_td;
    }

    mutex_exit(&ph->p_usba_device->usb_mutex);
}


/*
 * uhci_alloc_bulk_isoc_tds:
 *	- Allocates the isoc/bulk TD pools. It will allocate one whole
 *	  pool to store all the TDs if the system allows. Only when the
 *	  first allocation fails does it try to allocate several small
 *	  pools, with each pool limited to one physical page in size.
 */
static int
uhci_alloc_bulk_isoc_tds(
    uhci_state_t            *uhcip,
    uint_t                  num_tds,
    uhci_bulk_isoc_xfer_t   *info)
{
    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_alloc_bulk_isoc_tds: num_tds: 0x%x info: 0x%p",
        num_tds, info);

    info->num_pools = 1;
    /* allocate as a whole pool on the first attempt */
    if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) !=
        USB_SUCCESS) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "alloc_memory_for_tds failed: num_tds %d num_pools %d",
            num_tds, info->num_pools);

        /* reduce the number of TDs per pool and alloc again */
        info->num_pools = num_tds / UHCI_MAX_TD_NUM_PER_POOL;
        if (num_tds % UHCI_MAX_TD_NUM_PER_POOL) {
            info->num_pools++;
        }

        if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) !=
            USB_SUCCESS) {
            USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                "alloc_memory_for_tds failed: num_tds %d "
                "num_pools %d", num_tds, info->num_pools);

            return (USB_NO_RESOURCES);
        }
    }

    return (USB_SUCCESS);
}
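/*
 * Worked example of the fallback split above (illustrative; assumes
 * UHCI_MAX_TD_NUM_PER_POOL is 128): a request for 300 TDs first tries
 * a single pool of 300 TDs. If that contiguous allocation fails,
 * num_pools becomes 300 / 128 = 2, plus 1 for the remainder of 44,
 * i.e. three pools holding 128, 128 and 44 TDs respectively, as laid
 * out by uhci_alloc_memory_for_tds() below.
 */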
/*
 * uhci_alloc_memory_for_tds:
 *	- Allocates memory for the isoc/bulk TD pools.
 */
static int
uhci_alloc_memory_for_tds(
    uhci_state_t            *uhcip,
    uint_t                  num_tds,
    uhci_bulk_isoc_xfer_t   *info)
{
    int                     result, i, j, err;
    size_t                  real_length;
    uint_t                  ccount, num;
    ddi_device_acc_attr_t   dev_attr;
    uhci_bulk_isoc_td_pool_t *td_pool_ptr1, *td_pool_ptr2;

    USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
        "uhci_alloc_memory_for_tds: num_tds: 0x%x info: 0x%p "
        "num_pools: %u", num_tds, info, info->num_pools);

    /* The host controller will be little endian */
    dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
    dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
    dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

    /* Allocate the TD pool structures */
    if ((info->td_pools = kmem_zalloc(
        (sizeof (uhci_bulk_isoc_td_pool_t) * info->num_pools),
        KM_SLEEP)) == NULL) {
        USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_alloc_memory_for_tds: alloc td_pools failed");

        return (USB_FAILURE);
    }

    for (i = 0; i < info->num_pools; i++) {
        if (info->num_pools == 1) {
            num = num_tds;
        } else if (i < (info->num_pools - 1)) {
            num = UHCI_MAX_TD_NUM_PER_POOL;
        } else {
            num = (num_tds % UHCI_MAX_TD_NUM_PER_POOL);
        }

        td_pool_ptr1 = &info->td_pools[i];

        /* Allocate the bulk TD pool DMA handle */
        if (ddi_dma_alloc_handle(uhcip->uhci_dip,
            &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
            &td_pool_ptr1->dma_handle) != DDI_SUCCESS) {

            for (j = 0; j < i; j++) {
                td_pool_ptr2 = &info->td_pools[j];
                result = ddi_dma_unbind_handle(
                    td_pool_ptr2->dma_handle);
                ASSERT(result == DDI_SUCCESS);
                ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
                ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
            }

            kmem_free(info->td_pools,
                (sizeof (uhci_bulk_isoc_td_pool_t) *
                info->num_pools));

            return (USB_FAILURE);
        }

        /* Allocate the memory for the bulk TD pool */
        if (ddi_dma_mem_alloc(td_pool_ptr1->dma_handle,
            num * sizeof (uhci_td_t), &dev_attr,
            DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
            &td_pool_ptr1->pool_addr, &real_length,
            &td_pool_ptr1->mem_handle) != DDI_SUCCESS) {

            ddi_dma_free_handle(&td_pool_ptr1->dma_handle);

            for (j = 0; j < i; j++) {
                td_pool_ptr2 = &info->td_pools[j];
                result = ddi_dma_unbind_handle(
                    td_pool_ptr2->dma_handle);
                ASSERT(result == DDI_SUCCESS);
                ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
                ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
            }

            kmem_free(info->td_pools,
                (sizeof (uhci_bulk_isoc_td_pool_t) *
                info->num_pools));

            return (USB_FAILURE);
        }

        /* Map the bulk TD pool into the I/O address space */
        result = ddi_dma_addr_bind_handle(td_pool_ptr1->dma_handle,
            NULL, (caddr_t)td_pool_ptr1->pool_addr, real_length,
            DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
            &td_pool_ptr1->cookie, &ccount);

        /* Process the result */
        err = USB_SUCCESS;

        if (result != DDI_DMA_MAPPED) {
            USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                "uhci_alloc_memory_for_tds: Result = %d",
                result);
            uhci_decode_ddi_dma_addr_bind_handle_result(uhcip,
                result);

            err = USB_FAILURE;
        }

        if ((result == DDI_DMA_MAPPED) && (ccount != 1)) {
            /* The cookie count should be 1 */
            USB_DPRINTF_L2(PRINT_MASK_ATTA,
                uhcip->uhci_log_hdl,
"uhci_allocate_memory_for_tds: " 3218 "More than 1 cookie"); 3219 3220 result = ddi_dma_unbind_handle( 3221 td_pool_ptr1->dma_handle); 3222 ASSERT(result == DDI_SUCCESS); 3223 3224 err = USB_FAILURE; 3225 } 3226 3227 if (err == USB_FAILURE) { 3228 3229 ddi_dma_mem_free(&td_pool_ptr1->mem_handle); 3230 ddi_dma_free_handle(&td_pool_ptr1->dma_handle); 3231 3232 for (j = 0; j < i; j++) { 3233 td_pool_ptr2 = &info->td_pools[j]; 3234 result = ddi_dma_unbind_handle( 3235 td_pool_ptr2->dma_handle); 3236 ASSERT(result == DDI_SUCCESS); 3237 ddi_dma_mem_free(&td_pool_ptr2->mem_handle); 3238 ddi_dma_free_handle(&td_pool_ptr2->dma_handle); 3239 } 3240 3241 kmem_free(info->td_pools, 3242 (sizeof (uhci_bulk_isoc_td_pool_t) * 3243 info->num_pools)); 3244 3245 return (USB_FAILURE); 3246 } 3247 3248 bzero((void *)td_pool_ptr1->pool_addr, 3249 num * sizeof (uhci_td_t)); 3250 td_pool_ptr1->num_tds = num; 3251 } 3252 3253 return (USB_SUCCESS); 3254 } 3255 3256 3257 /* 3258 * uhci_handle_bulk_td: 3259 * 3260 * Handles the completed bulk transfer descriptors 3261 */ 3262 void 3263 uhci_handle_bulk_td(uhci_state_t *uhcip, uhci_td_t *td) 3264 { 3265 uint_t num_bulk_tds, index, td_count, j; 3266 usb_cr_t error; 3267 uint_t length, bytes_xfered; 3268 ushort_t MaxPacketSize; 3269 uint32_t buf_offs, paddr; 3270 uhci_td_t *bulk_td_ptr, *current_dummy, *td_head; 3271 uhci_td_t *tmp_td; 3272 queue_head_t *qh, *next_qh; 3273 uhci_trans_wrapper_t *tw = td->tw; 3274 uhci_pipe_private_t *pp = tw->tw_pipe_private; 3275 uhci_bulk_isoc_xfer_t *bulk_xfer_info; 3276 uhci_bulk_isoc_td_pool_t *td_pool_ptr; 3277 usba_pipe_handle_data_t *ph; 3278 3279 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3280 "uhci_handle_bulk_td: td = 0x%p tw = 0x%p", td, tw); 3281 3282 /* 3283 * Update the tw_bytes_pending, and tw_bytes_xfered 3284 */ 3285 bytes_xfered = ZERO_LENGTH; 3286 3287 /* 3288 * Check whether there are any errors occurred in the xfer. 3289 * If so, update the data_toggle for the queue head and 3290 * return error to the upper layer. 3291 */ 3292 if (GetTD_status(uhcip, td) & TD_STATUS_MASK) { 3293 uhci_handle_bulk_td_errors(uhcip, td); 3294 3295 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3296 "uhci_handle_bulk_td: error; data toggle: 0x%x", 3297 pp->pp_data_toggle); 3298 3299 return; 3300 } 3301 3302 /* 3303 * Update the tw_bytes_pending, and tw_bytes_xfered 3304 */ 3305 bytes_xfered = GetTD_alen(uhcip, td); 3306 if (bytes_xfered != ZERO_LENGTH) { 3307 tw->tw_bytes_pending -= (bytes_xfered + 1); 3308 tw->tw_bytes_xfered += (bytes_xfered + 1); 3309 } 3310 3311 /* 3312 * Get Bulk pipe information and pipe handle 3313 */ 3314 bulk_xfer_info = pp->pp_qh->bulk_xfer_info; 3315 ph = tw->tw_pipe_private->pp_pipe_handle; 3316 3317 /* 3318 * Check whether data underrun occurred. 3319 * If so, complete the transfer 3320 * Update the data toggle bit 3321 */ 3322 if (bytes_xfered != GetTD_mlen(uhcip, td)) { 3323 bulk_xfer_info->num_tds = 1; 3324 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3325 "uhci_handle_bulk_td: Data underrun occured"); 3326 3327 pp->pp_data_toggle = GetTD_dtogg(uhcip, td) == 0 ? 1 : 0; 3328 } 3329 3330 /* 3331 * If the TD's in the current frame are completed, then check 3332 * whether we have any more bytes to xfer. If so, insert TD's. 3333 * If no more bytes needs to be transferred, then do callback to the 3334 * upper layer. 3335 * If the TD's in the current frame are not completed, then 3336 * just delete the TD from the linked lists. 
    USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_handle_bulk_td: completed TD data toggle: 0x%x",
        GetTD_dtogg(uhcip, td));

    if (--bulk_xfer_info->num_tds == 0) {
        uhci_delete_td(uhcip, td);

        if ((tw->tw_bytes_pending) &&
            (GetTD_mlen(uhcip, td) - GetTD_alen(uhcip, td) == 0)) {

            MaxPacketSize = pp->pp_pipe_handle->p_ep.wMaxPacketSize;
            length = MaxPacketSize;

            qh = pp->pp_qh;
            paddr = GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK;
            if (GetQH32(uhcip, qh->link_ptr) !=
                GetQH32(uhcip,
                uhcip->uhci_bulk_xfers_q_head->link_ptr)) {
                next_qh = QH_VADDR(paddr);
                SetQH32(uhcip, qh->prev_qh->link_ptr,
                    paddr|(0x2));
                next_qh->prev_qh = qh->prev_qh;
                SetQH32(uhcip, qh->link_ptr,
                    GetQH32(uhcip,
                    uhcip->uhci_bulk_xfers_q_head->link_ptr));
                qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
                SetQH32(uhcip,
                    uhcip->uhci_bulk_xfers_q_tail->link_ptr,
                    QH_PADDR(qh) | 0x2);
                uhcip->uhci_bulk_xfers_q_tail = qh;
            }

            if ((tw->tw_bytes_pending / MaxPacketSize) >=
                MAX_NUM_BULK_TDS_PER_XFER) {
                num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
            } else {
                num_bulk_tds =
                    (tw->tw_bytes_pending / MaxPacketSize);
                if (tw->tw_bytes_pending % MaxPacketSize) {
                    num_bulk_tds++;
                    length = (tw->tw_bytes_pending %
                        MaxPacketSize);
                }
            }

            current_dummy = pp->pp_qh->td_tailp;
            td_pool_ptr = &bulk_xfer_info->td_pools[0];
            bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
            buf_offs = tw->tw_bytes_xfered;
            td_count = num_bulk_tds;
            index = 0;

            /* reuse the TDs to transfer more data */
            while (td_count > 0) {
                for (j = 0;
                    (j < (td_pool_ptr->num_tds - 1)) &&
                    (td_count > 1); j++, td_count--) {
                    uhci_fill_in_bulk_isoc_td(uhcip,
                        &bulk_td_ptr[j], &bulk_td_ptr[j+1],
                        BULKTD_PADDR(td_pool_ptr,
                        &bulk_td_ptr[j+1]), ph, buf_offs,
                        MaxPacketSize, tw);
                    buf_offs += MaxPacketSize;
                }

                if (td_count == 1) {
                    uhci_fill_in_bulk_isoc_td(uhcip,
                        &bulk_td_ptr[j], current_dummy,
                        TD_PADDR(current_dummy), ph,
                        buf_offs, length, tw);

                    break;
                } else {
                    tmp_td = &bulk_td_ptr[j];
                    ASSERT(index <
                        (bulk_xfer_info->num_pools - 1));
                    td_pool_ptr = &bulk_xfer_info->
                        td_pools[index + 1];
                    bulk_td_ptr = (uhci_td_t *)
                        td_pool_ptr->pool_addr;
                    uhci_fill_in_bulk_isoc_td(uhcip,
                        tmp_td, &bulk_td_ptr[0],
                        BULKTD_PADDR(td_pool_ptr,
                        &bulk_td_ptr[0]), ph, buf_offs,
                        MaxPacketSize, tw);
                    buf_offs += MaxPacketSize;
                    td_count--;
                    index++;
                }
            }

            pp->pp_qh->bulk_xfer_info = bulk_xfer_info;
            bulk_xfer_info->num_tds = num_bulk_tds;
            SetQH32(uhcip, pp->pp_qh->element_ptr,
                bulk_xfer_info->td_pools[0].cookie.dmac_address);
        } else {
            usba_pipe_handle_data_t *usb_pp = pp->pp_pipe_handle;

            pp->pp_qh->bulk_xfer_info = NULL;

            if (tw->tw_bytes_pending) {
                /* Update the element pointer */
                SetQH32(uhcip, pp->pp_qh->element_ptr,
                    TD_PADDR(pp->pp_qh->td_tailp));

                /* Remove all the tds */
                td_head = tw->tw_hctd_head;
                while (td_head != NULL) {
                    uhci_delete_td(uhcip, td_head);
                    td_head = tw->tw_hctd_head;
                }
            }

            if (tw->tw_direction == PID_IN) {
                usb_req_attrs_t attrs = ((usb_bulk_req_t *)
                    tw->tw_curr_xfer_reqp)->bulk_attributes;

                error = USB_CR_OK;

                /* A data underrun occurred */
                if (tw->tw_bytes_pending &&
                    (!(attrs & USB_ATTRS_SHORT_XFER_OK))) {
                    error = USB_CR_DATA_UNDERRUN;
                }

                uhci_sendup_td_message(uhcip, error, tw);
            } else {
                uhci_do_byte_stats(uhcip, tw->tw_length,
                    usb_pp->p_ep.bmAttributes,
                    usb_pp->p_ep.bEndpointAddress);

                /* A data underrun occurred */
                if (tw->tw_bytes_pending) {

                    tw->tw_data->b_rptr +=
                        tw->tw_bytes_xfered;

                    USB_DPRINTF_L2(PRINT_MASK_ATTA,
                        uhcip->uhci_log_hdl,
                        "uhci_handle_bulk_td: "
                        "data underrun occurred");

                    uhci_hcdi_callback(uhcip, pp,
                        tw->tw_pipe_private->pp_pipe_handle,
                        tw, USB_CR_DATA_UNDERRUN);
                } else {
                    uhci_hcdi_callback(uhcip, pp,
                        tw->tw_pipe_private->pp_pipe_handle,
                        tw, USB_CR_OK);
                }
            } /* direction */

            /* Deallocate DMA memory */
            uhci_deallocate_tw(uhcip, pp, tw);
            for (j = 0; j < bulk_xfer_info->num_pools; j++) {
                td_pool_ptr = &bulk_xfer_info->td_pools[j];
                (void) ddi_dma_unbind_handle(
                    td_pool_ptr->dma_handle);
                ddi_dma_mem_free(&td_pool_ptr->mem_handle);
                ddi_dma_free_handle(&td_pool_ptr->dma_handle);
            }
            kmem_free(bulk_xfer_info->td_pools,
                (sizeof (uhci_bulk_isoc_td_pool_t) *
                bulk_xfer_info->num_pools));
            kmem_free(bulk_xfer_info,
                sizeof (uhci_bulk_isoc_xfer_t));

            /*
             * When there are no pending bulk commands, point the
             * end of the lattice tree to NULL. This makes sure
             * that the HC does not loop anymore and the PCI bus
             * is not affected.
             */
            if (--uhcip->uhci_pending_bulk_cmds == 0) {
                uhcip->uhci_bulk_xfers_q_tail->link_ptr =
                    HC_END_OF_LIST;
                USB_DPRINTF_L3(PRINT_MASK_ATTA,
                    uhcip->uhci_log_hdl,
                    "uhci_handle_bulk_td: count = %d",
                    uhcip->uhci_pending_bulk_cmds);
            }
        }
    } else {
        uhci_delete_td(uhcip, td);
    }
}


void
uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td)
{
    usb_cr_t                usb_err;
    uint32_t                paddr_tail, element_ptr, paddr;
    uhci_td_t               *next_td;
    uhci_pipe_private_t     *pp;
    uhci_trans_wrapper_t    *tw = td->tw;
    usba_pipe_handle_data_t *ph;
    uhci_bulk_isoc_td_pool_t *td_pool_ptr = NULL;

    USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
        "uhci_handle_bulk_td_errors: td = %p", (void *)td);

#ifdef DEBUG
    uhci_print_td(uhcip, td);
#endif

    ph = tw->tw_pipe_private->pp_pipe_handle;
    pp = (uhci_pipe_private_t *)ph->p_hcd_private;

    /*
     * Find the type of error that occurred and return the error
     * to the upper layer. And adjust the data toggle.
     */
    element_ptr = GetQH32(uhcip, pp->pp_qh->element_ptr) &
        QH_ELEMENT_PTR_MASK;
    paddr_tail = TD_PADDR(pp->pp_qh->td_tailp);

    /*
     * If a timeout occurs before a transfer has completed,
     * the timeout handler sets the CRC/Timeout bit and clears the
     * Active bit in the link_ptr for each td in the transfer.
     * It then waits (at least) 1 ms so that any tds the controller
     * might have been executing will have completed.
     * So at this point element_ptr will point to either:
     * 1) the next td for the transfer (which has not been executed,
     *    and has the CRC/Timeout status bit set and Active bit
     *    cleared),
     * 2) the dummy td for this qh.
     * So if the element_ptr does not point to the dummy td, we know
     * it points to the next td that would have been executed.
     * That td has the data toggle we want to save.
     * All outstanding tds have been marked as CRC/Timeout,
     * so it doesn't matter which td we pass to uhci_parse_td_error
     * for the error status.
     */
    if (element_ptr != paddr_tail) {
        paddr = (element_ptr & QH_ELEMENT_PTR_MASK);
        uhci_get_bulk_td_by_paddr(uhcip, pp->pp_qh->bulk_xfer_info,
            paddr, &td_pool_ptr);
        next_td = BULKTD_VADDR(td_pool_ptr, paddr);
        USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_handle_bulk_td_errors: next td = %p",
            (void *)next_td);

        usb_err = uhci_parse_td_error(uhcip, pp, next_td);
    } else {
        usb_err = uhci_parse_td_error(uhcip, pp, td);
    }

    /*
     * Update the link pointer.
     */
    SetQH32(uhcip, pp->pp_qh->element_ptr, TD_PADDR(pp->pp_qh->td_tailp));

    /*
     * Send up the number of bytes transferred before the error
     * condition.
     */
    if ((tw->tw_direction == PID_OUT) && tw->tw_data) {
        tw->tw_data->b_rptr += tw->tw_bytes_xfered;
    }

    uhci_remove_bulk_tds_tws(uhcip, tw->tw_pipe_private, UHCI_IN_ERROR);

    /*
     * When there are no pending bulk commands, point the end of the
     * lattice tree to NULL. This makes sure that the HC does not
     * loop anymore and the PCI bus is not affected.
     */
    if (--uhcip->uhci_pending_bulk_cmds == 0) {
        uhcip->uhci_bulk_xfers_q_tail->link_ptr = HC_END_OF_LIST;
        USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_handle_bulk_td_errors: count = %d",
            uhcip->uhci_pending_bulk_cmds);
    }

    uhci_hcdi_callback(uhcip, pp, ph, tw, usb_err);
    uhci_deallocate_tw(uhcip, pp, tw);
}


/*
 * uhci_get_bulk_td_by_paddr:
 *	Obtain the address of the TD pool that the physical address
 *	falls in.
 *
 *	td_pool_pp - pointer to the address of the TD pool containing
 *		     the paddr
 */
/* ARGSUSED */
static void
uhci_get_bulk_td_by_paddr(
    uhci_state_t            *uhcip,
    uhci_bulk_isoc_xfer_t   *info,
    uint32_t                paddr,
    uhci_bulk_isoc_td_pool_t **td_pool_pp)
{
    uint_t i = 0;

    while (i < info->num_pools) {
        *td_pool_pp = &info->td_pools[i];
        if (((*td_pool_pp)->cookie.dmac_address <= paddr) &&
            (((*td_pool_pp)->cookie.dmac_address +
            (*td_pool_pp)->cookie.dmac_size) > paddr)) {

            break;
        }
        i++;
    }

    ASSERT(i < info->num_pools);
}


void
uhci_remove_bulk_tds_tws(
    uhci_state_t        *uhcip,
    uhci_pipe_private_t *pp,
    int                 what)
{
    uint_t                  rval, i;
    uhci_td_t               *head;
    uhci_td_t               *head_next;
    usb_opaque_t            curr_reqp;
    uhci_bulk_isoc_xfer_t   *info;
    uhci_bulk_isoc_td_pool_t *td_pool_ptr;

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    if ((info = pp->pp_qh->bulk_xfer_info) == NULL) {

        return;
    }

    head = uhcip->uhci_outst_tds_head;

    while (head) {
        uhci_trans_wrapper_t *tw_tmp = head->tw;
        head_next = head->outst_td_next;

        if (pp->pp_qh == tw_tmp->tw_pipe_private->pp_qh) {
            curr_reqp = tw_tmp->tw_curr_xfer_reqp;
            if (curr_reqp &&
                ((what == UHCI_IN_CLOSE) ||
                (what == UHCI_IN_RESET))) {
                uhci_hcdi_callback(uhcip, pp,
                    pp->pp_pipe_handle,
                    tw_tmp, USB_CR_FLUSHED);
            } /* end of curr_reqp */

            uhci_delete_td(uhcip, head);

            if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
                ASSERT(info->num_tds > 0);
                if (--info->num_tds == 0) {
                    uhci_deallocate_tw(uhcip, pp, tw_tmp);

                    /*
                     * This makes sure that the HC
                     * does not loop anymore when
                     * there are no pending bulk
                     * commands.
                     */
                    if (--uhcip->uhci_pending_bulk_cmds
                        == 0) {
                        uhcip->uhci_bulk_xfers_q_tail->
                            link_ptr = HC_END_OF_LIST;
                        USB_DPRINTF_L3(PRINT_MASK_ATTA,
                            uhcip->uhci_log_hdl,
                            "uhci_remove_bulk_tds_tws:"
                            " count = %d",
                            uhcip->
                            uhci_pending_bulk_cmds);
                    }
                }
            }
        }

        head = head_next;
    }

    if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
        ASSERT(info->num_tds == 0);
    }

    for (i = 0; i < info->num_pools; i++) {
        td_pool_ptr = &info->td_pools[i];
        rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
        ASSERT(rval == DDI_SUCCESS);
        ddi_dma_mem_free(&td_pool_ptr->mem_handle);
        ddi_dma_free_handle(&td_pool_ptr->dma_handle);
    }
    kmem_free(info->td_pools, (sizeof (uhci_bulk_isoc_td_pool_t) *
        info->num_pools));
    kmem_free(info, sizeof (uhci_bulk_isoc_xfer_t));
    pp->pp_qh->bulk_xfer_info = NULL;
}


/*
 * uhci_save_data_toggle:
 *	Save the data toggle in the usba_device structure.
 */
void
uhci_save_data_toggle(uhci_pipe_private_t *pp)
{
    usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;

    /* Save the data toggle in the usb devices structure. */
    mutex_enter(&ph->p_mutex);
    usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
        pp->pp_data_toggle);
    mutex_exit(&ph->p_mutex);
}
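/*
 * Illustration (not from the original source) of why the toggle is
 * saved: DATA0/DATA1 alternates per successfully transferred packet on
 * a ctrl/bulk/intr endpoint. If a pipe is closed after an odd number
 * of packets, the endpoint expects DATA1 next; saving pp_data_toggle
 * through usba_hcdi_set_data_toggle() lets a later open of the same
 * endpoint resume with the correct toggle instead of restarting at
 * DATA0 and having the device discard the mismatched packet.
 */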


/*
 * uhci_create_isoc_transfer_wrapper:
 *    Create a Transaction Wrapper (TW) for an isoc transfer and allocate
 *    the DMA resources for it.
 *
 *    An isoc transfer consists of multiple packets, each of which may
 *    have a different length, so each packet is carried by its own TD.
 *    We only know that an individual packet cannot exceed 1023 bytes;
 *    the exact lengths are not known in advance.  Unlike ctrl/bulk/intr
 *    transfers, it is impractical to lay all the TDs over one physically
 *    discontiguous DMA buffer, and allocating one physically contiguous
 *    DMA buffer for all the packets may easily fail when the system is
 *    low on memory.  Therefore a separate, physically contiguous DMA
 *    buffer is allocated for each isoc packet, and an extra structure
 *    is allocated to hold the multiple DMA handles.
 */
static uhci_trans_wrapper_t *
uhci_create_isoc_transfer_wrapper(
    uhci_state_t *uhcip,
    uhci_pipe_private_t *pp,
    usb_isoc_req_t *req,
    size_t length,
    usb_flags_t usb_flags)
{
    int result;
    size_t real_length, strtlen, xfer_size;
    uhci_trans_wrapper_t *tw;
    ddi_device_acc_attr_t dev_attr;
    ddi_dma_attr_t dma_attr;
    int kmem_flag;
    int (*dmamem_wait)(caddr_t);
    uint_t i, j, ccount;
    usb_isoc_req_t *tmp_req = req;

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    if (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep) != USB_EP_ATTR_ISOCH) {

        return (NULL);
    }

    if ((req == NULL) && (UHCI_XFER_DIR(&pp->pp_pipe_handle->p_ep) ==
        USB_EP_DIR_IN)) {
        tmp_req = (usb_isoc_req_t *)pp->pp_client_periodic_in_reqp;
    }

    if (tmp_req == NULL) {

        return (NULL);
    }

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_create_isoc_transfer_wrapper: length = 0x%lx flags = 0x%x",
        length, usb_flags);

    /* The SLEEP flag must not be used in interrupt context */
    if (servicing_interrupt()) {
        kmem_flag = KM_NOSLEEP;
        dmamem_wait = DDI_DMA_DONTWAIT;
    } else {
        kmem_flag = KM_SLEEP;
        dmamem_wait = DDI_DMA_SLEEP;
    }

    /* Allocate space for the transfer wrapper */
    if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
        NULL) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_create_isoc_transfer_wrapper: kmem_alloc failed");

        return (NULL);
    }

    /* Allocate space for the isoc buffer handles */
    strtlen = sizeof (uhci_isoc_buf_t) * tmp_req->isoc_pkts_count;
    if ((tw->tw_isoc_bufs = kmem_zalloc(strtlen, kmem_flag)) == NULL) {
        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_create_isoc_transfer_wrapper: kmem_alloc "
            "isoc buffer failed");
        kmem_free(tw, sizeof (uhci_trans_wrapper_t));

        return (NULL);
    }

    bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
    dma_attr.dma_attr_sgllen = 1;

    dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
    dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
    dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

    /* Store the transfer length */
    tw->tw_length = length;

    for (i = 0; i < tmp_req->isoc_pkts_count; i++) {
        tw->tw_isoc_bufs[i].index = i;

        /* Allocate the DMA handle */
        if ((result = ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr,
            dmamem_wait, 0, &tw->tw_isoc_bufs[i].dma_handle)) !=
            DDI_SUCCESS) {
            USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                "uhci_create_isoc_transfer_wrapper: "
                "Alloc handle %d failed", i);

            for (j = 0; j < i; j++) {
                result = ddi_dma_unbind_handle(
                    tw->tw_isoc_bufs[j].dma_handle);
                ASSERT(result == USB_SUCCESS);
                ddi_dma_mem_free(&tw->tw_isoc_bufs[j].mem_handle);
                ddi_dma_free_handle(&tw->tw_isoc_bufs[j].dma_handle);
            }
            kmem_free(tw->tw_isoc_bufs, strtlen);
            kmem_free(tw, sizeof (uhci_trans_wrapper_t));

            return (NULL);
        }

        /* Allocate the memory */
        xfer_size = tmp_req->isoc_pkt_descr[i].isoc_pkt_length;
        if ((result = ddi_dma_mem_alloc(tw->tw_isoc_bufs[i].dma_handle,
            xfer_size, &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait,
            NULL, (caddr_t *)&tw->tw_isoc_bufs[i].buf_addr,
            &real_length, &tw->tw_isoc_bufs[i].mem_handle)) !=
            DDI_SUCCESS) {
            USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                "uhci_create_isoc_transfer_wrapper: "
                "dma_mem_alloc %d fail", i);
            ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);

            for (j = 0; j < i; j++) {
                result = ddi_dma_unbind_handle(
                    tw->tw_isoc_bufs[j].dma_handle);
                ASSERT(result == USB_SUCCESS);
                ddi_dma_mem_free(&tw->tw_isoc_bufs[j].mem_handle);
                ddi_dma_free_handle(&tw->tw_isoc_bufs[j].dma_handle);
            }
            kmem_free(tw->tw_isoc_bufs, strtlen);
            kmem_free(tw, sizeof (uhci_trans_wrapper_t));

            return (NULL);
        }

        ASSERT(real_length >= xfer_size);

        /* Bind the handle */
        result = ddi_dma_addr_bind_handle(
            tw->tw_isoc_bufs[i].dma_handle, NULL,
            (caddr_t)tw->tw_isoc_bufs[i].buf_addr, real_length,
            DDI_DMA_RDWR|DDI_DMA_CONSISTENT, dmamem_wait, NULL,
            &tw->tw_isoc_bufs[i].cookie, &ccount);

        if ((result == DDI_DMA_MAPPED) && (ccount == 1)) {
            tw->tw_isoc_bufs[i].length = xfer_size;

            continue;
        } else {
            USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                "uhci_create_isoc_transfer_wrapper: "
                "Bind handle %d failed", i);
            if (result == DDI_DMA_MAPPED) {
                result = ddi_dma_unbind_handle(
                    tw->tw_isoc_bufs[i].dma_handle);
                ASSERT(result == USB_SUCCESS);
            }
            ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
            ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);

            for (j = 0; j < i; j++) {
                result = ddi_dma_unbind_handle(
                    tw->tw_isoc_bufs[j].dma_handle);
                ASSERT(result == USB_SUCCESS);
                ddi_dma_mem_free(&tw->tw_isoc_bufs[j].mem_handle);
                ddi_dma_free_handle(&tw->tw_isoc_bufs[j].dma_handle);
            }
            kmem_free(tw->tw_isoc_bufs, strtlen);
            kmem_free(tw, sizeof (uhci_trans_wrapper_t));

            return (NULL);
        }
    }

    tw->tw_ncookies = tmp_req->isoc_pkts_count;
    tw->tw_isoc_strtlen = strtlen;

    /*
     * Only allow one wrapper to be added at a time. Insert the
     * new transaction wrapper into the list for this pipe.
     */
    if (pp->pp_tw_head == NULL) {
        pp->pp_tw_head = tw;
        pp->pp_tw_tail = tw;
    } else {
        pp->pp_tw_tail->tw_next = tw;
        pp->pp_tw_tail = tw;
        ASSERT(tw->tw_next == NULL);
    }

    /* Store a back pointer to the pipe private structure */
    tw->tw_pipe_private = pp;

    /* Store the transfer type - synchronous or asynchronous */
    tw->tw_flags = usb_flags;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_create_isoc_transfer_wrapper: tw = 0x%p, ncookies = %u",
        tw, tw->tw_ncookies);

    return (tw);
}
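

/*
 * Illustrative sketch (not part of the original driver): the per-packet
 * allocation above follows the standard three-step DDI DMA pattern, with
 * error unwinding in reverse order of the steps that succeeded.
 * Condensed here to a single buffer; dip, dma_attr, dev_attr and pkt_len
 * are placeholders for the caller's values.
 */
#if 0
    ddi_dma_handle_t dh;
    ddi_acc_handle_t mh;
    ddi_dma_cookie_t cookie;
    caddr_t kaddr;
    size_t real_len;
    uint_t ccount;

    /* step 1: DMA handle, step 2: memory, step 3: binding */
    if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_DONTWAIT, 0,
        &dh) != DDI_SUCCESS)
        return (NULL);
    if (ddi_dma_mem_alloc(dh, pkt_len, &dev_attr, DDI_DMA_CONSISTENT,
        DDI_DMA_DONTWAIT, NULL, &kaddr, &real_len, &mh) != DDI_SUCCESS) {
        ddi_dma_free_handle(&dh);
        return (NULL);
    }
    if (ddi_dma_addr_bind_handle(dh, NULL, kaddr, real_len,
        DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
        &cookie, &ccount) != DDI_DMA_MAPPED || ccount != 1) {
        ddi_dma_mem_free(&mh);
        ddi_dma_free_handle(&dh);
        return (NULL);
    }
#endif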


/*
 * uhci_insert_isoc_td:
 *    - Create the transfer wrapper
 *    - Allocate memory for the isoc TDs
 *    - Fill in all the TDs and submit them to the HC
 *    - Update all the linked lists
 */
int
uhci_insert_isoc_td(
    uhci_state_t *uhcip,
    usba_pipe_handle_data_t *ph,
    usb_isoc_req_t *isoc_req,
    size_t length,
    usb_flags_t flags)
{
    int rval = USB_SUCCESS;
    int error;
    uint_t ddic;
    uint32_t i, j, index;
    uint32_t bytes_to_xfer;
    uint32_t expired_frames = 0;
    usb_frame_number_t start_frame, end_frame, current_frame;
    uhci_td_t *td_ptr;
    uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
    uhci_trans_wrapper_t *tw;
    uhci_bulk_isoc_xfer_t *isoc_xfer_info;
    uhci_bulk_isoc_td_pool_t *td_pool_ptr;

    USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
        "uhci_insert_isoc_td: ph = 0x%p isoc req = %p length = %lu",
        ph, (void *)isoc_req, length);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    /* Allocate a transfer wrapper */
    if ((tw = uhci_create_isoc_transfer_wrapper(uhcip, pp, isoc_req,
        length, flags)) == NULL) {
        USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
            "uhci_insert_isoc_td: TW allocation failed");

        return (USB_NO_RESOURCES);
    }

    /* Save the current isochronous request pointer */
    tw->tw_curr_xfer_reqp = (usb_opaque_t)isoc_req;

    /*
     * Initialize the transfer wrapper. These values are useful
     * for sending back the reply.
     */
    tw->tw_handle_td = uhci_handle_isoc_td;
    tw->tw_handle_callback_value = NULL;
    tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
        PID_OUT : PID_IN;

    /*
     * If the transfer is an isoc send, copy the data from the
     * request into the transfer wrapper.
     */
    if ((tw->tw_direction == PID_OUT) && length) {
        uchar_t *p;

        ASSERT(isoc_req->isoc_data != NULL);
        p = isoc_req->isoc_data->b_rptr;

        /* Copy the data into the message */
        for (i = 0; i < isoc_req->isoc_pkts_count; i++) {
            ddi_rep_put8(tw->tw_isoc_bufs[i].mem_handle,
                p, (uint8_t *)tw->tw_isoc_bufs[i].buf_addr,
                isoc_req->isoc_pkt_descr[i].isoc_pkt_length,
                DDI_DEV_AUTOINCR);
            p += isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
        }
    }

    if (tw->tw_direction == PID_IN) {
        if ((rval = uhci_allocate_periodic_in_resource(uhcip, pp, tw,
            flags)) != USB_SUCCESS) {
            USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
                "uhci_insert_isoc_td: isoc_req_t alloc failed");
            uhci_deallocate_tw(uhcip, pp, tw);

            return (rval);
        }

        isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;
    }

    tw->tw_isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;

    /* Get the pointer to the isoc_xfer_info structure */
    isoc_xfer_info = (uhci_bulk_isoc_xfer_t *)&tw->tw_xfer_info;
    isoc_xfer_info->num_tds = isoc_req->isoc_pkts_count;

    /*
     * Allocate memory for the isoc TDs.
     */
    if ((rval = uhci_alloc_bulk_isoc_tds(uhcip, isoc_req->isoc_pkts_count,
        isoc_xfer_info)) != USB_SUCCESS) {
        USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
            "uhci_alloc_bulk_isoc_td: Memory allocation failure");

        if (tw->tw_direction == PID_IN) {
            uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
        }
        uhci_deallocate_tw(uhcip, pp, tw);

        return (rval);
    }

    /*
     * Get the isoc TD pool address, the buffer address and the
     * max packet size that the device supports.
     */
    td_pool_ptr = &isoc_xfer_info->td_pools[0];
    td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
    index = 0;

    /*
     * Fill in the isoc TDs.
     */
    USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
        "uhci_insert_isoc_td: isoc pkts %d", isoc_req->isoc_pkts_count);

    for (i = 0; i < isoc_xfer_info->num_pools; i++) {
        for (j = 0; j < td_pool_ptr->num_tds; j++) {
            bytes_to_xfer =
                isoc_req->isoc_pkt_descr[index].isoc_pkt_length;

            uhci_fill_in_bulk_isoc_td(uhcip, &td_ptr[j],
                (uhci_td_t *)NULL, HC_END_OF_LIST, ph, index,
                bytes_to_xfer, tw);
            td_ptr[j].isoc_pkt_index = index;
            index++;
        }

        if (i < (isoc_xfer_info->num_pools - 1)) {
            td_pool_ptr = &isoc_xfer_info->td_pools[i + 1];
            td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
        }
    }

    /*
     * Get the starting frame number.
     * The client driver sets the USB_ATTRS_ISOC_XFER_ASAP flag to let
     * the HCD pick the starting frame number.
     *
     * The following code is time critical, so execute it atomically.
     */
    ddic = ddi_enter_critical();
    current_frame = uhci_get_sw_frame_number(uhcip);

    if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_START_FRAME) {
        start_frame = isoc_req->isoc_frame_no;
        end_frame = start_frame + isoc_req->isoc_pkts_count;

        /* Check the available frames */
        if ((end_frame - current_frame) < UHCI_MAX_ISOC_FRAMES) {
            if (current_frame > start_frame) {
                if ((current_frame + FRNUM_OFFSET) < end_frame) {
                    expired_frames = current_frame +
                        FRNUM_OFFSET - start_frame;
                    start_frame = current_frame + FRNUM_OFFSET;
                } else {
                    rval = USB_INVALID_START_FRAME;
                }
            }
        } else {
            rval = USB_INVALID_START_FRAME;
        }

    } else if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_XFER_ASAP) {
        start_frame = pp->pp_frame_num;

        if (start_frame == INVALID_FRNUM) {
            start_frame = current_frame + FRNUM_OFFSET;
        } else if (current_frame > start_frame) {
            start_frame = current_frame + FRNUM_OFFSET;
        }

        end_frame = start_frame + isoc_req->isoc_pkts_count;
        isoc_req->isoc_frame_no = start_frame;
    }

    if (rval != USB_SUCCESS) {

        /* Exit the critical section */
        ddi_exit_critical(ddic);

        USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
            "uhci_insert_isoc_td: Invalid starting frame number");

        if (tw->tw_direction == PID_IN) {
            uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
        }

        while (tw->tw_hctd_head) {
            uhci_delete_td(uhcip, tw->tw_hctd_head);
        }

        for (i = 0; i < isoc_xfer_info->num_pools; i++) {
            td_pool_ptr = &isoc_xfer_info->td_pools[i];
            error = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
            ASSERT(error == DDI_SUCCESS);
            ddi_dma_mem_free(&td_pool_ptr->mem_handle);
            ddi_dma_free_handle(&td_pool_ptr->dma_handle);
        }
        kmem_free(isoc_xfer_info->td_pools,
            (sizeof (uhci_bulk_isoc_td_pool_t) *
            isoc_xfer_info->num_pools));

        uhci_deallocate_tw(uhcip, pp, tw);

        return (rval);
    }

    /* Complete any packets whose frames have already expired */
    for (i = 0; i < expired_frames; i++) {
        isoc_req->isoc_pkt_descr[i].isoc_pkt_status =
            USB_CR_NOT_ACCESSED;
        isoc_req->isoc_pkt_descr[i].isoc_pkt_actual_length =
            isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
        uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
            &td_ptr, &td_pool_ptr);
        uhci_delete_td(uhcip, td_ptr);
        --isoc_xfer_info->num_tds;
    }

    /*
     * Add the TDs to the HC list.
     */
    start_frame = (start_frame & 0x3ff);
    for (; i < isoc_req->isoc_pkts_count; i++) {
        uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
            &td_ptr, &td_pool_ptr);
        if (uhcip->uhci_isoc_q_tailp[start_frame]) {
            td_ptr->isoc_prev =
                uhcip->uhci_isoc_q_tailp[start_frame];
            td_ptr->isoc_next = NULL;
            td_ptr->link_ptr =
                uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr;
            uhcip->uhci_isoc_q_tailp[start_frame]->isoc_next =
                td_ptr;
            SetTD32(uhcip,
                uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr,
                ISOCTD_PADDR(td_pool_ptr, td_ptr));
            uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
        } else {
            uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
            td_ptr->isoc_next = NULL;
            td_ptr->isoc_prev = NULL;
            SetTD32(uhcip, td_ptr->link_ptr,
                GetFL32(uhcip,
                uhcip->uhci_frame_lst_tablep[start_frame]));
            SetFL32(uhcip,
                uhcip->uhci_frame_lst_tablep[start_frame],
                ISOCTD_PADDR(td_pool_ptr, td_ptr));
        }

        td_ptr->starting_frame = start_frame;

        if (++start_frame == NUM_FRAME_LST_ENTRIES)
            start_frame = 0;
    }

    ddi_exit_critical(ddic);
    pp->pp_frame_num = end_frame;

    USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
        "uhci_insert_isoc_td: current frame number 0x%llx, pipe frame num"
        " 0x%llx", current_frame, pp->pp_frame_num);

    return (rval);
}
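

/*
 * Illustrative worked example (not from the original source), assuming
 * FRNUM_OFFSET is a small safety lead of 5 frames: a client asks for
 * start_frame = 100 with 20 packets, so end_frame = 120.  If the current
 * frame has already advanced to 108, the first frames can no longer be
 * scheduled safely; start_frame is pushed to 108 + 5 = 113, and the first
 * expired_frames = 108 + 5 - 100 = 13 packets are completed above as
 * USB_CR_NOT_ACCESSED.  Had current_frame + FRNUM_OFFSET reached 120, the
 * whole request would have failed with USB_INVALID_START_FRAME.
 */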


/*
 * uhci_get_isoc_td_by_index:
 *    Obtain the addresses of the TD pool and the TD at the index.
 *
 *    tdpp - pointer to the address of the TD at the isoc packet index
 *    td_pool_pp - pointer to the address of the TD pool containing
 *                 the specified TD
 */
/* ARGSUSED */
static void
uhci_get_isoc_td_by_index(
    uhci_state_t *uhcip,
    uhci_bulk_isoc_xfer_t *info,
    uint_t index,
    uhci_td_t **tdpp,
    uhci_bulk_isoc_td_pool_t **td_pool_pp)
{
    uint_t i = 0, j = 0;
    uhci_td_t *td_ptr;

    while (j < info->num_pools) {
        if ((i + info->td_pools[j].num_tds) <= index) {
            i += info->td_pools[j].num_tds;
            j++;
        } else {
            i = index - i;

            break;
        }
    }

    ASSERT(j < info->num_pools);
    *td_pool_pp = &info->td_pools[j];
    td_ptr = (uhci_td_t *)((*td_pool_pp)->pool_addr);
    *tdpp = &td_ptr[i];
}
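

/*
 * Illustrative worked example (not from the original source): with
 * three pools holding 100, 100 and 56 TDs, a global index of 130 walks
 * past pool 0 (i becomes 100, j becomes 1), then stops in pool 1 since
 * 100 + 100 > 130.  The local offset is i = 130 - 100 = 30, so the TD
 * returned is entry 30 of pool 1.
 */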


/*
 * uhci_handle_isoc_td:
 *    Handle a completed isoc TD.
 */
void
uhci_handle_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
{
    uint_t rval, i;
    uint32_t pkt_index = td->isoc_pkt_index;
    usb_cr_t cr;
    uhci_trans_wrapper_t *tw = td->tw;
    usb_isoc_req_t *isoc_req = (usb_isoc_req_t *)tw->tw_isoc_req;
    uhci_pipe_private_t *pp = tw->tw_pipe_private;
    uhci_bulk_isoc_xfer_t *isoc_xfer_info = &tw->tw_xfer_info;
    usba_pipe_handle_data_t *usb_pp;
    uhci_bulk_isoc_td_pool_t *td_pool_ptr;

    USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
        "uhci_handle_isoc_td: td = 0x%p, pp = 0x%p, tw = 0x%p, req = 0x%p, "
        "index = %x", td, pp, tw, isoc_req, pkt_index);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    usb_pp = pp->pp_pipe_handle;

    /*
     * Check whether any errors occurred. If so, update the error
     * count and report it upward, but never return a non-zero
     * completion reason.
     */
    cr = USB_CR_OK;
    if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
        USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
            "uhci_handle_isoc_td: Error Occurred: TD Status = %x",
            GetTD_status(uhcip, td));
        if (isoc_req != NULL) {
            isoc_req->isoc_error_count++;
        }
    }

    if (isoc_req != NULL) {
        isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_status = cr;
        isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_actual_length =
            (GetTD_alen(uhcip, td) == ZERO_LENGTH) ? 0 :
            GetTD_alen(uhcip, td) + 1;
    }

    uhci_delete_isoc_td(uhcip, td);

    if (--isoc_xfer_info->num_tds != 0) {
        USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
            "uhci_handle_isoc_td: Number of TDs %d",
            isoc_xfer_info->num_tds);

        return;
    }

    tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;
    if (tw->tw_direction == PID_IN) {
        uhci_sendup_td_message(uhcip, cr, tw);

        if ((uhci_handle_isoc_receive(uhcip, pp, tw)) != USB_SUCCESS) {
            USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
                "uhci_handle_isoc_td: Drop message");
        }

    } else {
        /*
         * Update the kstats only for OUT;
         * uhci_sendup_td_message() does it for IN.
         */
        uhci_do_byte_stats(uhcip, tw->tw_length,
            usb_pp->p_ep.bmAttributes, usb_pp->p_ep.bEndpointAddress);

        uhci_hcdi_callback(uhcip, pp, usb_pp, tw, USB_CR_OK);
    }

    for (i = 0; i < isoc_xfer_info->num_pools; i++) {
        td_pool_ptr = &isoc_xfer_info->td_pools[i];
        rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
        ASSERT(rval == DDI_SUCCESS);
        ddi_dma_mem_free(&td_pool_ptr->mem_handle);
        ddi_dma_free_handle(&td_pool_ptr->dma_handle);
    }
    kmem_free(isoc_xfer_info->td_pools,
        (sizeof (uhci_bulk_isoc_td_pool_t) *
        isoc_xfer_info->num_pools));
    uhci_deallocate_tw(uhcip, pp, tw);
}


/*
 * uhci_handle_isoc_receive:
 *    - Send the isoc data to the client
 *    - Insert another isoc receive request
 */
static int
uhci_handle_isoc_receive(
    uhci_state_t *uhcip,
    uhci_pipe_private_t *pp,
    uhci_trans_wrapper_t *tw)
{
    USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
        "uhci_handle_isoc_receive: tw = 0x%p", tw);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    /*
     * XXX: check that the pipe state is polling before inserting a
     * new request, and check when TD deallocation is done (so that
     * the same TD could be reused).
     */
    if (uhci_start_isoc_receive_polling(uhcip,
        pp->pp_pipe_handle, (usb_isoc_req_t *)tw->tw_curr_xfer_reqp,
        0) != USB_SUCCESS) {
        USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
            "uhci_handle_isoc_receive: receive polling failed");

        return (USB_FAILURE);
    }

    return (USB_SUCCESS);
}


/*
 * uhci_delete_isoc_td:
 *    - Delete from the outstanding command queue
 *    - Delete from the tw queue
 *    - Delete from the isoc queue
 *    - Delete from the HOST CONTROLLER list
 */
static void
uhci_delete_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
{
    uint32_t starting_frame = td->starting_frame;

    if ((td->isoc_next == NULL) && (td->isoc_prev == NULL)) {
        SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
            GetTD32(uhcip, td->link_ptr));
        uhcip->uhci_isoc_q_tailp[starting_frame] = 0;
    } else if (td->isoc_next == NULL) {
        td->isoc_prev->link_ptr = td->link_ptr;
        td->isoc_prev->isoc_next = NULL;
        uhcip->uhci_isoc_q_tailp[starting_frame] = td->isoc_prev;
    } else if (td->isoc_prev == NULL) {
        td->isoc_next->isoc_prev = NULL;
        SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
            GetTD32(uhcip, td->link_ptr));
    } else {
        td->isoc_prev->isoc_next = td->isoc_next;
        td->isoc_next->isoc_prev = td->isoc_prev;
        td->isoc_prev->link_ptr = td->link_ptr;
    }

    uhci_delete_td(uhcip, td);
}
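

/*
 * Illustrative note (not from the original source): the four branches
 * above are the standard doubly-linked-list unlink cases, keyed to the
 * per-frame list anchored in the frame list table.  For a frame list
 * entry FL -> A -> B -> C (tail): deleting B rewires A's link_ptr and
 * isoc_next to C; deleting C moves the tail pointer back to B; deleting
 * A repoints FL at B; and deleting the only TD repoints FL at whatever
 * that TD's link_ptr held and clears the tail pointer.
 */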


/*
 * uhci_start_isoc_receive_polling:
 *    - Allocates the usb isoc request
 *    - Updates the isoc request
 *    - Inserts the isoc TDs into the HC processing list
 */
int
uhci_start_isoc_receive_polling(
    uhci_state_t *uhcip,
    usba_pipe_handle_data_t *ph,
    usb_isoc_req_t *isoc_req,
    usb_flags_t usb_flags)
{
    int ii, error;
    size_t max_isoc_xfer_size, length, isoc_pkts_length;
    ushort_t isoc_pkt_count;
    uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
    usb_isoc_pkt_descr_t *isoc_pkt_descr;

    USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
        "uhci_start_isoc_receive_polling: usb_flags = %x", usb_flags);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    max_isoc_xfer_size = ph->p_ep.wMaxPacketSize * UHCI_MAX_ISOC_PKTS;

    if (isoc_req) {
        isoc_pkt_descr = isoc_req->isoc_pkt_descr;
        isoc_pkt_count = isoc_req->isoc_pkts_count;
        isoc_pkts_length = isoc_req->isoc_pkts_length;
    } else {
        isoc_pkt_descr = ((usb_isoc_req_t *)
            pp->pp_client_periodic_in_reqp)->isoc_pkt_descr;
        isoc_pkt_count = ((usb_isoc_req_t *)
            pp->pp_client_periodic_in_reqp)->isoc_pkts_count;
        isoc_pkts_length = ((usb_isoc_req_t *)
            pp->pp_client_periodic_in_reqp)->isoc_pkts_length;
    }

    /* Add up the lengths of the individual isoc packets */
    for (ii = 0, length = 0; ii < isoc_pkt_count; ii++) {
        length += isoc_pkt_descr->isoc_pkt_length;
        isoc_pkt_descr++;
    }

    if ((isoc_pkts_length) && (isoc_pkts_length != length)) {

        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_start_isoc_receive_polling: isoc_pkts_length 0x%lx "
            "is not equal to the sum of all pkt lengths 0x%lx in "
            "an isoc request", isoc_pkts_length, length);

        return (USB_FAILURE);
    }

    /* Check the size of the isochronous request */
    if (length > max_isoc_xfer_size) {
        USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
            "uhci_start_isoc_receive_polling: "
            "Max isoc request size = %lx, Given isoc req size = %lx",
            max_isoc_xfer_size, length);

        return (USB_FAILURE);
    }

    /* Add the TDs to the Host Controller's isoc list */
    error = uhci_insert_isoc_td(uhcip, ph, isoc_req, length, usb_flags);

    return (error);
}
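

/*
 * Illustrative note (not from the original source): the validation
 * above only requires that, when the client fills in a non-zero
 * isoc_pkts_length, it matches the sum of the per-packet lengths.
 * E.g. three packets of 64, 64 and 32 bytes must carry
 * isoc_pkts_length == 160 (or 0 to skip the check entirely).
 */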


/*
 * uhci_remove_isoc_tds_tws:
 *    Scan the pipe, remove all the TDs and transfer wrappers, and
 *    deallocate the memory associated with them.
 */
void
uhci_remove_isoc_tds_tws(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
    uint_t rval, i;
    uhci_td_t *tmp_td, *td_head;
    usb_isoc_req_t *isoc_req;
    uhci_trans_wrapper_t *tmp_tw, *tw_head;
    uhci_bulk_isoc_xfer_t *isoc_xfer_info;
    uhci_bulk_isoc_td_pool_t *td_pool_ptr;

    USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
        "uhci_remove_isoc_tds_tws: pp = %p", (void *)pp);

    tw_head = pp->pp_tw_head;
    while (tw_head) {
        tmp_tw = tw_head;
        tw_head = tw_head->tw_next;
        td_head = tmp_tw->tw_hctd_head;
        if (tmp_tw->tw_direction == PID_IN) {
            uhci_deallocate_periodic_in_resource(uhcip, pp,
                tmp_tw);
        } else if (tmp_tw->tw_direction == PID_OUT) {
            uhci_hcdi_callback(uhcip, pp, pp->pp_pipe_handle,
                tmp_tw, USB_CR_FLUSHED);
        }

        while (td_head) {
            tmp_td = td_head;
            td_head = td_head->tw_td_next;
            uhci_delete_isoc_td(uhcip, tmp_td);
        }

        isoc_req = (usb_isoc_req_t *)tmp_tw->tw_isoc_req;
        if (isoc_req) {
            usb_free_isoc_req(isoc_req);
        }

        ASSERT(tmp_tw->tw_hctd_head == NULL);

        if (tmp_tw->tw_xfer_info.td_pools) {
            isoc_xfer_info =
                (uhci_bulk_isoc_xfer_t *)&tmp_tw->tw_xfer_info;
            for (i = 0; i < isoc_xfer_info->num_pools; i++) {
                td_pool_ptr = &isoc_xfer_info->td_pools[i];
                rval = ddi_dma_unbind_handle(
                    td_pool_ptr->dma_handle);
                ASSERT(rval == DDI_SUCCESS);
                ddi_dma_mem_free(&td_pool_ptr->mem_handle);
                ddi_dma_free_handle(&td_pool_ptr->dma_handle);
            }
            kmem_free(isoc_xfer_info->td_pools,
                (sizeof (uhci_bulk_isoc_td_pool_t) *
                isoc_xfer_info->num_pools));
        }

        uhci_deallocate_tw(uhcip, pp, tmp_tw);
    }
}


/*
 * uhci_isoc_update_sw_frame_number():
 *    Update the software frame number; to avoid code duplication,
 *    simply call uhci_get_sw_frame_number().
 */
void
uhci_isoc_update_sw_frame_number(uhci_state_t *uhcip)
{
    (void) uhci_get_sw_frame_number(uhcip);
}


/*
 * uhci_get_sw_frame_number:
 *    Hold the uhci_int_mutex before calling this routine.
 */
uint64_t
uhci_get_sw_frame_number(uhci_state_t *uhcip)
{
    uint64_t sw_frnum, hw_frnum, current_frnum;

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    sw_frnum = uhcip->uhci_sw_frnum;
    hw_frnum = Get_OpReg16(FRNUM);

    /*
     * Check bit 10 in the software counter and the hardware frame
     * counter. If both are the same, then don't increment the software
     * frame counter (bit 10 of the hw frame counter toggles every 1024
     * frames). The lower 11 bits of the software counter contain the
     * hardware frame counter value; the bits above them are incremented
     * every 1024 frames, either here or in the get frame number routine.
     */
    if ((sw_frnum & UHCI_BIT_10_MASK) == (hw_frnum & UHCI_BIT_10_MASK)) {
        /* Bit 10 of the hw counter did not toggle */
        current_frnum = ((sw_frnum & (SW_FRNUM_MASK)) | hw_frnum);
    } else {
        /*
         * The hw counter wrapped around and the interrupt handler
         * did not get a chance to update the sw frame counter.
         * So update the sw frame counter here and return the
         * correct frame number.
         */
        sw_frnum >>= UHCI_SIZE_OF_HW_FRNUM - 1;
        current_frnum =
            ((++sw_frnum << (UHCI_SIZE_OF_HW_FRNUM - 1)) | hw_frnum);
    }
    uhcip->uhci_sw_frnum = current_frnum;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_get_sw_frame_number: sw=%ld hd=%ld",
        uhcip->uhci_sw_frnum, hw_frnum);

    return (current_frnum);
}
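

/*
 * Illustrative worked example (not from the original source), assuming
 * an 11-bit hardware FRNUM (UHCI_SIZE_OF_HW_FRNUM == 11): suppose the
 * software counter reads 0x7fe (frame 2046, bit 10 set) and the next
 * hardware read returns 0x001 (bit 10 clear).  The bit-10 mismatch
 * means the hw counter wrapped, so the upper software bits
 * (0x7fe >> 10 == 1) are incremented to 2 and recombined:
 * (2 << 10) | 0x001 = 0x801, i.e. frame 2049.
 */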


/*
 * uhci_cmd_timeout_hdlr:
 *    This routine is called once every second. It checks for timed-out
 *    control and bulk commands, and times out any command that has
 *    exceeded the period specified by the pipe policy.
 */
void
uhci_cmd_timeout_hdlr(void *arg)
{
    uint_t flag = B_FALSE;
    uhci_td_t *head, *tmp_td;
    uhci_state_t *uhcip = (uhci_state_t *)arg;
    uhci_pipe_private_t *pp;

    /*
     * Check whether any of the control xfers have timed out.
     * If so, complete those commands with timeout as the reason.
     */
    mutex_enter(&uhcip->uhci_int_mutex);
    head = uhcip->uhci_outst_tds_head;

    while (head) {
        /*
         * If the timeout is zero, then don't time out the command.
         */
        if (head->tw->tw_timeout_cnt == 0) {
            head = head->outst_td_next;
            continue;
        }

        if (!(head->tw->tw_flags & TW_TIMEOUT_FLAG)) {
            head->tw->tw_flags |= TW_TIMEOUT_FLAG;
            --head->tw->tw_timeout_cnt;
        }

        /* Only do it for bulk and control TDs */
        if ((head->tw->tw_timeout_cnt == 0) &&
            (head->tw->tw_handle_td != uhci_handle_isoc_td)) {

            USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                "Command timed out: td = %p", (void *)head);

            head->tw->tw_claim = UHCI_TIMEOUT_HDLR_CLAIMED;

            /*
             * Finally, check whether the command completed.
             */
            if (GetTD_status(uhcip, head) & UHCI_TD_ACTIVE) {
                SetTD32(uhcip, head->link_ptr,
                    GetTD32(uhcip, head->link_ptr) |
                    HC_END_OF_LIST);
                pp = head->tw->tw_pipe_private;
                SetQH32(uhcip, pp->pp_qh->element_ptr,
                    GetQH32(uhcip, pp->pp_qh->element_ptr) |
                    HC_END_OF_LIST);
            }

            flag = B_TRUE;
        }

        head = head->outst_td_next;
    }

    if (flag) {
        (void) uhci_wait_for_sof(uhcip);
    }

    head = uhcip->uhci_outst_tds_head;
    while (head) {
        if (head->tw->tw_flags & TW_TIMEOUT_FLAG) {
            head->tw->tw_flags &= ~TW_TIMEOUT_FLAG;
        }
        if (head->tw->tw_claim == UHCI_TIMEOUT_HDLR_CLAIMED) {
            head->tw->tw_claim = UHCI_NOT_CLAIMED;
            tmp_td = head->tw->tw_hctd_head;
            while (tmp_td) {
                SetTD_status(uhcip, tmp_td,
                    UHCI_TD_CRC_TIMEOUT);
                tmp_td = tmp_td->tw_td_next;
            }
        }
        head = head->outst_td_next;
    }

    /*
     * Process the TDs that completed before shifting from normal
     * mode to polled mode.
     */
    if (uhcip->uhci_polled_flag == UHCI_POLLED_FLAG_TRUE) {
        uhci_process_submitted_td_queue(uhcip);
        uhcip->uhci_polled_flag = UHCI_POLLED_FLAG_FALSE;
    } else if (flag) {
        /* Process the completed/timed out commands */
        uhci_process_submitted_td_queue(uhcip);
    }

    /* Re-register the control/bulk/intr commands' timeout handler */
    if (uhcip->uhci_cmd_timeout_id) {
        uhcip->uhci_cmd_timeout_id = timeout(uhci_cmd_timeout_hdlr,
            (void *)uhcip, UHCI_ONE_SECOND);
    }

    mutex_exit(&uhcip->uhci_int_mutex);
}
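

/*
 * Illustrative sketch (not from the original source): the self
 * re-arming timeout() call above is the usual DDI way to run a
 * periodic check.  A hypothetical arm/disarm sequence at attach and
 * detach time would look roughly like this:
 */
#if 0
    timeout_id_t tid;

    /* attach: arm the one-second callback */
    uhcip->uhci_cmd_timeout_id = timeout(uhci_cmd_timeout_hdlr,
        (void *)uhcip, UHCI_ONE_SECOND);

    /* detach: clear the id so the handler stops re-arming, then cancel */
    mutex_enter(&uhcip->uhci_int_mutex);
    tid = uhcip->uhci_cmd_timeout_id;
    uhcip->uhci_cmd_timeout_id = 0;
    mutex_exit(&uhcip->uhci_int_mutex);
    (void) untimeout(tid);
#endif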


/*
 * uhci_wait_for_sof:
 *    Wait for the start of the next frame (implying any changes made in
 *    the lattice have now taken effect).
 *    To be sure this is the case, we wait for the completion of the
 *    current frame (which might have already been pending), then another
 *    complete frame to ensure everything has taken effect.
 */
int
uhci_wait_for_sof(uhci_state_t *uhcip)
{
    int n, error;
    ushort_t cmd_reg;
    usb_frame_number_t before_frame_number, after_frame_number;
    clock_t time, rval;

    USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
        "uhci_wait_for_sof: uhcip = %p", uhcip);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    error = uhci_state_is_operational(uhcip);

    if (error != USB_SUCCESS) {

        return (error);
    }

    before_frame_number = uhci_get_sw_frame_number(uhcip);
    for (n = 0; n < MAX_SOF_WAIT_COUNT; n++) {
        SetTD_ioc(uhcip, uhcip->uhci_sof_td, 1);
        uhcip->uhci_cv_signal = B_TRUE;

        time = ddi_get_lbolt() + UHCI_ONE_SECOND;
        rval = cv_timedwait(&uhcip->uhci_cv_SOF,
            &uhcip->uhci_int_mutex, time);

        after_frame_number = uhci_get_sw_frame_number(uhcip);
        if ((rval == -1) &&
            (after_frame_number <= before_frame_number)) {
            cmd_reg = Get_OpReg16(USBCMD);
            Set_OpReg16(USBCMD, (cmd_reg | USBCMD_REG_HC_RUN));
            Set_OpReg16(USBINTR, ENABLE_ALL_INTRS);
            after_frame_number = uhci_get_sw_frame_number(uhcip);
        }
        before_frame_number = after_frame_number;
    }

    SetTD_ioc(uhcip, uhcip->uhci_sof_td, 0);

    return (uhcip->uhci_cv_signal ? USB_FAILURE : USB_SUCCESS);
}
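

/*
 * Illustrative sketch (not from the original source): cv_timedwait()
 * takes an absolute lbolt deadline and returns -1 on timeout, which is
 * how the loop above distinguishes a missed SOF interrupt from a
 * normal wakeup.  The generic pattern, with a hypothetical cv, lock
 * and condition:
 */
#if 0
    clock_t deadline = ddi_get_lbolt() + drv_usectohz(1000000);

    mutex_enter(&lock);
    while (!condition) {
        if (cv_timedwait(&cv, &lock, deadline) == -1) {
            /* timed out: recover, e.g. kick the hardware */
            break;
        }
    }
    mutex_exit(&lock);
#endif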


/*
 * uhci_allocate_periodic_in_resource:
 *    Allocate an interrupt/isochronous request structure for an
 *    interrupt/isochronous IN transfer.
 */
int
uhci_allocate_periodic_in_resource(
    uhci_state_t *uhcip,
    uhci_pipe_private_t *pp,
    uhci_trans_wrapper_t *tw,
    usb_flags_t flags)
{
    size_t length = 0;
    usb_opaque_t client_periodic_in_reqp;
    usb_intr_req_t *cur_intr_req;
    usb_isoc_req_t *curr_isoc_reqp;
    usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;

    USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
        "uhci_allocate_periodic_in_resource:\n\t"
        "ph = 0x%p, pp = 0x%p, tw = 0x%p, flags = 0x%x", ph, pp, tw, flags);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    /* Check the current periodic in request pointer */
    if (tw->tw_curr_xfer_reqp) {
        USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
            "uhci_allocate_periodic_in_resource: Interrupt "
            "request structure already exists: "
            "allocation failed");

        return (USB_SUCCESS);
    }

    /* Get the client periodic in request pointer */
    client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;

    /*
     * For a periodic IN request, allocate a corresponding usb periodic
     * IN request for the current periodic polling request and copy the
     * information from the saved periodic request structure.
     */
    if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_INTR) {
        /* Get the interrupt transfer length */
        length = ((usb_intr_req_t *)client_periodic_in_reqp)->
            intr_len;

        cur_intr_req = usba_hcdi_dup_intr_req(ph->p_dip,
            (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
        if (cur_intr_req == NULL) {
            USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
                "uhci_allocate_periodic_in_resource: Interrupt "
                "request structure allocation failed");

            return (USB_NO_RESOURCES);
        }

        /* Check and save the timeout value */
        tw->tw_timeout_cnt = (cur_intr_req->intr_attributes &
            USB_ATTRS_ONE_XFER) ? cur_intr_req->intr_timeout : 0;
        tw->tw_curr_xfer_reqp = (usb_opaque_t)cur_intr_req;
        tw->tw_length = cur_intr_req->intr_len;
    } else {
        ASSERT(client_periodic_in_reqp != NULL);

        if ((curr_isoc_reqp = usba_hcdi_dup_isoc_req(ph->p_dip,
            (usb_isoc_req_t *)client_periodic_in_reqp, flags)) ==
            NULL) {
            USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
                "uhci_allocate_periodic_in_resource: Isochronous "
                "request structure allocation failed");

            return (USB_NO_RESOURCES);
        }

        /*
         * Save the client's isochronous request pointer and the
         * length of the isochronous transfer in the transfer
         * wrapper. The dup'ed request is saved in
         * pp_client_periodic_in_reqp.
         */
        tw->tw_curr_xfer_reqp =
            (usb_opaque_t)pp->pp_client_periodic_in_reqp;
        pp->pp_client_periodic_in_reqp = (usb_opaque_t)curr_isoc_reqp;
    }

    mutex_enter(&ph->p_mutex);
    ph->p_req_count++;
    mutex_exit(&ph->p_mutex);

    return (USB_SUCCESS);
}
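

/*
 * Illustrative note (not from the original source): every request
 * dup'ed here is eventually released through
 * uhci_deallocate_periodic_in_resource() below, which undoes the
 * p_req_count increment and frees the dup with usb_free_intr_req()
 * or usb_free_isoc_req() depending on the endpoint type.
 */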


/*
 * uhci_deallocate_periodic_in_resource:
 *    Deallocate the interrupt/isochronous request structure for an
 *    interrupt/isochronous IN transfer.
 */
void
uhci_deallocate_periodic_in_resource(
    uhci_state_t *uhcip,
    uhci_pipe_private_t *pp,
    uhci_trans_wrapper_t *tw)
{
    usb_opaque_t curr_xfer_reqp;
    usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;

    USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
        "uhci_deallocate_periodic_in_resource: "
        "pp = 0x%p tw = 0x%p", pp, tw);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    curr_xfer_reqp = tw->tw_curr_xfer_reqp;
    if (curr_xfer_reqp) {
        /*
         * Reset the periodic in request and usb isoc
         * packet request pointers to NULL.
         */
        tw->tw_curr_xfer_reqp = NULL;
        tw->tw_isoc_req = NULL;

        mutex_enter(&ph->p_mutex);
        ph->p_req_count--;
        mutex_exit(&ph->p_mutex);

        /*
         * Free the pre-allocated interrupt or isochronous request.
         */
        switch (UHCI_XFER_TYPE(&ph->p_ep)) {
        case USB_EP_ATTR_INTR:
            usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
            break;
        case USB_EP_ATTR_ISOCH:
            usb_free_isoc_req((usb_isoc_req_t *)curr_xfer_reqp);
            break;
        }
    }
}


/*
 * uhci_hcdi_callback():
 *    Convenience wrapper around usba_hcdi_cb().
 */
void
uhci_hcdi_callback(uhci_state_t *uhcip, uhci_pipe_private_t *pp,
    usba_pipe_handle_data_t *ph, uhci_trans_wrapper_t *tw, usb_cr_t cr)
{
    usb_opaque_t curr_xfer_reqp;

    USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
        "uhci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x", ph, tw, cr);

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    if (tw && tw->tw_curr_xfer_reqp) {
        curr_xfer_reqp = tw->tw_curr_xfer_reqp;
        tw->tw_curr_xfer_reqp = NULL;
        tw->tw_isoc_req = NULL;
    } else {
        ASSERT(pp->pp_client_periodic_in_reqp != NULL);

        curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
        pp->pp_client_periodic_in_reqp = NULL;
    }

    ASSERT(curr_xfer_reqp != NULL);

    /* Drop the HC mutex across the client callback */
    mutex_exit(&uhcip->uhci_int_mutex);
    usba_hcdi_cb(ph, curr_xfer_reqp, cr);
    mutex_enter(&uhcip->uhci_int_mutex);
}


/*
 * uhci_state_is_operational:
 *    Check the Host Controller state and return the proper value.
 */
int
uhci_state_is_operational(uhci_state_t *uhcip)
{
    int val;

    ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

    switch (uhcip->uhci_hc_soft_state) {
    case UHCI_CTLR_INIT_STATE:
    case UHCI_CTLR_SUSPEND_STATE:
        val = USB_FAILURE;
        break;
    case UHCI_CTLR_OPERATIONAL_STATE:
        val = USB_SUCCESS;
        break;
    case UHCI_CTLR_ERROR_STATE:
        val = USB_HC_HARDWARE_ERROR;
        break;
    default:
        val = USB_FAILURE;
        break;
    }

    return (val);
}


#ifdef DEBUG
static void
uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td)
{
    uint_t *ptr = (uint_t *)td;

#ifndef lint
    _NOTE(NO_COMPETING_THREADS_NOW);
#endif
    USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
        "\tDWORD 1 0x%x\t DWORD 2 0x%x", ptr[0], ptr[1]);
    USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
        "\tDWORD 3 0x%x\t DWORD 4 0x%x", ptr[2], ptr[3]);
    USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
        "\tBytes xfered = %d", td->tw->tw_bytes_xfered);
    USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
        "\tBytes Pending = %d", td->tw->tw_bytes_pending);
    USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
        "Queue Head Details:");
    uhci_print_qh(uhcip, td->tw->tw_pipe_private->pp_qh);

#ifndef lint
    _NOTE(COMPETING_THREADS_NOW);
#endif
}


static void
uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh)
{
    uint_t *ptr = (uint_t *)qh;

    USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
        "\tLink Ptr = %x Element Ptr = %x", ptr[0], ptr[1]);
}
#endif