/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Universal Host Controller Driver (UHCI)
 *
 * The UHCI driver interfaces with the Universal Serial Bus Driver
 * (USBA) on one side and the Host Controller (HC) on the other. The
 * interface to the Host Controller is defined by the UHCI specification.
 * This file contains miscellaneous functions.
 */
#include <sys/usb/hcd/uhci/uhcid.h>
#include <sys/usb/hcd/uhci/uhciutil.h>
#include <sys/usb/hcd/uhci/uhcipolled.h>

#include <sys/disp.h>

/* Globals */
extern uint_t	uhci_td_pool_size;	/* Num TDs */
extern uint_t	uhci_qh_pool_size;	/* Num QHs */
extern ushort_t	uhci_tree_bottom_nodes[];
extern void	*uhci_statep;

/* function prototypes */
static void	uhci_build_interrupt_lattice(uhci_state_t *uhcip);
static int	uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip);

static uint_t	uhci_lattice_height(uint_t bandwidth);
static uint_t	uhci_lattice_parent(uint_t node);
static uint_t	uhci_leftmost_leaf(uint_t node, uint_t height);
static uint_t	uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
		    usb_port_status_t port_status);

static int	uhci_bandwidth_adjust(uhci_state_t *uhcip,
		    usb_ep_descr_t *endpoint, usb_port_status_t port_status);

static uhci_td_t *uhci_allocate_td_from_pool(uhci_state_t *uhcip);
static void	uhci_fill_in_td(uhci_state_t *uhcip,
		    uhci_td_t *td, uhci_td_t *current_dummy,
		    uint32_t buffer_offset, size_t length,
		    uhci_pipe_private_t *pp, uchar_t PID,
		    usb_req_attrs_t attrs, uhci_trans_wrapper_t *tw);
static uint32_t	uhci_get_tw_paddr_by_offs(uhci_state_t *uhcip,
		    uint32_t buffer_offset, size_t length,
		    uhci_trans_wrapper_t *tw);
static uhci_trans_wrapper_t *uhci_create_transfer_wrapper(
		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
		    size_t length, usb_flags_t usb_flags);
static uhci_trans_wrapper_t *uhci_create_isoc_transfer_wrapper(
		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
		    usb_isoc_req_t *req, size_t length,
		    usb_flags_t usb_flags);

static int	uhci_create_setup_pkt(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
static void	uhci_insert_ctrl_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_ctrl_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_insert_intr_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_intr_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_bulk_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_insert_bulk_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td);
static int	uhci_alloc_memory_for_tds(uhci_state_t *uhcip, uint_t num_tds,
		    uhci_bulk_isoc_xfer_t *info);
static int	uhci_alloc_bulk_isoc_tds(uhci_state_t *uhcip, uint_t num_tds,
		    uhci_bulk_isoc_xfer_t *info);
static void	uhci_get_isoc_td_by_index(uhci_state_t *uhcip,
		    uhci_bulk_isoc_xfer_t *info, uint_t index,
		    uhci_td_t **tdpp, uhci_bulk_isoc_td_pool_t **td_pool_pp);
static void	uhci_get_bulk_td_by_paddr(uhci_state_t *uhcip,
		    uhci_bulk_isoc_xfer_t *info, uint32_t paddr,
		    uhci_bulk_isoc_td_pool_t **td_pool_pp);

static int	uhci_handle_isoc_receive(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
static void	uhci_delete_isoc_td(uhci_state_t *uhcip,
		    uhci_td_t *td);
#ifdef DEBUG
static void	uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td);
static void	uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh);
#endif


/*
 * uhci_build_interrupt_lattice:
 *
 * Construct the interrupt lattice tree using static Queue Head pointers.
 * This interrupt lattice tree will have a total of 63 queue heads, and the
 * Host Controller (HC) processes queue heads every frame.
 */
static void
uhci_build_interrupt_lattice(uhci_state_t *uhcip)
{
	int			half_list = NUM_INTR_QH_LISTS / 2;
	uint16_t		i, j, k;
	uhci_td_t		*sof_td, *isoc_td;
	uintptr_t		addr;
	queue_head_t		*list_array = uhcip->uhci_qh_pool_addr;
	queue_head_t		*tmp_qh;
	frame_lst_table_t	*frame_lst_tablep =
	    uhcip->uhci_frame_lst_tablep;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_build_interrupt_lattice:");

	/*
	 * Reserve the first 63 queue head structures in the pool as static
	 * queue heads; these are required for constructing the interrupt
	 * lattice tree.
	 */
	for (i = 0; i < NUM_INTR_QH_LISTS; i++) {
		SetQH32(uhcip, list_array[i].link_ptr, HC_END_OF_LIST);
		SetQH32(uhcip, list_array[i].element_ptr, HC_END_OF_LIST);
		list_array[i].qh_flag = QUEUE_HEAD_FLAG_STATIC;
		list_array[i].node = i;
	}

	/* Build the interrupt lattice tree */
	for (i = 0; i < half_list - 1; i++) {
		/*
		 * The next pointer in the host controller queue head
		 * descriptor must contain an iommu address. Calculate
		 * the offset into the cpu address and add this to the
		 * starting iommu address.
		 */
		addr = QH_PADDR(&list_array[i]) | HC_QUEUE_HEAD;

		SetQH32(uhcip, list_array[2*i + 1].link_ptr, addr);
		SetQH32(uhcip, list_array[2*i + 2].link_ptr, addr);
	}

	/*
	 * Initialize the interrupt list in the Frame list Table
	 * so that it points to the bottom of the tree.
	 */
	for (i = 0, j = 0; i < pow_2(TREE_HEIGHT); i++) {
		addr = QH_PADDR(&list_array[half_list + i - 1]);
		for (k = 0; k < pow_2(VIRTUAL_TREE_HEIGHT); k++) {
			SetFL32(uhcip,
			    frame_lst_tablep[uhci_tree_bottom_nodes[j++]],
			    addr | HC_QUEUE_HEAD);
		}
	}

	/*
	 * Create the control and bulk Queue heads
	 */
	uhcip->uhci_ctrl_xfers_q_head = uhci_alloc_queue_head(uhcip);
	tmp_qh = uhcip->uhci_ctrl_xfers_q_tail = uhcip->uhci_ctrl_xfers_q_head;

	SetQH32(uhcip, list_array[0].link_ptr,
	    (QH_PADDR(tmp_qh) | HC_QUEUE_HEAD));

	uhcip->uhci_bulk_xfers_q_head = uhci_alloc_queue_head(uhcip);
	uhcip->uhci_bulk_xfers_q_tail = uhcip->uhci_bulk_xfers_q_head;
	SetQH32(uhcip, tmp_qh->link_ptr,
	    (QH_PADDR(uhcip->uhci_bulk_xfers_q_head)|HC_QUEUE_HEAD));

	SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_head->link_ptr, HC_END_OF_LIST);

	/*
	 * Add a dummy TD to the static queue head 0. This is used
	 * to generate an interrupt at the end of a frame.
	 */
	sof_td = uhci_allocate_td_from_pool(uhcip);

	SetQH32(uhcip, list_array[0].element_ptr,
	    TD_PADDR(sof_td) | HC_TD_HEAD);
	SetTD32(uhcip, sof_td->link_ptr, HC_END_OF_LIST);
	uhcip->uhci_sof_td = sof_td;

	/*
	 * Add a dummy td that is used to generate an interrupt for
	 * every 1024 frames.
	 */
	isoc_td = uhci_allocate_td_from_pool(uhcip);
	SetTD32(uhcip, isoc_td->link_ptr, HC_END_OF_LIST);
	uhcip->uhci_isoc_td = isoc_td;

	uhcip->uhci_isoc_qh = uhci_alloc_queue_head(uhcip);
	SetQH32(uhcip, uhcip->uhci_isoc_qh->link_ptr,
	    GetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM]));
	SetQH32(uhcip, uhcip->uhci_isoc_qh->element_ptr, TD_PADDR(isoc_td));
	SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM],
	    QH_PADDR(uhcip->uhci_isoc_qh) | HC_QUEUE_HEAD);
}
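

/*
 * Illustrative sketch (not compiled logic): how the lattice above maps
 * onto the frame list, assuming TREE_HEIGHT and VIRTUAL_TREE_HEIGHT are
 * both 5. The 63 static QHs form a binary tree whose 2^5 = 32 leaves
 * are each referenced by 2^5 = 32 frame list entries:
 *
 *	leaves = pow_2(TREE_HEIGHT);		      32
 *	copies = pow_2(VIRTUAL_TREE_HEIGHT);	      32
 *	total  = leaves * copies;		    1024 frame list entries
 *
 * Children of node n are 2n + 1 and 2n + 2, so every node links up to
 * its parent at (n - 1) / 2. Each frame, the HC thus walks one
 * leaf-to-root path of QHs before reaching static node 0 and then the
 * control and bulk queues hung off it.
 */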


/*
 * uhci_allocate_pools:
 *	Allocate the system memory for the Queue Head Descriptor and
 *	for the Transfer Descriptor (TD) pools. Both QH and TD structures
 *	must be aligned to a 16 byte boundary.
 */
int
uhci_allocate_pools(uhci_state_t *uhcip)
{
	dev_info_t		*dip = uhcip->uhci_dip;
	size_t			real_length;
	int			i, result;
	uint_t			ccount;
	ddi_device_acc_attr_t	dev_attr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_allocate_pools:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Allocate the TD pool DMA handle */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
	    &uhcip->uhci_td_pool_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Allocate the memory for the TD pool */
	if (ddi_dma_mem_alloc(uhcip->uhci_td_pool_dma_handle,
	    uhci_td_pool_size * sizeof (uhci_td_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    (caddr_t *)&uhcip->uhci_td_pool_addr, &real_length,
	    &uhcip->uhci_td_pool_mem_handle)) {

		return (USB_FAILURE);
	}

	/* Map the TD pool into the I/O address space */
	result = ddi_dma_addr_bind_handle(uhcip->uhci_td_pool_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_td_pool_addr, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &uhcip->uhci_td_pool_cookie, &ccount);

	bzero((void *)uhcip->uhci_td_pool_addr,
	    uhci_td_pool_size * sizeof (uhci_td_t));

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_allocate_pools: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_allocate_pools: Result = %d", result);

		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_TD_POOL_BOUND;

	/* Initialize the TD pool */
	for (i = 0; i < uhci_td_pool_size; i++) {
		uhcip->uhci_td_pool_addr[i].flag = TD_FLAG_FREE;
	}

	/* Allocate the QH pool DMA handle */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
	    0, &uhcip->uhci_qh_pool_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Allocate the memory for the QH pool */
	if (ddi_dma_mem_alloc(uhcip->uhci_qh_pool_dma_handle,
	    uhci_qh_pool_size * sizeof (queue_head_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    (caddr_t *)&uhcip->uhci_qh_pool_addr, &real_length,
	    &uhcip->uhci_qh_pool_mem_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	result = ddi_dma_addr_bind_handle(uhcip->uhci_qh_pool_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_qh_pool_addr, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &uhcip->uhci_qh_pool_cookie, &ccount);

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_allocate_pools: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_QH_POOL_BOUND;

	bzero((void *)uhcip->uhci_qh_pool_addr,
	    uhci_qh_pool_size * sizeof (queue_head_t));

	/* Initialize the QH pool */
	for (i = 0; i < uhci_qh_pool_size; i++) {
		uhcip->uhci_qh_pool_addr[i].qh_flag = QUEUE_HEAD_FLAG_FREE;
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_allocate_pools: Completed");

	return (USB_SUCCESS);
}


/*
 * uhci_free_pools:
 *	Cleanup on attach failure or detach
 */
void
uhci_free_pools(uhci_state_t *uhcip)
{
	int			i, flag, rval;
	uhci_td_t		*td;
	uhci_trans_wrapper_t	*tw;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_free_pools:");

	if (uhcip->uhci_td_pool_addr && uhcip->uhci_td_pool_mem_handle) {
		for (i = 0; i < uhci_td_pool_size; i++) {
			td = &uhcip->uhci_td_pool_addr[i];

			flag = uhcip->uhci_td_pool_addr[i].flag;
			if ((flag != TD_FLAG_FREE) &&
			    (flag != TD_FLAG_DUMMY) && (td->tw != NULL)) {
				tw = td->tw;
				uhci_free_tw(uhcip, tw);
			}
		}

		if (uhcip->uhci_dma_addr_bind_flag & UHCI_TD_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_td_pool_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&uhcip->uhci_td_pool_mem_handle);
	}

	/* Free the TD pool */
	if (uhcip->uhci_td_pool_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_td_pool_dma_handle);
	}

	if (uhcip->uhci_qh_pool_addr && uhcip->uhci_qh_pool_mem_handle) {
		if (uhcip->uhci_dma_addr_bind_flag & UHCI_QH_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_qh_pool_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&uhcip->uhci_qh_pool_mem_handle);
	}

	/* Free the QH pool */
	if (uhcip->uhci_qh_pool_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_qh_pool_dma_handle);
	}

	/* Free the Frame list Table area */
	if (uhcip->uhci_frame_lst_tablep && uhcip->uhci_flt_mem_handle) {
		if (uhcip->uhci_dma_addr_bind_flag & UHCI_FLA_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_flt_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&uhcip->uhci_flt_mem_handle);
	}

	if (uhcip->uhci_flt_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_flt_dma_handle);
	}
}


/*
 * uhci_decode_ddi_dma_addr_bind_handle_result:
 *	Process the return values of ddi_dma_addr_bind_handle()
 */
void
uhci_decode_ddi_dma_addr_bind_handle_result(uhci_state_t *uhcip, int result)
{
	char *msg;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_decode_ddi_dma_addr_bind_handle_result:");

	switch (result) {
	case DDI_DMA_PARTIAL_MAP:
		msg = "Partial transfers not allowed";
		break;
	case DDI_DMA_INUSE:
		msg = "Handle is in use";
		break;
	case DDI_DMA_NORESOURCES:
		msg = "No resources";
		break;
	case DDI_DMA_NOMAPPING:
		msg = "No mapping";
		break;
	case DDI_DMA_TOOBIG:
		msg = "Object is too big";
		break;
	default:
		msg = "Unknown dma error";
	}

	USB_DPRINTF_L4(PRINT_MASK_ALL, uhcip->uhci_log_hdl, "%s", msg);
}
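

/*
 * Illustrative sketch (names are placeholders, not driver state): the
 * three-step DDI DMA pattern used by uhci_allocate_pools() above and
 * by uhci_init_frame_lst_table() below.
 *
 *	ddi_dma_alloc_handle(dip, &attr, DDI_DMA_SLEEP, 0, &hdl);
 *	ddi_dma_mem_alloc(hdl, size, &acc_attr, DDI_DMA_CONSISTENT,
 *	    DDI_DMA_SLEEP, 0, &kaddr, &real_len, &mem_hdl);
 *	ddi_dma_addr_bind_handle(hdl, NULL, kaddr, real_len,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount);
 *
 * Because dma_attr_sgllen is 1 for these pools, a successful bind must
 * produce exactly one cookie; cookie.dmac_address is the physical/IO
 * address from which the QH and TD link pointers are built. Teardown
 * reverses the order (unbind, mem_free, free_handle), as
 * uhci_free_pools() does.
 */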


/*
 * uhci_init_ctlr:
 *	Initialize the Host Controller (HC).
 */
int
uhci_init_ctlr(uhci_state_t *uhcip)
{
	dev_info_t	*dip = uhcip->uhci_dip;
	uint_t		cmd_reg;
	uint_t		frame_base_addr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_init_ctlr:");

	/*
	 * When USB legacy mode is enabled, the BIOS manages the USB keyboard
	 * attached to the UHCI controller. It has been observed that
	 * sometimes the BIOS does not clear the interrupts in the legacy
	 * mode register in the PCI configuration space. So, disable the
	 * SMI interrupts and route the interrupts to PIRQD here.
	 */
	pci_config_put16(uhcip->uhci_config_handle,
	    LEGACYMODE_REG_OFFSET, LEGACYMODE_REG_INIT_VALUE);

	/*
	 * Disable all the interrupts.
	 */
	Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

	mutex_enter(&uhcip->uhci_int_mutex);
	cmd_reg = Get_OpReg16(USBCMD);
	cmd_reg &= (~USBCMD_REG_HC_RUN);

	/* Stop the controller */
	Set_OpReg16(USBCMD, cmd_reg);

	/* Reset the host controller */
	Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

	/* Wait 10ms for the reset to complete */
	mutex_exit(&uhcip->uhci_int_mutex);
	delay(drv_usectohz(UHCI_RESET_DELAY));
	mutex_enter(&uhcip->uhci_int_mutex);

	Set_OpReg16(USBCMD, 0);

	/* Set the frame number to zero */
	Set_OpReg16(FRNUM, 0);

	/* Initialize the Frame list base address area */
	if (uhci_init_frame_lst_table(dip, uhcip) != USB_SUCCESS) {
		mutex_exit(&uhcip->uhci_int_mutex);

		return (USB_FAILURE);
	}

	/* Save the contents of the Frame Interval Register */
	uhcip->uhci_frame_interval = Get_OpReg8(SOFMOD);

	frame_base_addr = uhcip->uhci_flt_cookie.dmac_address;

	/* Set the Frame list base address */
	Set_OpReg32(FRBASEADD, frame_base_addr);

	/*
	 * Begin sending SOFs:
	 * set the Host Controller Functional State to Operational.
	 */
	cmd_reg = Get_OpReg16(USBCMD);
	cmd_reg |= (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
	    USBCMD_REG_CONFIG_FLAG);

	Set_OpReg16(USBCMD, cmd_reg);
	mutex_exit(&uhcip->uhci_int_mutex);

	/*
	 * Verify the Command and interrupt enable registers,
	 * a sanity check of whether the controller actually initialized.
	 */
	cmd_reg = Get_OpReg16(USBCMD);

	if (!(cmd_reg & (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
	    USBCMD_REG_CONFIG_FLAG))) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_init_ctlr: Controller initialization failed");

		return (USB_FAILURE);
	}

	/*
	 * Set the ioc bit of the isoc intr td. This enables
	 * the generation of an interrupt for every 1024 frames.
	 */
	SetTD_ioc(uhcip, uhcip->uhci_isoc_td, 1);

	/* Set the flag that the uhci controller has been initialized. */
	uhcip->uhci_ctlr_init_flag = B_TRUE;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_init_ctlr: Completed");

	return (USB_SUCCESS);
}


/*
 * uhci_uninit_ctlr:
 *	Uninitialize the Host Controller (HC).
 */
void
uhci_uninit_ctlr(uhci_state_t *uhcip)
{
	if (uhcip->uhci_regs_handle) {
		/* Disable all the interrupts. */
		Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

		/* Complete the current transaction and then halt. */
		Set_OpReg16(USBCMD, 0);

		/* Wait for some time */
		mutex_exit(&uhcip->uhci_int_mutex);
		delay(drv_usectohz(UHCI_TIMEWAIT));
		mutex_enter(&uhcip->uhci_int_mutex);
	}
}
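

/*
 * Illustrative summary (not additional logic) of the register-level
 * bring-up performed by uhci_init_ctlr() above:
 *
 *	Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);	disable interrupts
 *	Set_OpReg16(USBCMD, cmd & ~USBCMD_REG_HC_RUN);	stop the HC
 *	Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);	global reset, ~10ms
 *	Set_OpReg16(USBCMD, 0);				clear the reset bit
 *	Set_OpReg16(FRNUM, 0);				frame counter to 0
 *	Set_OpReg32(FRBASEADD, frame_base_addr);	4K-aligned frame list
 *	Set_OpReg16(USBCMD, cmd | USBCMD_REG_HC_RUN | ...); start schedule
 *
 * Once the run bit is set, the HC fetches frame_lst_tablep[FRNUM] every
 * millisecond, so the frame list and lattice must be fully built by
 * uhci_init_frame_lst_table() before that point.
 */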


/*
 * uhci_map_regs:
 *	The Host Controller (HC) contains a set of on-chip operational
 *	registers which should be mapped into a non-cacheable
 *	portion of the system addressable space.
 */
int
uhci_map_regs(uhci_state_t *uhcip)
{
	dev_info_t		*dip = uhcip->uhci_dip;
	int			index;
	uint32_t		regs_prop_len;
	int32_t			*regs_list;
	uint16_t		command_reg;
	ddi_device_acc_attr_t	attr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_map_regs:");

	/* The host controller will be little endian */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, uhcip->uhci_dip,
	    DDI_PROP_DONTPASS, "reg", &regs_list, &regs_prop_len) !=
	    DDI_PROP_SUCCESS) {

		return (USB_FAILURE);
	}

	for (index = 0; index * 5 < regs_prop_len; index++) {
		if (regs_list[index * 5] & UHCI_PROP_MASK) {
			break;
		}
	}

	/*
	 * Free the memory allocated by ddi_prop_lookup_int_array().
	 */
	ddi_prop_free(regs_list);

	if (index * 5 >= regs_prop_len) {

		return (USB_FAILURE);
	}

	/* Map in the operational registers */
	if (ddi_regs_map_setup(dip, index, (caddr_t *)&uhcip->uhci_regsp,
	    0, sizeof (hc_regs_t), &attr, &uhcip->uhci_regs_handle) !=
	    DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "ddi_regs_map_setup: failed");

		return (USB_FAILURE);
	}

	if (pci_config_setup(dip, &uhcip->uhci_config_handle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_map_regs: Config error");

		return (USB_FAILURE);
	}

	/* Make sure Memory Access Enable and Master Enable are set */
	command_reg = pci_config_get16(uhcip->uhci_config_handle,
	    PCI_CONF_COMM);
	if (!(command_reg & (PCI_COMM_MAE | PCI_COMM_ME))) {
		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_map_regs: No MAE/ME");
	}

	command_reg |= PCI_COMM_MAE | PCI_COMM_ME;
	pci_config_put16(uhcip->uhci_config_handle, PCI_CONF_COMM, command_reg);

	/*
	 * Check whether the I/O base address is configured and enabled.
	 */
	if (!(command_reg & PCI_COMM_IO)) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "I/O Base address access disabled");

		return (USB_FAILURE);
	}

	/*
	 * Get the IO base address of the controller
	 */
	uhcip->uhci_iobase = (pci_config_get16(uhcip->uhci_config_handle,
	    PCI_CONF_IOBASE) & PCI_CONF_IOBASE_MASK);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_map_regs: Completed");

	return (USB_SUCCESS);
}


void
uhci_unmap_regs(uhci_state_t *uhcip)
{
	/* Unmap the UHCI registers */
	if (uhcip->uhci_regs_handle) {
		/* Reset the host controller */
		Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

		ddi_regs_map_free(&uhcip->uhci_regs_handle);
	}

	if (uhcip->uhci_config_handle) {
		pci_config_teardown(&uhcip->uhci_config_handle);
	}
}
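

/*
 * Note on the "reg" scan in uhci_map_regs() above: each entry of a PCI
 * "reg" property is five 32-bit cells (phys.hi, phys.mid, phys.lo,
 * size.hi, size.lo), hence the index * 5 stride. The loop looks for
 * the first entry whose phys.hi cell matches UHCI_PROP_MASK, which is
 * presumably the register set holding the UHCI operational registers;
 * that index is then handed to ddi_regs_map_setup().
 */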


/*
 * uhci_set_dma_attributes:
 *	Set the limits in the DMA attributes structure. Most of the values
 *	used in the DMA limit structures are the default values as specified
 *	by the Writing PCI device drivers document.
 */
void
uhci_set_dma_attributes(uhci_state_t *uhcip)
{
	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_set_dma_attributes:");

	/* Initialize the DMA attributes */
	uhcip->uhci_dma_attr.dma_attr_version = DMA_ATTR_V0;
	uhcip->uhci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
	uhcip->uhci_dma_attr.dma_attr_addr_hi = 0xfffffff0ull;

	/* 32 bit addressing */
	uhcip->uhci_dma_attr.dma_attr_count_max = 0xffffffull;

	/*
	 * Setting dma_attr_align to 512 sometimes makes the binding of
	 * the handle fail, for reasons that are not understood. Setting
	 * it to 16 is right for our case (16 byte alignment is required
	 * per the UHCI spec for TD descriptors).
	 */

	/* 16 byte alignment */
	uhcip->uhci_dma_attr.dma_attr_align = 0x10;

	/*
	 * Since the PCI specification allows byte alignment, the
	 * burstsizes field should be set to 1 for PCI devices.
	 */
	uhcip->uhci_dma_attr.dma_attr_burstsizes = 0x1;

	uhcip->uhci_dma_attr.dma_attr_minxfer = 0x1;
	uhcip->uhci_dma_attr.dma_attr_maxxfer = 0xffffffull;
	uhcip->uhci_dma_attr.dma_attr_seg = 0xffffffffull;
	uhcip->uhci_dma_attr.dma_attr_sgllen = 1;
	uhcip->uhci_dma_attr.dma_attr_granular = 1;
	uhcip->uhci_dma_attr.dma_attr_flags = 0;
}


uint_t
pow_2(uint_t x)
{
	return ((x == 0) ? 1 : (1 << x));
}


uint_t
log_2(uint_t x)
{
	int ret_val = 0;

	while (x != 1) {
		ret_val++;
		x = x >> 1;
	}

	return (ret_val);
}
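

/*
 * Worked example for the helpers above: pow_2(5) == 32 and
 * log_2(32) == 5. Note that log_2() truncates, so log_2(20) == 4;
 * uhci_bandwidth_adjust() below relies on this to round a polling
 * interval down to a power of two, e.g.:
 *
 *	interval = 20;
 *	i = 0;
 *	while (pow_2(i) <= interval)
 *		i++;			exits with i == 5
 *	adjusted = pow_2(i - 1);	16ms
 */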


/*
 * uhci_obtain_state:
 */
uhci_state_t *
uhci_obtain_state(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	uhci_state_t *state = ddi_get_soft_state(uhci_statep, instance);

	ASSERT(state != NULL);

	return (state);
}


/*
 * uhci_alloc_hcdi_ops:
 *	The HCDI interfaces or entry points are the software interfaces used
 *	by the Universal Serial Bus Driver (USBA) to access the services of
 *	the Host Controller Driver (HCD). During HCD initialization, inform
 *	USBA about all available HCDI interfaces or entry points.
 */
usba_hcdi_ops_t *
uhci_alloc_hcdi_ops(uhci_state_t *uhcip)
{
	usba_hcdi_ops_t	*hcdi_ops;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_alloc_hcdi_ops:");

	hcdi_ops = usba_alloc_hcdi_ops();

	hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION_1;

	hcdi_ops->usba_hcdi_pipe_open = uhci_hcdi_pipe_open;
	hcdi_ops->usba_hcdi_pipe_close = uhci_hcdi_pipe_close;
	hcdi_ops->usba_hcdi_pipe_reset = uhci_hcdi_pipe_reset;

	hcdi_ops->usba_hcdi_pipe_ctrl_xfer = uhci_hcdi_pipe_ctrl_xfer;
	hcdi_ops->usba_hcdi_pipe_bulk_xfer = uhci_hcdi_pipe_bulk_xfer;
	hcdi_ops->usba_hcdi_pipe_intr_xfer = uhci_hcdi_pipe_intr_xfer;
	hcdi_ops->usba_hcdi_pipe_isoc_xfer = uhci_hcdi_pipe_isoc_xfer;

	hcdi_ops->usba_hcdi_bulk_transfer_size = uhci_hcdi_bulk_transfer_size;
	hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
	    uhci_hcdi_pipe_stop_intr_polling;
	hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
	    uhci_hcdi_pipe_stop_isoc_polling;

	hcdi_ops->usba_hcdi_get_current_frame_number =
	    uhci_hcdi_get_current_frame_number;
	hcdi_ops->usba_hcdi_get_max_isoc_pkts = uhci_hcdi_get_max_isoc_pkts;

	hcdi_ops->usba_hcdi_console_input_init = uhci_hcdi_polled_input_init;
	hcdi_ops->usba_hcdi_console_input_enter = uhci_hcdi_polled_input_enter;
	hcdi_ops->usba_hcdi_console_read = uhci_hcdi_polled_read;
	hcdi_ops->usba_hcdi_console_input_exit = uhci_hcdi_polled_input_exit;
	hcdi_ops->usba_hcdi_console_input_fini = uhci_hcdi_polled_input_fini;

	hcdi_ops->usba_hcdi_console_output_init = uhci_hcdi_polled_output_init;
	hcdi_ops->usba_hcdi_console_output_enter =
	    uhci_hcdi_polled_output_enter;
	hcdi_ops->usba_hcdi_console_write = uhci_hcdi_polled_write;
	hcdi_ops->usba_hcdi_console_output_exit = uhci_hcdi_polled_output_exit;
	hcdi_ops->usba_hcdi_console_output_fini = uhci_hcdi_polled_output_fini;

	return (hcdi_ops);
}


/*
 * uhci_init_frame_lst_table:
 *	Allocate the system memory and initialize the Host Controller
 *	Frame list table area. The start of the Frame list Table
 *	area must be 4096 byte aligned.
 */
static int
uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip)
{
	int			result;
	uint_t			ccount;
	size_t			real_length;
	ddi_device_acc_attr_t	dev_attr;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_init_frame_lst_table:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* 4K alignment required */
	uhcip->uhci_dma_attr.dma_attr_align = 0x1000;

	/* Create space for the HCCA block */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
	    0, &uhcip->uhci_flt_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Reset to the default 16 bytes */
	uhcip->uhci_dma_attr.dma_attr_align = 0x10;

	if (ddi_dma_mem_alloc(uhcip->uhci_flt_dma_handle,
	    SIZE_OF_FRAME_LST_TABLE, &dev_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, 0, (caddr_t *)&uhcip->uhci_frame_lst_tablep,
	    &real_length, &uhcip->uhci_flt_mem_handle)) {

		return (USB_FAILURE);
	}

	/* Map the whole Frame list base area into the I/O address space */
	result = ddi_dma_addr_bind_handle(uhcip->uhci_flt_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_frame_lst_tablep, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &uhcip->uhci_flt_cookie, &ccount);

	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_init_frame_list_table: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_FLA_POOL_BOUND;

	bzero((void *)uhcip->uhci_frame_lst_tablep, real_length);

	/* Initialize the interrupt lists */
	uhci_build_interrupt_lattice(uhcip);

	return (USB_SUCCESS);
}


/*
 * uhci_alloc_queue_head:
 *	Allocate a queue head
 */
queue_head_t *
uhci_alloc_queue_head(uhci_state_t *uhcip)
{
	int		index;
	uhci_td_t	*dummy_td;
	queue_head_t	*queue_head;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Allocate a dummy td first. */
	if ((dummy_td = uhci_allocate_td_from_pool(uhcip)) == NULL) {

		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_queue_head: allocate td from pool failed");

		return (NULL);
	}

	/*
	 * The first 63 queue heads in the Queue Head (QH)
	 * buffer pool are reserved for building the interrupt lattice
	 * tree. Search for a blank Queue head in the QH buffer pool.
	 */
	for (index = NUM_STATIC_NODES; index < uhci_qh_pool_size; index++) {
		if (uhcip->uhci_qh_pool_addr[index].qh_flag ==
		    QUEUE_HEAD_FLAG_FREE) {
			break;
		}
	}

	USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head: Allocated %d", index);

	if (index == uhci_qh_pool_size) {
		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_queue_head: All QH exhausted");

		/* Free the dummy td allocated for this qh. */
		dummy_td->flag = TD_FLAG_FREE;

		return (NULL);
	}

	queue_head = &uhcip->uhci_qh_pool_addr[index];
	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head: Allocated address 0x%p", queue_head);

	bzero((void *)queue_head, sizeof (queue_head_t));
	SetQH32(uhcip, queue_head->link_ptr, HC_END_OF_LIST);
	SetQH32(uhcip, queue_head->element_ptr, HC_END_OF_LIST);
	queue_head->prev_qh = NULL;
	queue_head->qh_flag = QUEUE_HEAD_FLAG_BUSY;

	bzero((char *)dummy_td, sizeof (uhci_td_t));
	queue_head->td_tailp = dummy_td;
	SetQH32(uhcip, queue_head->element_ptr, TD_PADDR(dummy_td));

	return (queue_head);
}
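

/*
 * Note on the dummy TD scheme set up by uhci_alloc_queue_head() above:
 * every QH is created with one inactive dummy TD and element_ptr
 * pointing at it. uhci_insert_hc_td() later fills in the current dummy
 * with real transfer data and appends a fresh dummy at the tail, so
 * the HC always sees a consistent element list. For illustration,
 * right after allocation:
 *
 *	qh->element_ptr -> dummy (inactive)
 *	qh->td_tailp    == dummy
 */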


/*
 * uhci_allocate_bandwidth:
 *	Figure out whether or not this interval may be supported. Return
 *	the index into the lattice if it can be supported. Return
 *	allocation failure if it can not be supported.
 */
int
uhci_allocate_bandwidth(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*pipe_handle,
	uint_t			*node)
{
	int		bandwidth;	/* Requested bandwidth */
	uint_t		min, min_index;
	uint_t		i;
	uint_t		height;		/* Bandwidth's height in the tree */
	uint_t		leftmost;
	uint_t		length;
	uint32_t	paddr;
	queue_head_t	*tmp_qh;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint.
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);

	length = uhci_compute_total_bandwidth(endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If the length in bytes plus the allocated bandwidth exceeds
	 * the maximum, return bandwidth allocation failure.
	 */
	if ((length + uhcip->uhci_bandwidth_intr_min +
	    uhcip->uhci_bandwidth_isoch_sum) > (MAX_PERIODIC_BANDWIDTH)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_allocate_bandwidth: "
		    "Reached maximum bandwidth value and cannot allocate "
		    "bandwidth for a given Interrupt/Isoch endpoint");

		return (USB_NO_BANDWIDTH);
	}

	/*
	 * Isochronous endpoints are not inserted into the lattice;
	 * just add their length to the total isochronous bandwidth.
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum += length;

		return (USB_SUCCESS);
	}

	/*
	 * This is an interrupt endpoint.
	 * Adjust the bandwidth to be a power of 2.
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this bandwidth can't be supported,
	 * return allocation failure.
	 */
	if (bandwidth == USB_FAILURE) {

		return (USB_FAILURE);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The new bandwidth is %d", bandwidth);

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The leaf with minimal bandwidth %d, "
	    "The smallest bandwidth %d", min_index, min);

	/*
	 * Find the index into the lattice given the
	 * leaf with the smallest allocated bandwidth.
	 */
	height = uhci_lattice_height(bandwidth);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The height is %d", height);

	*node = uhci_tree_bottom_nodes[min_index];

	/* check if there are isoc TDs scheduled for this frame */
	if (uhcip->uhci_isoc_q_tailp[*node]) {
		paddr = (uhcip->uhci_isoc_q_tailp[*node]->link_ptr &
		    FRAME_LST_PTR_MASK);
	} else {
		paddr = (uhcip->uhci_frame_lst_tablep[*node] &
		    FRAME_LST_PTR_MASK);
	}

	tmp_qh = QH_VADDR(paddr);
	*node = tmp_qh->node;
	for (i = 0; i < height; i++) {
		*node = uhci_lattice_parent(*node);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The real node is %d", *node);

	/*
	 * Find the leftmost leaf in the subtree specified by the node.
	 */
	leftmost = uhci_leftmost_leaf(*node, height);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "Leftmost %d", leftmost);

	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i++) {

		if ((length + uhcip->uhci_bandwidth_isoch_sum +
		    uhcip->uhci_bandwidth[i]) > MAX_PERIODIC_BANDWIDTH) {

			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_allocate_bandwidth: "
			    "Reached maximum bandwidth value and cannot "
			    "allocate bandwidth for Interrupt endpoint");

			return (USB_NO_BANDWIDTH);
		}
	}

	/*
	 * All the leaves for this node must be updated with the bandwidth.
	 */
	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i++) {
		uhcip->uhci_bandwidth[i] += length;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;

	return (USB_SUCCESS);
}
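

/*
 * Worked example (values illustrative, assuming a 1024-entry frame
 * list): suppose a full-speed interrupt endpoint costs length = 43
 * bytes per transaction and uhci_bandwidth_adjust() returns 8, i.e.
 * the endpoint is polled every 8th frame. uhci_lattice_height(8)
 * gives the lattice level at which its QH must sit, the least-loaded
 * leaf selects which subtree to use, and NUM_FRAME_LST_ENTRIES / 8 =
 * 128 entries of the per-frame uhci_bandwidth[] array are each
 * charged 43 bytes, provided none would exceed MAX_PERIODIC_BANDWIDTH
 * together with the isochronous sum.
 */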


/*
 * uhci_deallocate_bandwidth:
 *	Deallocate bandwidth for the given node in the lattice
 *	and the length of transfer.
 */
void
uhci_deallocate_bandwidth(uhci_state_t *uhcip,
    usba_pipe_handle_data_t *pipe_handle)
{
	uint_t		bandwidth;
	uint_t		height;
	uint_t		leftmost;
	uint_t		i;
	uint_t		min;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;
	uint_t		node, length;
	uhci_pipe_private_t *pp =
	    (uhci_pipe_private_t *)pipe_handle->p_hcd_private;

	/* This routine is protected by the uhci_int_mutex */
	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Obtain the length */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	length = uhci_compute_total_bandwidth(endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this is an isochronous endpoint, just delete the endpoint's
	 * bandwidth from the total allocated isochronous bandwidth.
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum -= length;

		return;
	}

	/* Obtain the node */
	node = pp->pp_node;

	/* Adjust the bandwidth to be a power of 2 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/* Find the height in the tree */
	height = uhci_lattice_height(bandwidth);

	/*
	 * Find the leftmost leaf in the subtree specified by the node
	 */
	leftmost = uhci_leftmost_leaf(node, height);

	/* Delete the bandwidth from the appropriate lists */
	for (i = leftmost; i < leftmost + (NUM_FRAME_LST_ENTRIES/bandwidth);
	    i++) {
		uhcip->uhci_bandwidth[i] -= length;
	}

	min = uhcip->uhci_bandwidth[0];

	/* Recompute the minimum */
	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;
}


/*
 * uhci_compute_total_bandwidth:
 *
 * Given a periodic endpoint (interrupt or isochronous) determine the total
 * bandwidth for one transaction. The UHCI host controller traverses the
 * endpoint descriptor lists on a first-come-first-served basis. When the HC
 * services an endpoint, only a single transaction attempt is made. The HC
 * moves to the next Endpoint Descriptor after the first transaction attempt
 * rather than finishing the entire Transfer Descriptor. Therefore, when a
 * Transfer Descriptor is inserted into the lattice, we will only count the
 * number of bytes for one transaction.
 *
 * The following formulas are used for calculating bandwidth in terms of
 * bytes for a single USB full-speed and low-speed transaction
 * respectively. The protocol overheads are different for each type
 * of USB transfer; all these formulas & protocol overheads are derived
 * from section 5.9.3 of the USB Specification and from the Bandwidth
 * Analysis white paper posted on the USB developer forum.
 *
 * Full-Speed:
 *	Protocol overhead + ((MaxPacketSize * 7)/6) + Host_Delay
 *
 * Low-Speed:
 *	Protocol overhead + Hub LS overhead +
 *	    (Low-Speed clock * ((MaxPacketSize * 7)/6)) + Host_Delay
 */
static uint_t
uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
    usb_port_status_t port_status)
{
	uint_t		bandwidth;
	ushort_t	MaxPacketSize = endpoint->wMaxPacketSize;

	/* Add the Host Controller specific delay to the required bandwidth */
	bandwidth = HOST_CONTROLLER_DELAY;

	/* Add the bit-stuffing overhead */
	MaxPacketSize = (ushort_t)((MaxPacketSize * 7) / 6);

	if (port_status == USBA_LOW_SPEED_DEV) {
		/* Low Speed interrupt transaction */
		bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
		    HUB_LOW_SPEED_PROTO_OVERHEAD +
		    (LOW_SPEED_CLOCK * MaxPacketSize));
	} else {
		/* Full Speed transaction */
		bandwidth += MaxPacketSize;

		if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_INTR) {
			/* Full Speed interrupt transaction */
			bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
		} else {
			/* Isochronous IN transaction */
			if (UHCI_XFER_DIR(endpoint) == USB_EP_DIR_IN) {
				bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
			} else {
				/* Isochronous OUT transaction */
				bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
			}
		}
	}

	return (bandwidth);
}
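

/*
 * Worked example for uhci_compute_total_bandwidth() above (symbolic,
 * since the overhead constants live in the uhci headers): a full-speed
 * interrupt IN endpoint with wMaxPacketSize = 8 costs
 *
 *	HOST_CONTROLLER_DELAY + (8 * 7)/6 + FS_NON_ISOC_PROTO_OVERHEAD
 *	= HOST_CONTROLLER_DELAY + 9 + FS_NON_ISOC_PROTO_OVERHEAD bytes
 *
 * per transaction, while the same endpoint on a low-speed device costs
 *
 *	HOST_CONTROLLER_DELAY + LOW_SPEED_PROTO_OVERHEAD +
 *	    HUB_LOW_SPEED_PROTO_OVERHEAD + (LOW_SPEED_CLOCK * 9)
 *
 * where LOW_SPEED_CLOCK is the full-speed to low-speed clock ratio,
 * reflecting the slower low-speed signaling.
 */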


/*
 * uhci_bandwidth_adjust:
 */
static int
uhci_bandwidth_adjust(
	uhci_state_t		*uhcip,
	usb_ep_descr_t		*endpoint,
	usb_port_status_t	port_status)
{
	int	i = 0;
	uint_t	interval;

	/*
	 * Get the polling interval from the endpoint descriptor
	 */
	interval = endpoint->bInterval;

	/*
	 * The bInterval value in the endpoint descriptor can range
	 * from 1 to 255ms. The interrupt lattice has 32 leaf nodes,
	 * and the host controller cycles through these nodes every
	 * 32ms. The longest polling interval that the controller
	 * supports is 32ms.
	 */

	/*
	 * Return an error if the polling interval is less than 1ms
	 * or greater than 255ms.
	 */
	if ((interval < MIN_POLL_INTERVAL) || (interval > MAX_POLL_INTERVAL)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_bandwidth_adjust: Endpoint's poll interval must be "
		    "between %d and %d ms", MIN_POLL_INTERVAL,
		    MAX_POLL_INTERVAL);

		return (USB_FAILURE);
	}

	/*
	 * According to the USB Specification, a full-speed endpoint can
	 * specify a desired polling interval of 1ms to 255ms, and low
	 * speed endpoints are limited to specifying only 10ms to
	 * 255ms. But some old keyboards & mice use a polling interval
	 * of 8ms. For compatibility purposes, we use polling
	 * intervals between 8ms & 255ms for low speed endpoints.
	 */
	if ((port_status == USBA_LOW_SPEED_DEV) &&
	    (interval < MIN_LOW_SPEED_POLL_INTERVAL)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_bandwidth_adjust: Low speed endpoint's poll interval "
		    "must be >= %d ms, adjusted",
		    MIN_LOW_SPEED_POLL_INTERVAL);

		interval = MIN_LOW_SPEED_POLL_INTERVAL;
	}

	/*
	 * If the polling interval is greater than 32ms,
	 * adjust the polling interval to 32ms.
	 */
	if (interval > 32) {
		interval = 32;
	}

	/*
	 * Find the nearest power of 2 that is less than
	 * or equal to the interval.
	 */
	while ((pow_2(i)) <= interval) {
		i++;
	}

	return (pow_2((i - 1)));
}


/*
 * uhci_lattice_height:
 *	Given the requested bandwidth, find the height in the tree at
 *	which the nodes for this bandwidth fall. The height is measured
 *	as the number of nodes from the leaf to the level specified by
 *	bandwidth. The root of the tree is at height TREE_HEIGHT.
 */
static uint_t
uhci_lattice_height(uint_t bandwidth)
{
	return (TREE_HEIGHT - (log_2(bandwidth)));
}


static uint_t
uhci_lattice_parent(uint_t node)
{
	return (((node % 2) == 0) ? ((node/2) - 1) : (node/2));
}


/*
 * uhci_leftmost_leaf:
 *	Find the leftmost leaf in the subtree specified by the node.
 *	Height refers to the number of nodes from the bottom of the tree
 *	to the node, including the node.
 */
static uint_t
uhci_leftmost_leaf(uint_t node, uint_t height)
{
	node = pow_2(height + VIRTUAL_TREE_HEIGHT) * (node+1) -
	    NUM_FRAME_LST_ENTRIES;

	return (node);
}
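

/*
 * Worked example for the helpers above, assuming TREE_HEIGHT and
 * VIRTUAL_TREE_HEIGHT are both 5 (32 leaves, 1024 frame slots): for an
 * adjusted bandwidth of 8, uhci_lattice_height(8) = 5 - 3 = 2, so the
 * QH sits two levels above the leaves. Children of node n are 2n + 1
 * and 2n + 2, and uhci_lattice_parent() inverts that:
 * parent(7) == parent(8) == 3. For node 7 at that level,
 * uhci_leftmost_leaf(7, 2) = pow_2(2 + 5) * 8 - 1024 = 0, and the
 * node's subtree covers frame slots 0..127, i.e. exactly
 * NUM_FRAME_LST_ENTRIES / 8 entries of uhci_bandwidth[].
 */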


/*
 * uhci_insert_qh:
 *	Add the Queue Head (QH) into the Host Controller's (HC)
 *	appropriate queue head list.
 */
void
uhci_insert_qh(uhci_state_t *uhcip, usba_pipe_handle_data_t *ph)
{
	uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	switch (UHCI_XFER_TYPE(&ph->p_ep)) {
	case USB_EP_ATTR_CONTROL:
		uhci_insert_ctrl_qh(uhcip, pp);
		break;
	case USB_EP_ATTR_BULK:
		uhci_insert_bulk_qh(uhcip, pp);
		break;
	case USB_EP_ATTR_INTR:
		uhci_insert_intr_qh(uhcip, pp);
		break;
	case USB_EP_ATTR_ISOCH:
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_insert_qh: Illegal request");
		break;
	}
}


/*
 * uhci_insert_ctrl_qh:
 *	Insert a control QH into the Host Controller's (HC) control QH list.
 */
static void
uhci_insert_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t *qh = pp->pp_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_ctrl_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (uhcip->uhci_ctrl_xfers_q_head == uhcip->uhci_ctrl_xfers_q_tail) {
		uhcip->uhci_ctrl_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
	}

	SetQH32(uhcip, qh->link_ptr,
	    GetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr));
	qh->prev_qh = uhcip->uhci_ctrl_xfers_q_tail;
	SetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr,
	    QH_PADDR(qh) | HC_QUEUE_HEAD);
	uhcip->uhci_ctrl_xfers_q_tail = qh;
}


/*
 * uhci_insert_bulk_qh:
 *	Insert a bulk QH into the Host Controller's (HC) bulk QH list.
 */
static void
uhci_insert_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t *qh = pp->pp_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_bulk_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (uhcip->uhci_bulk_xfers_q_head == uhcip->uhci_bulk_xfers_q_tail) {
		uhcip->uhci_bulk_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
	} else if (uhcip->uhci_bulk_xfers_q_head->link_ptr ==
	    uhcip->uhci_bulk_xfers_q_tail->link_ptr) {

		/* If there is already a loop, we should keep the loop. */
		qh->link_ptr = uhcip->uhci_bulk_xfers_q_tail->link_ptr;
	}

	qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
	SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_tail->link_ptr,
	    QH_PADDR(qh) | HC_QUEUE_HEAD);
	uhcip->uhci_bulk_xfers_q_tail = qh;
}


/*
 * uhci_insert_intr_qh:
 *	Insert a periodic Queue head, i.e. an interrupt queue head, into the
 *	Host Controller's (HC) interrupt lattice tree.
 */
static void
uhci_insert_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uint_t		node = pp->pp_node;	/* The appropriate node was */
						/* found during the opening */
						/* of the pipe. */
	queue_head_t	*qh = pp->pp_qh;
	queue_head_t	*next_lattice_qh, *lattice_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Find the lattice queue head */
	lattice_qh = &uhcip->uhci_qh_pool_addr[node];
	next_lattice_qh =
	    QH_VADDR(GetQH32(uhcip, lattice_qh->link_ptr) & QH_LINK_PTR_MASK);

	next_lattice_qh->prev_qh = qh;
	qh->link_ptr = lattice_qh->link_ptr;
	qh->prev_qh = lattice_qh;
	SetQH32(uhcip, lattice_qh->link_ptr, QH_PADDR(qh) | HC_QUEUE_HEAD);
	pp->pp_data_toggle = 0;
}
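

/*
 * Illustration of the QH linking done above (physical addresses via
 * QH_PADDR, tagged with HC_QUEUE_HEAD): a new control QH is appended
 * after the current control tail, and the bulk list hangs off the
 * control list, preserving the frame-wide order
 *
 *	lattice QHs -> ctrl head ... ctrl tail -> bulk head ... bulk tail
 *
 * Interrupt QHs instead splice into the lattice at their node: the new
 * QH inherits lattice_qh->link_ptr and lattice_qh then points at the
 * new QH, so the leaf-to-root walk passes through it every time the
 * node's frame comes around.
 */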


/*
 * uhci_insert_intr_td:
 *	Create a TD and a data buffer for an interrupt endpoint.
 */
int
uhci_insert_intr_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_intr_req_t		*req,
	usb_flags_t		flags)
{
	int			error, pipe_dir;
	uint_t			length, mps;
	uint32_t		buf_offs;
	uhci_td_t		*tmp_td;
	usb_intr_req_t		*intr_reqp;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_td: req: 0x%p", req);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Get the interrupt pipe direction */
	pipe_dir = UHCI_XFER_DIR(&ph->p_ep);

	/* Get the current interrupt request pointer */
	if (req) {
		length = req->intr_len;
	} else {
		ASSERT(pipe_dir == USB_EP_DIR_IN);
		length = (pp->pp_client_periodic_in_reqp) ?
		    (((usb_intr_req_t *)pp->
		    pp_client_periodic_in_reqp)->intr_len) :
		    ph->p_ep.wMaxPacketSize;
	}

	/* Check the size of the interrupt request */
	if (length > UHCI_MAX_TD_XFER_SIZE) {

		/* the length shouldn't exceed the maximum TD transfer size */
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: Intr request size 0x%lx is "
		    "more than 0x%x", length, UHCI_MAX_TD_XFER_SIZE);

		return (USB_INVALID_REQUEST);
	}

	USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_td: length: 0x%lx", length);

	/* Allocate a transaction wrapper */
	if ((tw = uhci_create_transfer_wrapper(uhcip, pp, length, flags)) ==
	    NULL) {

		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/*
	 * Initialize the callback and any callback
	 * data for when the td completes.
	 */
	tw->tw_handle_td = uhci_handle_intr_td;
	tw->tw_handle_callback_value = NULL;
	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
	    PID_OUT : PID_IN;
	tw->tw_curr_xfer_reqp = (usb_opaque_t)req;

	/*
	 * If it is an Interrupt IN request and the interrupt request is
	 * NULL, allocate the usb interrupt request structure for the
	 * current interrupt polling request.
	 */
	if (tw->tw_direction == PID_IN) {
		if ((error = uhci_allocate_periodic_in_resource(uhcip,
		    pp, tw, flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_insert_intr_td: Interrupt request structure "
			    "allocation failed");

			/* free the transfer wrapper */
			uhci_deallocate_tw(uhcip, pp, tw);

			return (error);
		}
	}

	intr_reqp = (usb_intr_req_t *)tw->tw_curr_xfer_reqp;
	ASSERT(tw->tw_curr_xfer_reqp != NULL);

	tw->tw_timeout_cnt = (intr_reqp->intr_attributes & USB_ATTRS_ONE_XFER) ?
	    intr_reqp->intr_timeout : 0;

	/* DATA IN */
	if (tw->tw_direction == PID_IN) {
		/* Insert the td onto the queue head */
		error = uhci_insert_hc_td(uhcip, 0,
		    length, pp, tw, PID_IN, intr_reqp->intr_attributes);

		if (error != USB_SUCCESS) {

			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
			/* free the transfer wrapper */
			uhci_deallocate_tw(uhcip, pp, tw);

			return (USB_NO_RESOURCES);
		}
		tw->tw_bytes_xfered = 0;

		return (USB_SUCCESS);
	}

	if (req->intr_len) {
		/* DATA OUT */
		ASSERT(req->intr_data != NULL);

		/* Copy the data into the message */
		ddi_rep_put8(tw->tw_accesshandle, req->intr_data->b_rptr,
		    (uint8_t *)tw->tw_buf, req->intr_len, DDI_DEV_AUTOINCR);
	}

	/* set tw->tw_claim flag, so that nobody else works on this tw. */
	tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;

	mps = ph->p_ep.wMaxPacketSize;
	buf_offs = 0;

	/* Insert tds onto the queue head */
	while (length > 0) {

		error = uhci_insert_hc_td(uhcip, buf_offs,
		    (length > mps) ? mps : length,
		    pp, tw, PID_OUT,
		    intr_reqp->intr_attributes);

		if (error != USB_SUCCESS) {
			/* no resource. */
			break;
		}

		if (length <= mps) {
			/* inserted all data. */
			length = 0;

		} else {

			buf_offs += mps;
			length -= mps;
		}
	}

	if (error != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: allocate td failed, free resource");

		/* remove all the tds */
		while (tw->tw_hctd_head != NULL) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		tw->tw_claim = UHCI_NOT_CLAIMED;
		uhci_deallocate_tw(uhcip, pp, tw);

		return (error);
	}

	/* allow the HC to xfer the tds of this tw */
	tmp_td = tw->tw_hctd_head;
	while (tmp_td != NULL) {

		SetTD_status(uhcip, tmp_td, UHCI_TD_ACTIVE);
		tmp_td = tmp_td->tw_td_next;
	}

	tw->tw_bytes_xfered = 0;
	tw->tw_claim = UHCI_NOT_CLAIMED;

	return (error);
}
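

/*
 * Worked example of the cookie-alignment argument laid out in the
 * comment below: if each DMA cookie of a transfer buffer starts on a
 * 2K boundary, then every cookie except possibly the last has a size
 * that is a multiple of 2K, hence a multiple of every legal ctrl/bulk
 * wMaxPacketSize (8, 16, 32 or 64) and larger than the 0x500 byte
 * maximum TD length. 4K alignment satisfies the same two conditions,
 * which is why either choice works.
 */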


/*
 * uhci_create_transfer_wrapper:
 *	Create a Transaction Wrapper (TW) for non-isoc transfer types.
 *	This involves the allocating of DMA resources.
 *
 *	For non-isoc transfers, one DMA handle and one DMA buffer are
 *	allocated per transfer. The DMA buffer may contain multiple
 *	DMA cookies and the cookies should meet certain alignment
 *	requirements to be able to fit in the multiple TDs. The alignment
 *	needs to ensure:
 *	1. the size of a cookie be larger than max TD length (0x500)
 *	2. the size of a cookie be a multiple of wMaxPacketSize of the
 *	ctrl/bulk pipes
 *
 *	wMaxPacketSize for ctrl and bulk pipes may be 8, 16, 32 or 64 bytes.
 *	So the alignment should be a multiple of 64. wMaxPacketSize for intr
 *	pipes is a little different since it only specifies the max to be
 *	64 bytes, but as long as an intr transfer is limited to max TD length,
 *	any alignment can work if the cookie size is larger than max TD length.
 *
 *	Considering the above conditions, 2K alignment is used. 4K alignment
 *	should also be fine.
 */
static uhci_trans_wrapper_t *
uhci_create_transfer_wrapper(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	size_t			length,
	usb_flags_t		usb_flags)
{
	size_t			real_length;
	uhci_trans_wrapper_t	*tw;
	ddi_device_acc_attr_t	dev_attr;
	ddi_dma_attr_t		dma_attr;
	int			kmem_flag;
	int			(*dmamem_wait)(caddr_t);
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
	    length, usb_flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* isochronous pipes should not call into this function */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {

		return (NULL);
	}

	/* The SLEEP flag should not be used in interrupt context */
	if (servicing_interrupt()) {
		kmem_flag = KM_NOSLEEP;
		dmamem_wait = DDI_DMA_DONTWAIT;
	} else {
		kmem_flag = KM_SLEEP;
		dmamem_wait = DDI_DMA_SLEEP;
	}

	/* Allocate space for the transfer wrapper */
	if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
	    NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: kmem_zalloc failed");

		return (NULL);
	}

	/* a zero-length packet doesn't need to allocate dma memory */
	if (length == 0) {

		goto dmadone;
	}

	/* allow sg lists for transfer wrapper dma memory */
	bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
	dma_attr.dma_attr_sgllen = UHCI_DMA_ATTR_SGLLEN;
	dma_attr.dma_attr_align = UHCI_DMA_ATTR_ALIGN;

	/* Store the transfer length */
	tw->tw_length = length;

	/* Allocate the DMA handle */
	if (ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr, dmamem_wait,
	    0, &tw->tw_dmahandle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: Alloc handle failed");
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Allocate the memory */
	if (ddi_dma_mem_alloc(tw->tw_dmahandle, tw->tw_length, &dev_attr,
	    DDI_DMA_CONSISTENT, dmamem_wait, NULL, (caddr_t *)&tw->tw_buf,
	    &real_length, &tw->tw_accesshandle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: dma_mem_alloc fail");
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	ASSERT(real_length >= length);

	/* Bind the handle */
	if (ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies) !=
	    DDI_DMA_MAPPED) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: Bind handle failed");
		ddi_dma_mem_free(&tw->tw_accesshandle);
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	tw->tw_cookie_idx = 0;
	tw->tw_dma_offs = 0;

dmadone:
	/*
	 * Only allow one wrapper to be added at a time. Insert the
	 * new transaction wrapper into the list for this pipe.
	 */
Insert the 1835 * new transaction wrapper into the list for this pipe. 1836 */ 1837 if (pp->pp_tw_head == NULL) { 1838 pp->pp_tw_head = tw; 1839 pp->pp_tw_tail = tw; 1840 } else { 1841 pp->pp_tw_tail->tw_next = tw; 1842 pp->pp_tw_tail = tw; 1843 ASSERT(tw->tw_next == NULL); 1844 } 1845 1846 /* Store a back pointer to the pipe private structure */ 1847 tw->tw_pipe_private = pp; 1848 1849 /* Store the transfer type - synchronous or asynchronous */ 1850 tw->tw_flags = usb_flags; 1851 1852 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1853 "uhci_create_transfer_wrapper: tw = 0x%p, ncookies = %u", 1854 tw, tw->tw_ncookies); 1855 1856 return (tw); 1857 } 1858 1859 1860 /* 1861 * uhci_insert_hc_td: 1862 * Insert a Transfer Descriptor (TD) on a QH. 1863 */ 1864 int 1865 uhci_insert_hc_td( 1866 uhci_state_t *uhcip, 1867 uint32_t buffer_offset, 1868 size_t hcgtd_length, 1869 uhci_pipe_private_t *pp, 1870 uhci_trans_wrapper_t *tw, 1871 uchar_t PID, 1872 usb_req_attrs_t attrs) 1873 { 1874 uhci_td_t *td, *current_dummy; 1875 queue_head_t *qh = pp->pp_qh; 1876 1877 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 1878 1879 if ((td = uhci_allocate_td_from_pool(uhcip)) == NULL) { 1880 1881 return (USB_NO_RESOURCES); 1882 } 1883 1884 current_dummy = qh->td_tailp; 1885 1886 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 1887 "uhci_insert_hc_td: td %p, attrs = 0x%x", td, attrs); 1888 1889 /* 1890 * Fill in the current dummy TD and 1891 * add the new dummy to the end. 1892 */ 1893 uhci_fill_in_td(uhcip, td, current_dummy, buffer_offset, 1894 hcgtd_length, pp, PID, attrs, tw); 1895 1896 /* 1897 * Allow the HC hardware to transfer the TD, except for an interrupt-out TD. 1898 */ 1899 if ((tw->tw_handle_td != uhci_handle_intr_td) || (PID != PID_OUT)) { 1900 1901 SetTD_status(uhcip, current_dummy, UHCI_TD_ACTIVE); 1902 } 1903 1904 /* Insert this TD onto the TW */ 1905 1906 if (tw->tw_hctd_head == NULL) { 1907 ASSERT(tw->tw_hctd_tail == NULL); 1908 tw->tw_hctd_head = current_dummy; 1909 tw->tw_hctd_tail = current_dummy; 1910 } else { 1911 /* Add the TD to the end of the list */ 1912 tw->tw_hctd_tail->tw_td_next = current_dummy; 1913 tw->tw_hctd_tail = current_dummy; 1914 } 1915 1916 /* 1917 * Insert the TD onto the QH. When this occurs, 1918 * the Host Controller will see the newly filled-in TD. 1919 */ 1920 current_dummy->outst_td_next = NULL; 1921 current_dummy->outst_td_prev = uhcip->uhci_outst_tds_tail; 1922 if (uhcip->uhci_outst_tds_head == NULL) { 1923 uhcip->uhci_outst_tds_head = current_dummy; 1924 } else { 1925 uhcip->uhci_outst_tds_tail->outst_td_next = current_dummy; 1926 } 1927 uhcip->uhci_outst_tds_tail = current_dummy; 1928 current_dummy->tw = tw; 1929 1930 return (USB_SUCCESS); 1931 } 1932 1933 1934 /* 1935 * uhci_fill_in_td: 1936 * Fill in the fields of a Transfer Descriptor (TD).
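 *
 * Note that the UHCI MaxLen field is n-1 encoded, which the
 * SetTD_mlen() call below implements; a minimal sketch of the
 * encoding (illustrative only, assuming ZERO_LENGTH is the spec's
 * 0x7FF encoding for a zero-length packet):
 *
 *	uint16_t mlen = (length == 0) ? 0x7FF : (uint16_t)(length - 1);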
1937 */ 1938 static void 1939 uhci_fill_in_td( 1940 uhci_state_t *uhcip, 1941 uhci_td_t *td, 1942 uhci_td_t *current_dummy, 1943 uint32_t buffer_offset, 1944 size_t length, 1945 uhci_pipe_private_t *pp, 1946 uchar_t PID, 1947 usb_req_attrs_t attrs, 1948 uhci_trans_wrapper_t *tw) 1949 { 1950 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle; 1951 uint32_t buf_addr; 1952 1953 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 1954 "uhci_fill_in_td: td 0x%p buf_offs 0x%x len 0x%lx " 1955 "attrs 0x%x", td, buffer_offset, length, attrs); 1956 1957 /* 1958 * If this is an isochronous TD, just return 1959 */ 1960 if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) { 1961 1962 return; 1963 } 1964 1965 /* The maximum transfer length of UHCI cannot exceed 0x500 bytes */ 1966 ASSERT(length <= UHCI_MAX_TD_XFER_SIZE); 1967 1968 bzero((char *)td, sizeof (uhci_td_t)); /* Clear the TD */ 1969 SetTD32(uhcip, current_dummy->link_ptr, TD_PADDR(td)); 1970 1971 if (attrs & USB_ATTRS_SHORT_XFER_OK) { 1972 SetTD_spd(uhcip, current_dummy, 1); 1973 } 1974 1975 mutex_enter(&ph->p_usba_device->usb_mutex); 1976 if (ph->p_usba_device->usb_port_status == USBA_LOW_SPEED_DEV) { 1977 SetTD_ls(uhcip, current_dummy, LOW_SPEED_DEVICE); 1978 } 1979 1980 SetTD_c_err(uhcip, current_dummy, UHCI_MAX_ERR_COUNT); 1981 SetTD_mlen(uhcip, current_dummy, 1982 (length == 0) ? ZERO_LENGTH : (length - 1)); 1983 SetTD_dtogg(uhcip, current_dummy, pp->pp_data_toggle); 1984 1985 /* Adjust the data toggle bit */ 1986 ADJ_DATA_TOGGLE(pp); 1987 1988 SetTD_devaddr(uhcip, current_dummy, ph->p_usba_device->usb_addr); 1989 SetTD_endpt(uhcip, current_dummy, 1990 ph->p_ep.bEndpointAddress & END_POINT_ADDRESS_MASK); 1991 SetTD_PID(uhcip, current_dummy, PID); 1992 SetTD_ioc(uhcip, current_dummy, INTERRUPT_ON_COMPLETION); 1993 1994 buf_addr = uhci_get_tw_paddr_by_offs(uhcip, buffer_offset, length, tw); 1995 SetTD32(uhcip, current_dummy->buffer_address, buf_addr); 1996 1997 td->qh_td_prev = current_dummy; 1998 current_dummy->qh_td_prev = NULL; 1999 pp->pp_qh->td_tailp = td; 2000 mutex_exit(&ph->p_usba_device->usb_mutex); 2001 } 2002 2003 /* 2004 * uhci_get_tw_paddr_by_offs: 2005 * Walk through the DMA cookies of a TW buffer to retrieve 2006 * the device address used for a TD. 2007 * 2008 * buffer_offset - the starting offset into the TW buffer, where the 2009 * TD should transfer from. When a TW has more than 2010 * one TD, the TDs must be filled in increasing order. 2011 */ 2012 static uint32_t 2013 uhci_get_tw_paddr_by_offs( 2014 uhci_state_t *uhcip, 2015 uint32_t buffer_offset, 2016 size_t length, 2017 uhci_trans_wrapper_t *tw) 2018 { 2019 uint32_t buf_addr; 2020 int rem_len; 2021 2022 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2023 "uhci_get_tw_paddr_by_offs: buf_offs 0x%x len 0x%lx", 2024 buffer_offset, length); 2025 2026 /* 2027 * TDs must be filled in increasing DMA offset order. 2028 * tw_dma_offs is initialized to be 0 at TW creation and 2029 * is only increased in this function. 2030 */ 2031 ASSERT(length == 0 || buffer_offset >= tw->tw_dma_offs); 2032 2033 if (length == 0) { 2034 buf_addr = 0; 2035 2036 return (buf_addr); 2037 } 2038 2039 /* 2040 * Advance to the next DMA cookie until finding the cookie 2041 * that buffer_offset falls in. 2042 * It is very likely this loop will never repeat more than 2043 * once. It is here just to accommodate the case buffer_offset 2044 * is increased by multiple cookies during two consecutive 2045 * calls into this function. In that case, the interim DMA 2046 * buffer is allowed to be skipped. 
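 *
 * A self-contained sketch of the offset-to-address mapping done
 * below, using a plain array in place of the ddi_dma_nextcookie()
 * stream (illustrative only; fake_cookie_t is a hypothetical
 * stand-in for ddi_dma_cookie_t):
 *
 *	typedef struct { uint32_t addr; size_t size; } fake_cookie_t;
 *
 *	uint32_t
 *	map_offs(fake_cookie_t *c, size_t offs)
 *	{
 *		uint_t i = 0;
 *		size_t dma_offs = 0;
 *
 *		while (dma_offs + c[i].size <= offs) {
 *			dma_offs += c[i].size;
 *			i++;
 *		}
 *
 *		return (c[i].addr + (uint32_t)(offs - dma_offs));
 *	}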
2047 */ 2048 while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <= 2049 buffer_offset) { 2050 /* 2051 * tw_dma_offs always points to the starting offset 2052 * of a cookie 2053 */ 2054 tw->tw_dma_offs += tw->tw_cookie.dmac_size; 2055 ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie); 2056 tw->tw_cookie_idx++; 2057 ASSERT(tw->tw_cookie_idx < tw->tw_ncookies); 2058 } 2059 2060 /* 2061 * Count the remaining buffer length to be filled in 2062 * the TDs for the current DMA cookie 2063 */ 2064 rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) - 2065 buffer_offset; 2066 2067 /* Calculate the beginning address of the buffer */ 2068 ASSERT(length <= rem_len); 2069 buf_addr = (buffer_offset - tw->tw_dma_offs) + 2070 tw->tw_cookie.dmac_address; 2071 2072 USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2073 "uhci_get_tw_paddr_by_offs: dmac_addr 0x%p dmac_size " 2074 "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size, 2075 tw->tw_cookie_idx); 2076 2077 return (buf_addr); 2078 } 2079 2080 2081 /* 2082 * uhci_modify_td_active_bits: 2083 * Mark the TDs of the QH INACTIVE so that the HC 2084 * stops processing the TDs related to the QH. 2085 */ 2086 void 2087 uhci_modify_td_active_bits( 2088 uhci_state_t *uhcip, 2089 uhci_pipe_private_t *pp) 2090 { 2091 uhci_td_t *td_head; 2092 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep; 2093 uhci_trans_wrapper_t *tw_head = pp->pp_tw_head; 2094 2095 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2096 "uhci_modify_td_active_bits: tw head %p", (void *)tw_head); 2097 2098 while (tw_head != NULL) { 2099 tw_head->tw_claim = UHCI_MODIFY_TD_BITS_CLAIMED; 2100 td_head = tw_head->tw_hctd_head; 2101 2102 while (td_head) { 2103 if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) { 2104 SetTD_status(uhcip, td_head, 2105 GetTD_status(uhcip, td_head) & TD_INACTIVE); 2106 } else { 2107 SetTD32(uhcip, td_head->link_ptr, 2108 GetTD32(uhcip, td_head->link_ptr) | 2109 HC_END_OF_LIST); 2110 } 2111 2112 td_head = td_head->tw_td_next; 2113 } 2114 tw_head = tw_head->tw_next; 2115 } 2116 } 2117 2118 2119 /* 2120 * uhci_insert_ctrl_td: 2121 * Create a TD and a data buffer for a control Queue Head. 2122 */ 2123 int 2124 uhci_insert_ctrl_td( 2125 uhci_state_t *uhcip, 2126 usba_pipe_handle_data_t *ph, 2127 usb_ctrl_req_t *ctrl_reqp, 2128 usb_flags_t flags) 2129 { 2130 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2131 uhci_trans_wrapper_t *tw; 2132 size_t ctrl_buf_size; 2133 2134 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2135 "uhci_insert_ctrl_td: timeout: 0x%x", ctrl_reqp->ctrl_timeout); 2136 2137 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2138 2139 /* 2140 * If we have a control data phase, make the data buffer start 2141 * on the next 64-byte boundary so that the DMA cookie 2142 * can fit in the multiple TDs. The buffer in the range of 2143 * [SETUP_SIZE, UHCI_CTRL_EPT_MAX_SIZE) is just for padding 2144 * and not to be transferred.
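 *
 * Resulting buffer layout for a control transfer with a data phase
 * (offsets shown assuming SETUP_SIZE is the standard 8-byte setup
 * packet and UHCI_CTRL_EPT_MAX_SIZE is 64):
 *
 *	offset:  0          8                64            64 + wLength
 *	         +----------+----------------+----------------+
 *	         |  setup   |    padding     |   data phase   |
 *	         +----------+----------------+----------------+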
2145 */ 2146 if (ctrl_reqp->ctrl_wLength) { 2147 ctrl_buf_size = UHCI_CTRL_EPT_MAX_SIZE + 2148 ctrl_reqp->ctrl_wLength; 2149 } else { 2150 ctrl_buf_size = SETUP_SIZE; 2151 } 2152 2153 /* Allocate a transaction wrapper */ 2154 if ((tw = uhci_create_transfer_wrapper(uhcip, pp, 2155 ctrl_buf_size, flags)) == NULL) { 2156 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2157 "uhci_insert_ctrl_td: TW allocation failed"); 2158 2159 return (USB_NO_RESOURCES); 2160 } 2161 2162 pp->pp_data_toggle = 0; 2163 2164 tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp; 2165 tw->tw_bytes_xfered = 0; 2166 tw->tw_bytes_pending = ctrl_reqp->ctrl_wLength; 2167 tw->tw_timeout_cnt = max(UHCI_CTRL_TIMEOUT, ctrl_reqp->ctrl_timeout); 2168 2169 /* 2170 * Initialize the callback and any callback 2171 * data for when the TD completes. 2172 */ 2173 tw->tw_handle_td = uhci_handle_ctrl_td; 2174 tw->tw_handle_callback_value = NULL; 2175 2176 if ((uhci_create_setup_pkt(uhcip, pp, tw)) != USB_SUCCESS) { 2177 tw->tw_ctrl_state = 0; 2178 2179 /* free the transfer wrapper */ 2180 uhci_deallocate_tw(uhcip, pp, tw); 2181 2182 return (USB_NO_RESOURCES); 2183 } 2184 2185 tw->tw_ctrl_state = SETUP; 2186 2187 return (USB_SUCCESS); 2188 } 2189 2190 2191 /* 2192 * uhci_create_setup_pkt: 2193 * Create a setup packet to initiate a control transfer. 2194 * 2195 * The OHCI driver has seen cases where devices fail if there is 2196 * more than one control transfer to the device within a frame. 2197 * So the UHCI driver ensures that only one TD is put on the control 2198 * pipe to a device (to be consistent with the OHCI driver). 2199 */ 2200 static int 2201 uhci_create_setup_pkt( 2202 uhci_state_t *uhcip, 2203 uhci_pipe_private_t *pp, 2204 uhci_trans_wrapper_t *tw) 2205 { 2206 int sdata; 2207 usb_ctrl_req_t *req = (usb_ctrl_req_t *)tw->tw_curr_xfer_reqp; 2208 2209 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2210 "uhci_create_setup_pkt: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%p", 2211 req->ctrl_bmRequestType, req->ctrl_bRequest, req->ctrl_wValue, 2212 req->ctrl_wIndex, req->ctrl_wLength, (void *)req->ctrl_data); 2213 2214 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2215 ASSERT(tw != NULL); 2216 2217 /* Create the first four bytes of the setup packet */ 2218 sdata = (req->ctrl_bmRequestType | (req->ctrl_bRequest << 8) | 2219 (req->ctrl_wValue << 16)); 2220 ddi_put32(tw->tw_accesshandle, (uint_t *)tw->tw_buf, sdata); 2221 2222 /* Create the second four bytes */ 2223 sdata = (uint32_t)(req->ctrl_wIndex | (req->ctrl_wLength << 16)); 2224 ddi_put32(tw->tw_accesshandle, 2225 (uint_t *)(tw->tw_buf + sizeof (uint_t)), sdata); 2226 2227 /* 2228 * The TDs are placed on the QH one at a time. 2229 * Once this TD is placed on the done list, the 2230 * data or status phase TD will be enqueued. 2231 */ 2232 if ((uhci_insert_hc_td(uhcip, 0, SETUP_SIZE, 2233 pp, tw, PID_SETUP, req->ctrl_attributes)) != USB_SUCCESS) { 2234 2235 return (USB_NO_RESOURCES); 2236 } 2237 2238 USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2239 "Create_setup: pp = 0x%p, attrs = 0x%x", pp, req->ctrl_attributes); 2240 2241 /* 2242 * If this control transfer has a data phase, record the 2243 * direction. If the data phase is an OUT transaction, 2244 * copy the data into the buffer of the transfer wrapper. 2245 */ 2246 if (req->ctrl_wLength != 0) { 2247 /* There is a data stage.
Find the direction */ 2248 if (req->ctrl_bmRequestType & USB_DEV_REQ_DEV_TO_HOST) { 2249 tw->tw_direction = PID_IN; 2250 } else { 2251 tw->tw_direction = PID_OUT; 2252 2253 /* Copy the data into the buffer */ 2254 ddi_rep_put8(tw->tw_accesshandle, 2255 req->ctrl_data->b_rptr, 2256 (uint8_t *)(tw->tw_buf + UHCI_CTRL_EPT_MAX_SIZE), 2257 req->ctrl_wLength, 2258 DDI_DEV_AUTOINCR); 2259 } 2260 } 2261 2262 return (USB_SUCCESS); 2263 } 2264 2265 2266 /* 2267 * uhci_create_stats: 2268 * Allocate and initialize the uhci kstat structures 2269 */ 2270 void 2271 uhci_create_stats(uhci_state_t *uhcip) 2272 { 2273 int i; 2274 char kstatname[KSTAT_STRLEN]; 2275 char *usbtypes[USB_N_COUNT_KSTATS] = 2276 {"ctrl", "isoch", "bulk", "intr"}; 2277 uint_t instance = uhcip->uhci_instance; 2278 const char *dname = ddi_driver_name(uhcip->uhci_dip); 2279 uhci_intrs_stats_t *isp; 2280 2281 if (UHCI_INTRS_STATS(uhcip) == NULL) { 2282 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs", 2283 dname, instance); 2284 UHCI_INTRS_STATS(uhcip) = kstat_create("usba", instance, 2285 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED, 2286 sizeof (uhci_intrs_stats_t) / sizeof (kstat_named_t), 2287 KSTAT_FLAG_PERSISTENT); 2288 2289 if (UHCI_INTRS_STATS(uhcip) != NULL) { 2290 isp = UHCI_INTRS_STATS_DATA(uhcip); 2291 kstat_named_init(&isp->uhci_intrs_hc_halted, 2292 "HC Halted", KSTAT_DATA_UINT64); 2293 kstat_named_init(&isp->uhci_intrs_hc_process_err, 2294 "HC Process Errors", KSTAT_DATA_UINT64); 2295 kstat_named_init(&isp->uhci_intrs_host_sys_err, 2296 "Host Sys Errors", KSTAT_DATA_UINT64); 2297 kstat_named_init(&isp->uhci_intrs_resume_detected, 2298 "Resume Detected", KSTAT_DATA_UINT64); 2299 kstat_named_init(&isp->uhci_intrs_usb_err_intr, 2300 "USB Error", KSTAT_DATA_UINT64); 2301 kstat_named_init(&isp->uhci_intrs_usb_intr, 2302 "USB Interrupts", KSTAT_DATA_UINT64); 2303 kstat_named_init(&isp->uhci_intrs_total, 2304 "Total Interrupts", KSTAT_DATA_UINT64); 2305 kstat_named_init(&isp->uhci_intrs_not_claimed, 2306 "Not Claimed", KSTAT_DATA_UINT64); 2307 2308 UHCI_INTRS_STATS(uhcip)->ks_private = uhcip; 2309 UHCI_INTRS_STATS(uhcip)->ks_update = nulldev; 2310 kstat_install(UHCI_INTRS_STATS(uhcip)); 2311 } 2312 } 2313 2314 if (UHCI_TOTAL_STATS(uhcip) == NULL) { 2315 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total", 2316 dname, instance); 2317 UHCI_TOTAL_STATS(uhcip) = kstat_create("usba", instance, 2318 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1, 2319 KSTAT_FLAG_PERSISTENT); 2320 2321 if (UHCI_TOTAL_STATS(uhcip) != NULL) { 2322 kstat_install(UHCI_TOTAL_STATS(uhcip)); 2323 } 2324 } 2325 2326 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 2327 if (uhcip->uhci_count_stats[i] == NULL) { 2328 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s", 2329 dname, instance, usbtypes[i]); 2330 uhcip->uhci_count_stats[i] = kstat_create("usba", 2331 instance, kstatname, "usb_byte_count", 2332 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 2333 2334 if (uhcip->uhci_count_stats[i] != NULL) { 2335 kstat_install(uhcip->uhci_count_stats[i]); 2336 } 2337 } 2338 } 2339 } 2340 2341 2342 /* 2343 * uhci_destroy_stats: 2344 * Clean up uhci kstat structures 2345 */ 2346 void 2347 uhci_destroy_stats(uhci_state_t *uhcip) 2348 { 2349 int i; 2350 2351 if (UHCI_INTRS_STATS(uhcip)) { 2352 kstat_delete(UHCI_INTRS_STATS(uhcip)); 2353 UHCI_INTRS_STATS(uhcip) = NULL; 2354 } 2355 2356 if (UHCI_TOTAL_STATS(uhcip)) { 2357 kstat_delete(UHCI_TOTAL_STATS(uhcip)); 2358 UHCI_TOTAL_STATS(uhcip) = NULL; 2359 } 2360 2361 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 2362 if 
(uhcip->uhci_count_stats[i]) { 2363 kstat_delete(uhcip->uhci_count_stats[i]); 2364 uhcip->uhci_count_stats[i] = NULL; 2365 } 2366 } 2367 } 2368 2369 2370 void 2371 uhci_do_intrs_stats(uhci_state_t *uhcip, int val) 2372 { 2373 if (UHCI_INTRS_STATS(uhcip) == NULL) { 2374 2375 return; 2376 } 2377 2378 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_total.value.ui64++; 2379 switch (val) { 2380 case USBSTS_REG_HC_HALTED: 2381 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_hc_halted.value.ui64++; 2382 break; 2383 case USBSTS_REG_HC_PROCESS_ERR: 2384 UHCI_INTRS_STATS_DATA(uhcip)-> 2385 uhci_intrs_hc_process_err.value.ui64++; 2386 break; 2387 case USBSTS_REG_HOST_SYS_ERR: 2388 UHCI_INTRS_STATS_DATA(uhcip)-> 2389 uhci_intrs_host_sys_err.value.ui64++; 2390 break; 2391 case USBSTS_REG_RESUME_DETECT: 2392 UHCI_INTRS_STATS_DATA(uhcip)-> 2393 uhci_intrs_resume_detected.value.ui64++; 2394 break; 2395 case USBSTS_REG_USB_ERR_INTR: 2396 UHCI_INTRS_STATS_DATA(uhcip)-> 2397 uhci_intrs_usb_err_intr.value.ui64++; 2398 break; 2399 case USBSTS_REG_USB_INTR: 2400 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_usb_intr.value.ui64++; 2401 break; 2402 default: 2403 UHCI_INTRS_STATS_DATA(uhcip)-> 2404 uhci_intrs_not_claimed.value.ui64++; 2405 break; 2406 } 2407 } 2408 2409 2410 void 2411 uhci_do_byte_stats(uhci_state_t *uhcip, size_t len, uint8_t attr, uint8_t addr) 2412 { 2413 uint8_t type = attr & USB_EP_ATTR_MASK; 2414 uint8_t dir = addr & USB_EP_DIR_MASK; 2415 2416 switch (dir) { 2417 case USB_EP_DIR_IN: 2418 UHCI_TOTAL_STATS_DATA(uhcip)->reads++; 2419 UHCI_TOTAL_STATS_DATA(uhcip)->nread += len; 2420 switch (type) { 2421 case USB_EP_ATTR_CONTROL: 2422 UHCI_CTRL_STATS(uhcip)->reads++; 2423 UHCI_CTRL_STATS(uhcip)->nread += len; 2424 break; 2425 case USB_EP_ATTR_BULK: 2426 UHCI_BULK_STATS(uhcip)->reads++; 2427 UHCI_BULK_STATS(uhcip)->nread += len; 2428 break; 2429 case USB_EP_ATTR_INTR: 2430 UHCI_INTR_STATS(uhcip)->reads++; 2431 UHCI_INTR_STATS(uhcip)->nread += len; 2432 break; 2433 case USB_EP_ATTR_ISOCH: 2434 UHCI_ISOC_STATS(uhcip)->reads++; 2435 UHCI_ISOC_STATS(uhcip)->nread += len; 2436 break; 2437 } 2438 break; 2439 case USB_EP_DIR_OUT: 2440 UHCI_TOTAL_STATS_DATA(uhcip)->writes++; 2441 UHCI_TOTAL_STATS_DATA(uhcip)->nwritten += len; 2442 switch (type) { 2443 case USB_EP_ATTR_CONTROL: 2444 UHCI_CTRL_STATS(uhcip)->writes++; 2445 UHCI_CTRL_STATS(uhcip)->nwritten += len; 2446 break; 2447 case USB_EP_ATTR_BULK: 2448 UHCI_BULK_STATS(uhcip)->writes++; 2449 UHCI_BULK_STATS(uhcip)->nwritten += len; 2450 break; 2451 case USB_EP_ATTR_INTR: 2452 UHCI_INTR_STATS(uhcip)->writes++; 2453 UHCI_INTR_STATS(uhcip)->nwritten += len; 2454 break; 2455 case USB_EP_ATTR_ISOCH: 2456 UHCI_ISOC_STATS(uhcip)->writes++; 2457 UHCI_ISOC_STATS(uhcip)->nwritten += len; 2458 break; 2459 } 2460 break; 2461 } 2462 } 2463 2464 2465 /* 2466 * uhci_free_tw: 2467 * Free the Transfer Wrapper (TW). 
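 *
 * The DMA resources are released in the reverse order of their
 * allocation: ddi_dma_unbind_handle(), then ddi_dma_mem_free(),
 * then ddi_dma_free_handle(); e.g. for the non-isoc case below:
 *
 *	(void) ddi_dma_unbind_handle(tw->tw_dmahandle);
 *	ddi_dma_mem_free(&tw->tw_accesshandle);
 *	ddi_dma_free_handle(&tw->tw_dmahandle);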
2468 */ 2469 void 2470 uhci_free_tw(uhci_state_t *uhcip, uhci_trans_wrapper_t *tw) 2471 { 2472 int rval, i; 2473 2474 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, "uhci_free_tw:"); 2475 2476 ASSERT(tw != NULL); 2477 2478 if (tw->tw_isoc_strtlen > 0) { 2479 ASSERT(tw->tw_isoc_bufs != NULL); 2480 for (i = 0; i < tw->tw_ncookies; i++) { 2481 rval = ddi_dma_unbind_handle( 2482 tw->tw_isoc_bufs[i].dma_handle); 2483 ASSERT(rval == DDI_SUCCESS); 2484 ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle); 2485 ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle); 2486 } 2487 kmem_free(tw->tw_isoc_bufs, tw->tw_isoc_strtlen); 2488 } else if (tw->tw_dmahandle != NULL) { 2489 rval = ddi_dma_unbind_handle(tw->tw_dmahandle); 2490 ASSERT(rval == DDI_SUCCESS); 2491 2492 ddi_dma_mem_free(&tw->tw_accesshandle); 2493 ddi_dma_free_handle(&tw->tw_dmahandle); 2494 } 2495 2496 kmem_free(tw, sizeof (uhci_trans_wrapper_t)); 2497 } 2498 2499 2500 /* 2501 * uhci_deallocate_tw: 2502 * Deallocate a Transaction Wrapper (TW); this involves 2503 * freeing its DMA resources. 2504 */ 2505 void 2506 uhci_deallocate_tw(uhci_state_t *uhcip, 2507 uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw) 2508 { 2509 uhci_trans_wrapper_t *head; 2510 2511 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2512 "uhci_deallocate_tw:"); 2513 2514 /* 2515 * If the transfer wrapper has no Host Controller (HC) 2516 * Transfer Descriptors (TDs) associated with it, then 2517 * remove the transfer wrapper. The transfers are done 2518 * in FIFO order, so this should be the first transfer 2519 * wrapper on the list. 2520 */ 2521 if (tw->tw_hctd_head != NULL) { 2522 ASSERT(tw->tw_hctd_tail != NULL); 2523 2524 return; 2525 } 2526 2527 ASSERT(tw->tw_hctd_tail == NULL); 2528 ASSERT(pp->pp_tw_head != NULL); 2529 2530 /* 2531 * Unlink the TW from the pipe's TW list; if the list becomes empty, clear the tail pointer as well.
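 *
 * The unlink below is a standard singly-linked list removal with
 * tail maintenance; a generic, self-contained sketch of the same
 * pattern (illustrative only, with a hypothetical node type):
 *
 *	typedef struct node { struct node *next; } node_t;
 *
 *	void
 *	list_remove(node_t **headp, node_t **tailp, node_t *n)
 *	{
 *		node_t *p = *headp;
 *
 *		if (p == n) {
 *			*headp = n->next;
 *			if (*headp == NULL)
 *				*tailp = NULL;
 *		} else {
 *			while (p->next != n)
 *				p = p->next;
 *			p->next = n->next;
 *			if (n->next == NULL)
 *				*tailp = p;
 *		}
 *	}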
2532 */ 2533 head = pp->pp_tw_head; 2534 2535 if (head == tw) { 2536 pp->pp_tw_head = head->tw_next; 2537 if (pp->pp_tw_head == NULL) { 2538 pp->pp_tw_tail = NULL; 2539 } 2540 } else { 2541 while (head->tw_next != tw) 2542 head = head->tw_next; 2543 head->tw_next = tw->tw_next; 2544 if (tw->tw_next == NULL) { 2545 pp->pp_tw_tail = head; 2546 } 2547 } 2548 uhci_free_tw(uhcip, tw); 2549 } 2550 2551 2552 void 2553 uhci_delete_td(uhci_state_t *uhcip, uhci_td_t *td) 2554 { 2555 uhci_td_t *tmp_td; 2556 uhci_trans_wrapper_t *tw = td->tw; 2557 2558 if ((td->outst_td_next == NULL) && (td->outst_td_prev == NULL)) { 2559 uhcip->uhci_outst_tds_head = NULL; 2560 uhcip->uhci_outst_tds_tail = NULL; 2561 } else if (td->outst_td_next == NULL) { 2562 td->outst_td_prev->outst_td_next = NULL; 2563 uhcip->uhci_outst_tds_tail = td->outst_td_prev; 2564 } else if (td->outst_td_prev == NULL) { 2565 td->outst_td_next->outst_td_prev = NULL; 2566 uhcip->uhci_outst_tds_head = td->outst_td_next; 2567 } else { 2568 td->outst_td_prev->outst_td_next = td->outst_td_next; 2569 td->outst_td_next->outst_td_prev = td->outst_td_prev; 2570 } 2571 2572 tmp_td = tw->tw_hctd_head; 2573 2574 if (tmp_td != td) { 2575 while (tmp_td->tw_td_next != td) { 2576 tmp_td = tmp_td->tw_td_next; 2577 } 2578 ASSERT(tmp_td); 2579 tmp_td->tw_td_next = td->tw_td_next; 2580 if (td->tw_td_next == NULL) { 2581 tw->tw_hctd_tail = tmp_td; 2582 } 2583 } else { 2584 tw->tw_hctd_head = tw->tw_hctd_head->tw_td_next; 2585 if (tw->tw_hctd_head == NULL) { 2586 tw->tw_hctd_tail = NULL; 2587 } 2588 } 2589 2590 td->flag = TD_FLAG_FREE; 2591 } 2592 2593 2594 void 2595 uhci_remove_tds_tws( 2596 uhci_state_t *uhcip, 2597 usba_pipe_handle_data_t *ph) 2598 { 2599 usb_opaque_t curr_reqp; 2600 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2601 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep; 2602 uhci_trans_wrapper_t *tw_tmp; 2603 uhci_trans_wrapper_t *tw_head = pp->pp_tw_head; 2604 2605 while (tw_head != NULL) { 2606 tw_tmp = tw_head; 2607 tw_head = tw_head->tw_next; 2608 2609 curr_reqp = tw_tmp->tw_curr_xfer_reqp; 2610 if (curr_reqp) { 2611 /* do this for control/bulk/intr */ 2612 if ((tw_tmp->tw_direction == PID_IN) && 2613 (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_INTR)) { 2614 uhci_deallocate_periodic_in_resource(uhcip, 2615 pp, tw_tmp); 2616 } else { 2617 uhci_hcdi_callback(uhcip, pp, 2618 pp->pp_pipe_handle, tw_tmp, USB_CR_FLUSHED); 2619 } 2620 } /* end of curr_reqp */ 2621 2622 if (tw_tmp->tw_claim != UHCI_MODIFY_TD_BITS_CLAIMED) { 2623 continue; 2624 } 2625 2626 while (tw_tmp->tw_hctd_head != NULL) { 2627 uhci_delete_td(uhcip, tw_tmp->tw_hctd_head); 2628 } 2629 2630 uhci_deallocate_tw(uhcip, pp, tw_tmp); 2631 } 2632 } 2633 2634 2635 /* 2636 * uhci_remove_qh: 2637 * Remove the Queue Head from the Host Controller's 2638 * appropriate QH list. 
2639 */ 2640 void 2641 uhci_remove_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2642 { 2643 uhci_td_t *dummy_td; 2644 2645 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2646 2647 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2648 "uhci_remove_qh:"); 2649 2650 dummy_td = pp->pp_qh->td_tailp; 2651 dummy_td->flag = TD_FLAG_FREE; 2652 2653 switch (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep)) { 2654 case USB_EP_ATTR_CONTROL: 2655 uhci_remove_ctrl_qh(uhcip, pp); 2656 break; 2657 case USB_EP_ATTR_BULK: 2658 uhci_remove_bulk_qh(uhcip, pp); 2659 break; 2660 case USB_EP_ATTR_INTR: 2661 uhci_remove_intr_qh(uhcip, pp); 2662 break; 2663 } 2664 } 2665 2666 2667 static void 2668 uhci_remove_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2669 { 2670 queue_head_t *qh = pp->pp_qh; 2671 queue_head_t *next_lattice_qh = 2672 QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2673 2674 qh->prev_qh->link_ptr = qh->link_ptr; 2675 next_lattice_qh->prev_qh = qh->prev_qh; 2676 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2677 2678 } 2679 2680 /* 2681 * uhci_remove_bulk_qh: 2682 * Remove a bulk QH from the Host Controller's QH list. There may be a 2683 * loop for bulk QHs, we must care about this while removing a bulk QH. 2684 */ 2685 static void 2686 uhci_remove_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2687 { 2688 queue_head_t *qh = pp->pp_qh; 2689 queue_head_t *next_lattice_qh; 2690 uint32_t paddr; 2691 2692 paddr = (GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2693 next_lattice_qh = (qh == uhcip->uhci_bulk_xfers_q_tail) ? 2694 0 : QH_VADDR(paddr); 2695 2696 if ((qh == uhcip->uhci_bulk_xfers_q_tail) && 2697 (qh->prev_qh == uhcip->uhci_bulk_xfers_q_head)) { 2698 SetQH32(uhcip, qh->prev_qh->link_ptr, HC_END_OF_LIST); 2699 } else { 2700 qh->prev_qh->link_ptr = qh->link_ptr; 2701 } 2702 2703 if (next_lattice_qh == NULL) { 2704 uhcip->uhci_bulk_xfers_q_tail = qh->prev_qh; 2705 } else { 2706 next_lattice_qh->prev_qh = qh->prev_qh; 2707 } 2708 2709 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2710 2711 } 2712 2713 2714 static void 2715 uhci_remove_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2716 { 2717 queue_head_t *qh = pp->pp_qh; 2718 queue_head_t *next_lattice_qh = 2719 QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2720 2721 qh->prev_qh->link_ptr = qh->link_ptr; 2722 if (next_lattice_qh->prev_qh != NULL) { 2723 next_lattice_qh->prev_qh = qh->prev_qh; 2724 } else { 2725 uhcip->uhci_ctrl_xfers_q_tail = qh->prev_qh; 2726 } 2727 2728 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2729 } 2730 2731 2732 /* 2733 * uhci_allocate_td_from_pool: 2734 * Allocate a Transfer Descriptor (TD) from the TD buffer pool. 2735 */ 2736 static uhci_td_t * 2737 uhci_allocate_td_from_pool(uhci_state_t *uhcip) 2738 { 2739 int index; 2740 uhci_td_t *td; 2741 2742 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2743 2744 /* 2745 * Search for a blank Transfer Descriptor (TD) 2746 * in the TD buffer pool. 
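 *
 * The pool allocator is a first-fit linear scan over a fixed-size
 * array; a minimal sketch of the same pattern (illustrative only,
 * with a hypothetical free-flags array):
 *
 *	int
 *	pool_first_free(uchar_t *free_flags, uint_t pool_size)
 *	{
 *		uint_t i;
 *
 *		for (i = 0; i < pool_size; i++) {
 *			if (free_flags[i] != 0)
 *				return ((int)i);
 *		}
 *
 *		return (-1);
 *	}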
2747 */ 2748 for (index = 0; index < uhci_td_pool_size; index++) { 2749 if (uhcip->uhci_td_pool_addr[index].flag == TD_FLAG_FREE) { 2750 break; 2751 } 2752 } 2753 2754 if (index == uhci_td_pool_size) { 2755 USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2756 "uhci_allocate_td_from_pool: TD exhausted"); 2757 2758 return (NULL); 2759 } 2760 2761 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2762 "uhci_allocate_td_from_pool: Allocated %d", index); 2763 2764 /* Create a new dummy for the end of the TD list */ 2765 td = &uhcip->uhci_td_pool_addr[index]; 2766 2767 /* Mark the newly allocated TD as a dummy */ 2768 td->flag = TD_FLAG_DUMMY; 2769 td->qh_td_prev = NULL; 2770 2771 return (td); 2772 } 2773 2774 2775 /* 2776 * uhci_insert_bulk_td: Create the TDs for a bulk request and insert them on the bulk QH. 2777 */ 2778 int 2779 uhci_insert_bulk_td( 2780 uhci_state_t *uhcip, 2781 usba_pipe_handle_data_t *ph, 2782 usb_bulk_req_t *req, 2783 usb_flags_t flags) 2784 { 2785 size_t length; 2786 uint_t mps; /* MaxPacketSize */ 2787 uint_t num_bulk_tds, i, j; 2788 uint32_t buf_offs; 2789 uhci_td_t *bulk_td_ptr; 2790 uhci_td_t *current_dummy, *tmp_td; 2791 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2792 uhci_trans_wrapper_t *tw; 2793 uhci_bulk_isoc_xfer_t *bulk_xfer_info; 2794 uhci_bulk_isoc_td_pool_t *td_pool_ptr; 2795 2796 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2797 "uhci_insert_bulk_td: req: 0x%p, flags = 0x%x", req, flags); 2798 2799 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2800 2801 /* 2802 * Create transfer wrapper 2803 */ 2804 if ((tw = uhci_create_transfer_wrapper(uhcip, pp, req->bulk_len, 2805 flags)) == NULL) { 2806 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2807 "uhci_insert_bulk_td: TW allocation failed"); 2808 2809 return (USB_NO_RESOURCES); 2810 } 2811 2812 tw->tw_bytes_xfered = 0; 2813 tw->tw_bytes_pending = req->bulk_len; 2814 tw->tw_handle_td = uhci_handle_bulk_td; 2815 tw->tw_handle_callback_value = (usb_opaque_t)req->bulk_data; 2816 tw->tw_timeout_cnt = req->bulk_timeout; 2817 tw->tw_data = req->bulk_data; 2818 tw->tw_curr_xfer_reqp = (usb_opaque_t)req; 2819 2820 /* Get the bulk pipe direction */ 2821 tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ? 2822 PID_OUT : PID_IN; 2823 2824 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2825 "uhci_insert_bulk_td: direction: 0x%x", tw->tw_direction); 2826 2827 /* If this is an OUT transfer, copy the data into the transfer buffer. */ 2828 if (tw->tw_direction == PID_OUT) { 2829 if (req->bulk_len) { 2830 ASSERT(req->bulk_data != NULL); 2831 2832 /* Copy the data into the message */ 2833 ddi_rep_put8(tw->tw_accesshandle, 2834 req->bulk_data->b_rptr, 2835 (uint8_t *)tw->tw_buf, 2836 req->bulk_len, DDI_DEV_AUTOINCR); 2837 } 2838 } 2839 2840 /* Get the max packet size. */ 2841 length = mps = pp->pp_pipe_handle->p_ep.wMaxPacketSize; 2842 2843 /* 2844 * Calculate the number of TDs to insert in the current frame interval. 2845 * The max number of TDs allowed (driver implementation) is 128 2846 * in one frame interval. Once all those TDs are completed, 2847 * the remaining TDs will be inserted into the lattice 2848 * in uhci_handle_bulk_td().
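 *
 * Worked example of the calculation below, assuming the endpoint's
 * wMaxPacketSize (mps) is 64: a 200-byte request gives 200 / 64 = 3
 * full-size TDs plus, since 200 % 64 = 8 is nonzero, one final
 * 8-byte TD, so num_bulk_tds = 4. A zero-length request gives a
 * single zero-length TD. A 16384-byte request exceeds 128 * 64
 * bytes, so only MAX_NUM_BULK_TDS_PER_XFER TDs are queued now and
 * the remainder is queued later from uhci_handle_bulk_td().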
2849 */ 2850 if ((tw->tw_bytes_pending / mps) >= MAX_NUM_BULK_TDS_PER_XFER) { 2851 num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER; 2852 } else { 2853 num_bulk_tds = (tw->tw_bytes_pending / mps); 2854 2855 if (tw->tw_bytes_pending % mps || tw->tw_bytes_pending == 0) { 2856 num_bulk_tds++; 2857 length = (tw->tw_bytes_pending % mps); 2858 } 2859 } 2860 2861 /* 2862 * Allocate memory for the bulk xfer information structure 2863 */ 2864 if ((bulk_xfer_info = kmem_zalloc( 2865 sizeof (uhci_bulk_isoc_xfer_t), KM_NOSLEEP)) == NULL) { 2866 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2867 "uhci_insert_bulk_td: kmem_zalloc failed"); 2868 2869 /* Free the transfer wrapper */ 2870 uhci_deallocate_tw(uhcip, pp, tw); 2871 2872 return (USB_FAILURE); 2873 } 2874 2875 /* Allocate memory for the bulk TDs */ 2876 if (uhci_alloc_bulk_isoc_tds(uhcip, num_bulk_tds, bulk_xfer_info) != 2877 USB_SUCCESS) { 2878 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2879 "uhci_insert_bulk_td: alloc_bulk_isoc_tds failed"); 2880 2881 kmem_free(bulk_xfer_info, sizeof (uhci_bulk_isoc_xfer_t)); 2882 2883 /* Free the transfer wrapper */ 2884 uhci_deallocate_tw(uhcip, pp, tw); 2885 2886 return (USB_FAILURE); 2887 } 2888 2889 td_pool_ptr = &bulk_xfer_info->td_pools[0]; 2890 bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr; 2891 bulk_td_ptr[0].qh_td_prev = NULL; 2892 current_dummy = pp->pp_qh->td_tailp; 2893 buf_offs = 0; 2894 pp->pp_qh->bulk_xfer_info = bulk_xfer_info; 2895 2896 /* Fill up all the bulk TDs */ 2897 for (i = 0; i < bulk_xfer_info->num_pools; i++) { 2898 for (j = 0; j < (td_pool_ptr->num_tds - 1); j++) { 2899 uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j], 2900 &bulk_td_ptr[j+1], BULKTD_PADDR(td_pool_ptr, 2901 &bulk_td_ptr[j+1]), ph, buf_offs, mps, tw); 2902 buf_offs += mps; 2903 } 2904 2905 /* fill in the last TD */ 2906 if (i == (bulk_xfer_info->num_pools - 1)) { 2907 uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j], 2908 current_dummy, TD_PADDR(current_dummy), 2909 ph, buf_offs, length, tw); 2910 } else { 2911 /* fill in the TD at the tail of a pool */ 2912 tmp_td = &bulk_td_ptr[j]; 2913 td_pool_ptr = &bulk_xfer_info->td_pools[i + 1]; 2914 bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr; 2915 uhci_fill_in_bulk_isoc_td(uhcip, tmp_td, 2916 &bulk_td_ptr[0], BULKTD_PADDR(td_pool_ptr, 2917 &bulk_td_ptr[0]), ph, buf_offs, mps, tw); 2918 buf_offs += mps; 2919 } 2920 } 2921 2922 bulk_xfer_info->num_tds = num_bulk_tds; 2923 2924 /* 2925 * Point the end of the lattice tree to the start of the bulk xfers 2926 * queue head. This allows the HC to execute the same Queue Head/TD 2927 * more than once in the same frame. Some bulk devices NAK after 2928 * completing each TD; as a result, the performance on such devices 2929 * is very bad. This loop provides a chance to execute NAK'ed 2930 * bulk TDs again in the same frame.
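 *
 * Sketch of the resulting schedule (illustrative): the tail of the
 * bulk QH list is pointed at whatever the bulk queue head points
 * to, so the HC keeps cycling through the bulk QHs until the frame
 * time expires:
 *
 *	bulk q head -> bulk QH 1 -> bulk QH 2 -> ... -> bulk q tail
 *	                   ^                                 |
 *	                   +---------------------------------+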
2931 */ 2932 if (uhcip->uhci_pending_bulk_cmds++ == 0) { 2933 uhcip->uhci_bulk_xfers_q_tail->link_ptr = 2934 uhcip->uhci_bulk_xfers_q_head->link_ptr; 2935 USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 2936 "uhci_insert_bulk_td: count = %d no tds %d", 2937 uhcip->uhci_pending_bulk_cmds, num_bulk_tds); 2938 } 2939 2940 /* Insert on the bulk queue head for the execution by HC */ 2941 SetQH32(uhcip, pp->pp_qh->element_ptr, 2942 bulk_xfer_info->td_pools[0].cookie.dmac_address); 2943 2944 return (USB_SUCCESS); 2945 } 2946 2947 2948 /* 2949 * uhci_fill_in_bulk_isoc_td 2950 * Fills the bulk/isoc TD 2951 * 2952 * offset - different meanings for bulk and isoc TDs: 2953 * starting offset into the TW buffer for a bulk TD 2954 * and the index into the isoc packet list for an isoc TD 2955 */ 2956 void 2957 uhci_fill_in_bulk_isoc_td(uhci_state_t *uhcip, uhci_td_t *current_td, 2958 uhci_td_t *next_td, 2959 uint32_t next_td_paddr, 2960 usba_pipe_handle_data_t *ph, 2961 uint_t offset, 2962 uint_t length, 2963 uhci_trans_wrapper_t *tw) 2964 { 2965 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2966 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep; 2967 uint32_t buf_addr; 2968 2969 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2970 "uhci_fill_in_bulk_isoc_td: tw 0x%p offs 0x%x length 0x%x", 2971 tw, offset, length); 2972 2973 bzero((char *)current_td, sizeof (uhci_td_t)); 2974 SetTD32(uhcip, current_td->link_ptr, next_td_paddr | HC_DEPTH_FIRST); 2975 2976 switch (UHCI_XFER_TYPE(ept)) { 2977 case USB_EP_ATTR_ISOCH: 2978 if (((usb_isoc_req_t *)tw->tw_curr_xfer_reqp)->isoc_attributes 2979 & USB_ATTRS_SHORT_XFER_OK) { 2980 SetTD_spd(uhcip, current_td, 1); 2981 } 2982 break; 2983 case USB_EP_ATTR_BULK: 2984 if (((usb_bulk_req_t *)tw->tw_curr_xfer_reqp)->bulk_attributes 2985 & USB_ATTRS_SHORT_XFER_OK) { 2986 SetTD_spd(uhcip, current_td, 1); 2987 } 2988 break; 2989 } 2990 2991 mutex_enter(&ph->p_usba_device->usb_mutex); 2992 2993 SetTD_c_err(uhcip, current_td, UHCI_MAX_ERR_COUNT); 2994 SetTD_status(uhcip, current_td, UHCI_TD_ACTIVE); 2995 SetTD_ioc(uhcip, current_td, INTERRUPT_ON_COMPLETION); 2996 SetTD_mlen(uhcip, current_td, 2997 (length == 0) ? ZERO_LENGTH : (length - 1)); 2998 SetTD_dtogg(uhcip, current_td, pp->pp_data_toggle); 2999 SetTD_devaddr(uhcip, current_td, ph->p_usba_device->usb_addr); 3000 SetTD_endpt(uhcip, current_td, ph->p_ep.bEndpointAddress & 3001 END_POINT_ADDRESS_MASK); 3002 SetTD_PID(uhcip, current_td, tw->tw_direction); 3003 3004 /* Get the right buffer address for the current TD */ 3005 switch (UHCI_XFER_TYPE(ept)) { 3006 case USB_EP_ATTR_ISOCH: 3007 buf_addr = tw->tw_isoc_bufs[offset].cookie.dmac_address; 3008 break; 3009 case USB_EP_ATTR_BULK: 3010 buf_addr = uhci_get_tw_paddr_by_offs(uhcip, offset, 3011 length, tw); 3012 break; 3013 } 3014 SetTD32(uhcip, current_td->buffer_address, buf_addr); 3015 3016 /* 3017 * Adjust the data toggle. 3018 * The data toggle bit must always be 0 for isoc transfers. 3019 * And set the "iso" bit in the TD for isoc transfers. 
3020 */ 3021 if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) { 3022 pp->pp_data_toggle = 0; 3023 SetTD_iso(uhcip, current_td, 1); 3024 } else { 3025 ADJ_DATA_TOGGLE(pp); 3026 next_td->qh_td_prev = current_td; 3027 pp->pp_qh->td_tailp = next_td; 3028 } 3029 3030 current_td->outst_td_next = NULL; 3031 current_td->outst_td_prev = uhcip->uhci_outst_tds_tail; 3032 if (uhcip->uhci_outst_tds_head == NULL) { 3033 uhcip->uhci_outst_tds_head = current_td; 3034 } else { 3035 uhcip->uhci_outst_tds_tail->outst_td_next = current_td; 3036 } 3037 uhcip->uhci_outst_tds_tail = current_td; 3038 current_td->tw = tw; 3039 3040 if (tw->tw_hctd_head == NULL) { 3041 ASSERT(tw->tw_hctd_tail == NULL); 3042 tw->tw_hctd_head = current_td; 3043 tw->tw_hctd_tail = current_td; 3044 } else { 3045 /* Add the td to the end of the list */ 3046 tw->tw_hctd_tail->tw_td_next = current_td; 3047 tw->tw_hctd_tail = current_td; 3048 } 3049 3050 mutex_exit(&ph->p_usba_device->usb_mutex); 3051 } 3052 3053 3054 /* 3055 * uhci_alloc_bulk_isoc_tds: 3056 * - Allocates the isoc/bulk TD pools. It will allocate one whole 3057 * pool to store all the TDs if the system allows. Only when the 3058 * first allocation fails, it tries to allocate several small 3059 * pools with each pool limited in physical page size. 3060 */ 3061 static int 3062 uhci_alloc_bulk_isoc_tds( 3063 uhci_state_t *uhcip, 3064 uint_t num_tds, 3065 uhci_bulk_isoc_xfer_t *info) 3066 { 3067 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3068 "uhci_alloc_bulk_isoc_tds: num_tds: 0x%x info: 0x%p", 3069 num_tds, info); 3070 3071 info->num_pools = 1; 3072 /* allocate as a whole pool at the first time */ 3073 if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) != 3074 USB_SUCCESS) { 3075 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3076 "alloc_memory_for_tds failed: num_tds %d num_pools %d", 3077 num_tds, info->num_pools); 3078 3079 /* reduce the td number per pool and alloc again */ 3080 info->num_pools = num_tds / UHCI_MAX_TD_NUM_PER_POOL; 3081 if (num_tds % UHCI_MAX_TD_NUM_PER_POOL) { 3082 info->num_pools++; 3083 } 3084 3085 if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) != 3086 USB_SUCCESS) { 3087 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3088 "alloc_memory_for_tds failed: num_tds %d " 3089 "num_pools %d", num_tds, info->num_pools); 3090 3091 return (USB_NO_RESOURCES); 3092 } 3093 } 3094 3095 return (USB_SUCCESS); 3096 } 3097 3098 3099 /* 3100 * uhci_alloc_memory_for_tds: 3101 * - Allocates memory for the isoc/bulk td pools. 
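 *
 * Worked example of the pool sizing done here for the split
 * computed in uhci_alloc_bulk_isoc_tds() above, with a hypothetical
 * UHCI_MAX_TD_NUM_PER_POOL of 100: a request for 250 TDs first
 * tries a single 250-TD pool; if that fails, it is retried as
 * 250 / 100 = 2 pools plus, since 250 % 100 is nonzero, one more,
 * i.e. num_pools = 3, sized 100, 100 and 50 TDs respectively.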
3102 */ 3103 static int 3104 uhci_alloc_memory_for_tds( 3105 uhci_state_t *uhcip, 3106 uint_t num_tds, 3107 uhci_bulk_isoc_xfer_t *info) 3108 { 3109 int result, i, j, err; 3110 size_t real_length; 3111 uint_t ccount, num; 3112 ddi_device_acc_attr_t dev_attr; 3113 uhci_bulk_isoc_td_pool_t *td_pool_ptr1, *td_pool_ptr2; 3114 3115 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3116 "uhci_alloc_memory_for_tds: num_tds: 0x%x info: 0x%p " 3117 "num_pools: %u", num_tds, info, info->num_pools); 3118 3119 /* The host controller will be little endian */ 3120 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 3121 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 3122 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 3123 3124 /* Allocate the TD pool structures */ 3125 if ((info->td_pools = kmem_zalloc( 3126 (sizeof (uhci_bulk_isoc_td_pool_t) * info->num_pools), 3127 KM_SLEEP)) == NULL) { 3128 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3129 "uhci_alloc_memory_for_tds: alloc td_pools failed"); 3130 3131 return (USB_FAILURE); 3132 } 3133 3134 for (i = 0; i < info->num_pools; i++) { 3135 if (info->num_pools == 1) { 3136 num = num_tds; 3137 } else if (i < (info->num_pools - 1)) { 3138 num = UHCI_MAX_TD_NUM_PER_POOL; 3139 } else { 3140 num = (num_tds % UHCI_MAX_TD_NUM_PER_POOL); 3141 } 3142 3143 td_pool_ptr1 = &info->td_pools[i]; 3144 3145 /* Allocate the bulk TD pool DMA handle */ 3146 if (ddi_dma_alloc_handle(uhcip->uhci_dip, 3147 &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0, 3148 &td_pool_ptr1->dma_handle) != DDI_SUCCESS) { 3149 3150 for (j = 0; j < i; j++) { 3151 td_pool_ptr2 = &info->td_pools[j]; 3152 result = ddi_dma_unbind_handle( 3153 td_pool_ptr2->dma_handle); 3154 ASSERT(result == DDI_SUCCESS); 3155 ddi_dma_mem_free(&td_pool_ptr2->mem_handle); 3156 ddi_dma_free_handle(&td_pool_ptr2->dma_handle); 3157 } 3158 3159 kmem_free(info->td_pools, 3160 (sizeof (uhci_bulk_isoc_td_pool_t) * 3161 info->num_pools)); 3162 3163 return (USB_FAILURE); 3164 } 3165 3166 /* Allocate the memory for the bulk TD pool */ 3167 if (ddi_dma_mem_alloc(td_pool_ptr1->dma_handle, 3168 num * sizeof (uhci_td_t), &dev_attr, 3169 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0, 3170 &td_pool_ptr1->pool_addr, &real_length, 3171 &td_pool_ptr1->mem_handle) != DDI_SUCCESS) { 3172 3173 ddi_dma_free_handle(&td_pool_ptr1->dma_handle); 3174 3175 for (j = 0; j < i; j++) { 3176 td_pool_ptr2 = &info->td_pools[j]; 3177 result = ddi_dma_unbind_handle( 3178 td_pool_ptr2->dma_handle); 3179 ASSERT(result == DDI_SUCCESS); 3180 ddi_dma_mem_free(&td_pool_ptr2->mem_handle); 3181 ddi_dma_free_handle(&td_pool_ptr2->dma_handle); 3182 } 3183 3184 kmem_free(info->td_pools, 3185 (sizeof (uhci_bulk_isoc_td_pool_t) * 3186 info->num_pools)); 3187 3188 return (USB_FAILURE); 3189 } 3190 3191 /* Map the bulk TD pool into the I/O address space */ 3192 result = ddi_dma_addr_bind_handle(td_pool_ptr1->dma_handle, 3193 NULL, (caddr_t)td_pool_ptr1->pool_addr, real_length, 3194 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 3195 &td_pool_ptr1->cookie, &ccount); 3196 3197 /* Process the result */ 3198 err = USB_SUCCESS; 3199 3200 if (result != DDI_DMA_MAPPED) { 3201 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3202 "uhci_allocate_memory_for_tds: Result = %d", 3203 result); 3204 uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, 3205 result); 3206 3207 err = USB_FAILURE; 3208 } 3209 3210 if ((result == DDI_DMA_MAPPED) && (ccount != 1)) { 3211 /* The cookie count should be 1 */ 3212 USB_DPRINTF_L2(PRINT_MASK_ATTA, 3213 uhcip->uhci_log_hdl, 3214 
"uhci_allocate_memory_for_tds: " 3215 "More than 1 cookie"); 3216 3217 result = ddi_dma_unbind_handle( 3218 td_pool_ptr1->dma_handle); 3219 ASSERT(result == DDI_SUCCESS); 3220 3221 err = USB_FAILURE; 3222 } 3223 3224 if (err == USB_FAILURE) { 3225 3226 ddi_dma_mem_free(&td_pool_ptr1->mem_handle); 3227 ddi_dma_free_handle(&td_pool_ptr1->dma_handle); 3228 3229 for (j = 0; j < i; j++) { 3230 td_pool_ptr2 = &info->td_pools[j]; 3231 result = ddi_dma_unbind_handle( 3232 td_pool_ptr2->dma_handle); 3233 ASSERT(result == DDI_SUCCESS); 3234 ddi_dma_mem_free(&td_pool_ptr2->mem_handle); 3235 ddi_dma_free_handle(&td_pool_ptr2->dma_handle); 3236 } 3237 3238 kmem_free(info->td_pools, 3239 (sizeof (uhci_bulk_isoc_td_pool_t) * 3240 info->num_pools)); 3241 3242 return (USB_FAILURE); 3243 } 3244 3245 bzero((void *)td_pool_ptr1->pool_addr, 3246 num * sizeof (uhci_td_t)); 3247 td_pool_ptr1->num_tds = num; 3248 } 3249 3250 return (USB_SUCCESS); 3251 } 3252 3253 3254 /* 3255 * uhci_handle_bulk_td: 3256 * 3257 * Handles the completed bulk transfer descriptors 3258 */ 3259 void 3260 uhci_handle_bulk_td(uhci_state_t *uhcip, uhci_td_t *td) 3261 { 3262 uint_t num_bulk_tds, index, td_count, j; 3263 usb_cr_t error; 3264 uint_t length, bytes_xfered; 3265 ushort_t MaxPacketSize; 3266 uint32_t buf_offs, paddr; 3267 uhci_td_t *bulk_td_ptr, *current_dummy, *td_head; 3268 uhci_td_t *tmp_td; 3269 queue_head_t *qh, *next_qh; 3270 uhci_trans_wrapper_t *tw = td->tw; 3271 uhci_pipe_private_t *pp = tw->tw_pipe_private; 3272 uhci_bulk_isoc_xfer_t *bulk_xfer_info; 3273 uhci_bulk_isoc_td_pool_t *td_pool_ptr; 3274 usba_pipe_handle_data_t *ph; 3275 3276 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3277 "uhci_handle_bulk_td: td = 0x%p tw = 0x%p", td, tw); 3278 3279 /* 3280 * Update the tw_bytes_pending, and tw_bytes_xfered 3281 */ 3282 bytes_xfered = ZERO_LENGTH; 3283 3284 /* 3285 * Check whether there are any errors occurred in the xfer. 3286 * If so, update the data_toggle for the queue head and 3287 * return error to the upper layer. 3288 */ 3289 if (GetTD_status(uhcip, td) & TD_STATUS_MASK) { 3290 uhci_handle_bulk_td_errors(uhcip, td); 3291 3292 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3293 "uhci_handle_bulk_td: error; data toggle: 0x%x", 3294 pp->pp_data_toggle); 3295 3296 return; 3297 } 3298 3299 /* 3300 * Update the tw_bytes_pending, and tw_bytes_xfered 3301 */ 3302 bytes_xfered = GetTD_alen(uhcip, td); 3303 if (bytes_xfered != ZERO_LENGTH) { 3304 tw->tw_bytes_pending -= (bytes_xfered + 1); 3305 tw->tw_bytes_xfered += (bytes_xfered + 1); 3306 } 3307 3308 /* 3309 * Get Bulk pipe information and pipe handle 3310 */ 3311 bulk_xfer_info = pp->pp_qh->bulk_xfer_info; 3312 ph = tw->tw_pipe_private->pp_pipe_handle; 3313 3314 /* 3315 * Check whether data underrun occurred. 3316 * If so, complete the transfer 3317 * Update the data toggle bit 3318 */ 3319 if (bytes_xfered != GetTD_mlen(uhcip, td)) { 3320 bulk_xfer_info->num_tds = 1; 3321 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3322 "uhci_handle_bulk_td: Data underrun occured"); 3323 3324 pp->pp_data_toggle = GetTD_dtogg(uhcip, td) == 0 ? 1 : 0; 3325 } 3326 3327 /* 3328 * If the TD's in the current frame are completed, then check 3329 * whether we have any more bytes to xfer. If so, insert TD's. 3330 * If no more bytes needs to be transferred, then do callback to the 3331 * upper layer. 3332 * If the TD's in the current frame are not completed, then 3333 * just delete the TD from the linked lists. 
3334 */ 3335 USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3336 "uhci_handle_bulk_td: completed TD data toggle: 0x%x", 3337 GetTD_dtogg(uhcip, td)); 3338 3339 if (--bulk_xfer_info->num_tds == 0) { 3340 uhci_delete_td(uhcip, td); 3341 3342 if ((tw->tw_bytes_pending) && 3343 (GetTD_mlen(uhcip, td) - GetTD_alen(uhcip, td) == 0)) { 3344 3345 MaxPacketSize = pp->pp_pipe_handle->p_ep.wMaxPacketSize; 3346 length = MaxPacketSize; 3347 3348 qh = pp->pp_qh; 3349 paddr = GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK; 3350 if (GetQH32(uhcip, qh->link_ptr) != 3351 GetQH32(uhcip, 3352 uhcip->uhci_bulk_xfers_q_head->link_ptr)) { 3353 next_qh = QH_VADDR(paddr); 3354 SetQH32(uhcip, qh->prev_qh->link_ptr, 3355 paddr|(0x2)); 3356 next_qh->prev_qh = qh->prev_qh; 3357 SetQH32(uhcip, qh->link_ptr, 3358 GetQH32(uhcip, 3359 uhcip->uhci_bulk_xfers_q_head->link_ptr)); 3360 qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail; 3361 SetQH32(uhcip, 3362 uhcip->uhci_bulk_xfers_q_tail->link_ptr, 3363 QH_PADDR(qh) | 0x2); 3364 uhcip->uhci_bulk_xfers_q_tail = qh; 3365 } 3366 3367 if ((tw->tw_bytes_pending / MaxPacketSize) >= 3368 MAX_NUM_BULK_TDS_PER_XFER) { 3369 num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER; 3370 } else { 3371 num_bulk_tds = 3372 (tw->tw_bytes_pending / MaxPacketSize); 3373 if (tw->tw_bytes_pending % MaxPacketSize) { 3374 num_bulk_tds++; 3375 length = (tw->tw_bytes_pending % 3376 MaxPacketSize); 3377 } 3378 } 3379 3380 current_dummy = pp->pp_qh->td_tailp; 3381 td_pool_ptr = &bulk_xfer_info->td_pools[0]; 3382 bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr; 3383 buf_offs = tw->tw_bytes_xfered; 3384 td_count = num_bulk_tds; 3385 index = 0; 3386 3387 /* reuse the TDs to transfer more data */ 3388 while (td_count > 0) { 3389 for (j = 0; 3390 (j < (td_pool_ptr->num_tds - 1)) && 3391 (td_count > 1); j++, td_count--) { 3392 uhci_fill_in_bulk_isoc_td(uhcip, 3393 &bulk_td_ptr[j], &bulk_td_ptr[j+1], 3394 BULKTD_PADDR(td_pool_ptr, 3395 &bulk_td_ptr[j+1]), ph, buf_offs, 3396 MaxPacketSize, tw); 3397 buf_offs += MaxPacketSize; 3398 } 3399 3400 if (td_count == 1) { 3401 uhci_fill_in_bulk_isoc_td(uhcip, 3402 &bulk_td_ptr[j], current_dummy, 3403 TD_PADDR(current_dummy), ph, 3404 buf_offs, length, tw); 3405 3406 break; 3407 } else { 3408 tmp_td = &bulk_td_ptr[j]; 3409 ASSERT(index < 3410 (bulk_xfer_info->num_pools - 1)); 3411 td_pool_ptr = &bulk_xfer_info-> 3412 td_pools[index + 1]; 3413 bulk_td_ptr = (uhci_td_t *) 3414 td_pool_ptr->pool_addr; 3415 uhci_fill_in_bulk_isoc_td(uhcip, 3416 tmp_td, &bulk_td_ptr[0], 3417 BULKTD_PADDR(td_pool_ptr, 3418 &bulk_td_ptr[0]), ph, buf_offs, 3419 MaxPacketSize, tw); 3420 buf_offs += MaxPacketSize; 3421 td_count--; 3422 index++; 3423 } 3424 } 3425 3426 pp->pp_qh->bulk_xfer_info = bulk_xfer_info; 3427 bulk_xfer_info->num_tds = num_bulk_tds; 3428 SetQH32(uhcip, pp->pp_qh->element_ptr, 3429 bulk_xfer_info->td_pools[0].cookie.dmac_address); 3430 } else { 3431 usba_pipe_handle_data_t *usb_pp = pp->pp_pipe_handle; 3432 3433 pp->pp_qh->bulk_xfer_info = NULL; 3434 3435 if (tw->tw_bytes_pending) { 3436 /* Update the element pointer */ 3437 SetQH32(uhcip, pp->pp_qh->element_ptr, 3438 TD_PADDR(pp->pp_qh->td_tailp)); 3439 3440 /* Remove all the tds */ 3441 td_head = tw->tw_hctd_head; 3442 while (td_head != NULL) { 3443 uhci_delete_td(uhcip, td_head); 3444 td_head = tw->tw_hctd_head; 3445 } 3446 } 3447 3448 if (tw->tw_direction == PID_IN) { 3449 usb_req_attrs_t attrs = ((usb_bulk_req_t *) 3450 tw->tw_curr_xfer_reqp)->bulk_attributes; 3451 3452 error = USB_CR_OK; 3453 3454 /* Data run occurred */ 3455 
if (tw->tw_bytes_pending && 3456 (!(attrs & USB_ATTRS_SHORT_XFER_OK))) { 3457 error = USB_CR_DATA_UNDERRUN; 3458 } 3459 3460 uhci_sendup_td_message(uhcip, error, tw); 3461 } else { 3462 uhci_do_byte_stats(uhcip, tw->tw_length, 3463 usb_pp->p_ep.bmAttributes, 3464 usb_pp->p_ep.bEndpointAddress); 3465 3466 /* Data underrun occurred */ 3467 if (tw->tw_bytes_pending) { 3468 3469 tw->tw_data->b_rptr += 3470 tw->tw_bytes_xfered; 3471 3472 USB_DPRINTF_L2(PRINT_MASK_ATTA, 3473 uhcip->uhci_log_hdl, 3474 "uhci_handle_bulk_td: " 3475 "data underrun occurred"); 3476 3477 uhci_hcdi_callback(uhcip, pp, 3478 tw->tw_pipe_private->pp_pipe_handle, 3479 tw, USB_CR_DATA_UNDERRUN); 3480 } else { 3481 uhci_hcdi_callback(uhcip, pp, 3482 tw->tw_pipe_private->pp_pipe_handle, 3483 tw, USB_CR_OK); 3484 } 3485 } /* direction */ 3486 3487 /* Deallocate DMA memory */ 3488 uhci_deallocate_tw(uhcip, pp, tw); 3489 for (j = 0; j < bulk_xfer_info->num_pools; j++) { 3490 td_pool_ptr = &bulk_xfer_info->td_pools[j]; 3491 (void) ddi_dma_unbind_handle( 3492 td_pool_ptr->dma_handle); 3493 ddi_dma_mem_free(&td_pool_ptr->mem_handle); 3494 ddi_dma_free_handle(&td_pool_ptr->dma_handle); 3495 } 3496 kmem_free(bulk_xfer_info->td_pools, 3497 (sizeof (uhci_bulk_isoc_td_pool_t) * 3498 bulk_xfer_info->num_pools)); 3499 kmem_free(bulk_xfer_info, 3500 sizeof (uhci_bulk_isoc_xfer_t)); 3501 3502 /* 3503 * When there are no pending bulk commands, point the 3504 * end of the lattice tree to NULL. This will make sure 3505 * that the HC control does not loop anymore and PCI 3506 * bus is not affected. 3507 */ 3508 if (--uhcip->uhci_pending_bulk_cmds == 0) { 3509 uhcip->uhci_bulk_xfers_q_tail->link_ptr = 3510 HC_END_OF_LIST; 3511 USB_DPRINTF_L3(PRINT_MASK_ATTA, 3512 uhcip->uhci_log_hdl, 3513 "uhci_handle_bulk_td: count = %d", 3514 uhcip->uhci_pending_bulk_cmds); 3515 } 3516 } 3517 } else { 3518 uhci_delete_td(uhcip, td); 3519 } 3520 } 3521 3522 3523 void 3524 uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td) 3525 { 3526 usb_cr_t usb_err; 3527 uint32_t paddr_tail, element_ptr, paddr; 3528 uhci_td_t *next_td; 3529 uhci_pipe_private_t *pp; 3530 uhci_trans_wrapper_t *tw = td->tw; 3531 usba_pipe_handle_data_t *ph; 3532 uhci_bulk_isoc_td_pool_t *td_pool_ptr = NULL; 3533 3534 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3535 "uhci_handle_bulk_td_errors: td = %p", (void *)td); 3536 3537 #ifdef DEBUG 3538 uhci_print_td(uhcip, td); 3539 #endif 3540 3541 tw = td->tw; 3542 ph = tw->tw_pipe_private->pp_pipe_handle; 3543 pp = (uhci_pipe_private_t *)ph->p_hcd_private; 3544 3545 /* 3546 * Find the type of error occurred and return the error 3547 * to the upper layer. And adjust the data toggle. 3548 */ 3549 element_ptr = GetQH32(uhcip, pp->pp_qh->element_ptr) & 3550 QH_ELEMENT_PTR_MASK; 3551 paddr_tail = TD_PADDR(pp->pp_qh->td_tailp); 3552 3553 /* 3554 * If a timeout occurs before a transfer has completed, 3555 * the timeout handler sets the CRC/Timeout bit and clears the Active 3556 * bit in the link_ptr for each td in the transfer. 3557 * It then waits (at least) 1 ms so that any tds the controller might 3558 * have been executing will have completed. 3559 * So at this point element_ptr will point to either: 3560 * 1) the next td for the transfer (which has not been executed, 3561 * and has the CRC/Timeout status bit set and Active bit cleared), 3562 * 2) the dummy td for this qh. 3563 * So if the element_ptr does not point to the dummy td, we know 3564 * it points to the next td that would have been executed. 
3565 * That td has the data toggle we want to save. 3566 * All outstanding tds have been marked as CRC/Timeout, 3567 * so it doesn't matter which td we pass to uhci_parse_td_error 3568 * for the error status. 3569 */ 3570 if (element_ptr != paddr_tail) { 3571 paddr = (element_ptr & QH_ELEMENT_PTR_MASK); 3572 uhci_get_bulk_td_by_paddr(uhcip, pp->pp_qh->bulk_xfer_info, 3573 paddr, &td_pool_ptr); 3574 next_td = BULKTD_VADDR(td_pool_ptr, paddr); 3575 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3576 "uhci_handle_bulk_td_errors: next td = %p", 3577 (void *)next_td); 3578 3579 usb_err = uhci_parse_td_error(uhcip, pp, next_td); 3580 } else { 3581 usb_err = uhci_parse_td_error(uhcip, pp, td); 3582 } 3583 3584 /* 3585 * Update the link pointer. 3586 */ 3587 SetQH32(uhcip, pp->pp_qh->element_ptr, TD_PADDR(pp->pp_qh->td_tailp)); 3588 3589 /* 3590 * Send up number of bytes transferred before the error condition. 3591 */ 3592 if ((tw->tw_direction == PID_OUT) && tw->tw_data) { 3593 tw->tw_data->b_rptr += tw->tw_bytes_xfered; 3594 } 3595 3596 uhci_remove_bulk_tds_tws(uhcip, tw->tw_pipe_private, UHCI_IN_ERROR); 3597 3598 /* 3599 * When there are no pending bulk commands, point the end of the 3600 * lattice tree to NULL. This will make sure that the HC control 3601 * does not loop anymore and PCI bus is not affected. 3602 */ 3603 if (--uhcip->uhci_pending_bulk_cmds == 0) { 3604 uhcip->uhci_bulk_xfers_q_tail->link_ptr = HC_END_OF_LIST; 3605 USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3606 "uhci_handle_bulk_td_errors: count = %d", 3607 uhcip->uhci_pending_bulk_cmds); 3608 } 3609 3610 uhci_hcdi_callback(uhcip, pp, ph, tw, usb_err); 3611 uhci_deallocate_tw(uhcip, pp, tw); 3612 } 3613 3614 3615 /* 3616 * uhci_get_bulk_td_by_paddr: 3617 * Obtain the address of the TD pool the physical address falls in. 
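 *
 * The lookup is a linear scan over the pools; a pool contains
 * paddr when the half-open interval check holds (an illustrative
 * restatement of the test below):
 *
 *	base <= paddr && paddr < (base + size)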
3618 * 3619 * td_pool_pp - pointer to the address of the TD pool containing the paddr 3620 */ 3621 /* ARGSUSED */ 3622 static void 3623 uhci_get_bulk_td_by_paddr( 3624 uhci_state_t *uhcip, 3625 uhci_bulk_isoc_xfer_t *info, 3626 uint32_t paddr, 3627 uhci_bulk_isoc_td_pool_t **td_pool_pp) 3628 { 3629 uint_t i = 0; 3630 3631 while (i < info->num_pools) { 3632 *td_pool_pp = &info->td_pools[i]; 3633 if (((*td_pool_pp)->cookie.dmac_address <= paddr) && 3634 (((*td_pool_pp)->cookie.dmac_address + 3635 (*td_pool_pp)->cookie.dmac_size) > paddr)) { 3636 3637 break; 3638 } 3639 i++; 3640 } 3641 3642 ASSERT(i < info->num_pools); 3643 } 3644 3645 3646 void 3647 uhci_remove_bulk_tds_tws( 3648 uhci_state_t *uhcip, 3649 uhci_pipe_private_t *pp, 3650 int what) 3651 { 3652 uint_t rval, i; 3653 uhci_td_t *head; 3654 uhci_td_t *head_next; 3655 usb_opaque_t curr_reqp; 3656 uhci_bulk_isoc_xfer_t *info; 3657 uhci_bulk_isoc_td_pool_t *td_pool_ptr; 3658 3659 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 3660 3661 if ((info = pp->pp_qh->bulk_xfer_info) == NULL) { 3662 3663 return; 3664 } 3665 3666 head = uhcip->uhci_outst_tds_head; 3667 3668 while (head) { 3669 uhci_trans_wrapper_t *tw_tmp = head->tw; 3670 head_next = head->outst_td_next; 3671 3672 if (pp->pp_qh == tw_tmp->tw_pipe_private->pp_qh) { 3673 curr_reqp = tw_tmp->tw_curr_xfer_reqp; 3674 if (curr_reqp && 3675 ((what == UHCI_IN_CLOSE) || 3676 (what == UHCI_IN_RESET))) { 3677 uhci_hcdi_callback(uhcip, pp, 3678 pp->pp_pipe_handle, 3679 tw_tmp, USB_CR_FLUSHED); 3680 } /* end of curr_reqp */ 3681 3682 uhci_delete_td(uhcip, head); 3683 3684 if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) { 3685 ASSERT(info->num_tds > 0); 3686 if (--info->num_tds == 0) { 3687 uhci_deallocate_tw(uhcip, pp, tw_tmp); 3688 3689 /* 3690 * This will make sure that the HC 3691 * does not loop anymore when there 3692 * are no pending bulk commands. 3693 */ 3694 if (--uhcip->uhci_pending_bulk_cmds 3695 == 0) { 3696 uhcip->uhci_bulk_xfers_q_tail-> 3697 link_ptr = HC_END_OF_LIST; 3698 USB_DPRINTF_L3(PRINT_MASK_ATTA, 3699 uhcip->uhci_log_hdl, 3700 "uhci_remove_bulk_tds_tws:" 3701 " count = %d", 3702 uhcip-> 3703 uhci_pending_bulk_cmds); 3704 } 3705 } 3706 } 3707 } 3708 3709 head = head_next; 3710 } 3711 3712 if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) { 3713 ASSERT(info->num_tds == 0); 3714 } 3715 3716 for (i = 0; i < info->num_pools; i++) { 3717 td_pool_ptr = &info->td_pools[i]; 3718 rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle); 3719 ASSERT(rval == DDI_SUCCESS); 3720 ddi_dma_mem_free(&td_pool_ptr->mem_handle); 3721 ddi_dma_free_handle(&td_pool_ptr->dma_handle); 3722 } 3723 kmem_free(info->td_pools, (sizeof (uhci_bulk_isoc_td_pool_t) * 3724 info->num_pools)); 3725 kmem_free(info, sizeof (uhci_bulk_isoc_xfer_t)); 3726 pp->pp_qh->bulk_xfer_info = NULL; 3727 } 3728 3729 3730 /* 3731 * uhci_save_data_toggle () 3732 * Save the data toggle in the usba_device structure 3733 */ 3734 void 3735 uhci_save_data_toggle(uhci_pipe_private_t *pp) 3736 { 3737 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle; 3738 3739 /* Save the data toggle in the usb devices structure. */ 3740 mutex_enter(&ph->p_mutex); 3741 usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress, 3742 pp->pp_data_toggle); 3743 mutex_exit(&ph->p_mutex); 3744 } 3745 3746 /* 3747 * uhci_create_isoc_transfer_wrapper: 3748 * Create a Transaction Wrapper (TW) for isoc transfer. 3749 * This involves the allocating of DMA resources. 
 *
 * For isoc transfers, one transfer includes multiple packets, and
 * each packet may have a different length, so each packet is
 * transferred by its own TD. We only know that an individual packet
 * length won't exceed 1023 bytes; we don't know the exact lengths.
 * It is hard to build one physically discontiguous DMA buffer that
 * can fit all the TDs the way it is done for ctrl/bulk/intr
 * transfers. It is also undesirable to make one physically
 * contiguous DMA buffer for all the packets, since that may easily
 * fail when the system is low on memory. So an individual,
 * physically contiguous DMA buffer is allocated for each isoc
 * packet, and an extra structure is allocated to save the multiple
 * DMA handles.
 */
static uhci_trans_wrapper_t *
uhci_create_isoc_transfer_wrapper(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	usb_isoc_req_t		*req,
	size_t			length,
	usb_flags_t		usb_flags)
{
	int			result;
	size_t			real_length, strtlen, xfer_size;
	uhci_trans_wrapper_t	*tw;
	ddi_device_acc_attr_t	dev_attr;
	ddi_dma_attr_t		dma_attr;
	int			kmem_flag;
	int			(*dmamem_wait)(caddr_t);
	uint_t			i, j, ccount;
	usb_isoc_req_t		*tmp_req = req;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep) != USB_EP_ATTR_ISOCH) {

		return (NULL);
	}

	if ((req == NULL) && (UHCI_XFER_DIR(&pp->pp_pipe_handle->p_ep) ==
	    USB_EP_DIR_IN)) {
		tmp_req = (usb_isoc_req_t *)pp->pp_client_periodic_in_reqp;
	}

	if (tmp_req == NULL) {

		return (NULL);
	}


	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_isoc_transfer_wrapper: length = 0x%lx flags = 0x%x",
	    length, usb_flags);

	/* The SLEEP flag should not be used in interrupt context */
	if (servicing_interrupt()) {
		kmem_flag = KM_NOSLEEP;
		dmamem_wait = DDI_DMA_DONTWAIT;
	} else {
		kmem_flag = KM_SLEEP;
		dmamem_wait = DDI_DMA_SLEEP;
	}

	/* Allocate space for the transfer wrapper */
	if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
	    NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_isoc_transfer_wrapper: kmem_zalloc failed");

		return (NULL);
	}

	/* Allocate space for the isoc buffer handles */
	strtlen = sizeof (uhci_isoc_buf_t) * tmp_req->isoc_pkts_count;
	if ((tw->tw_isoc_bufs = kmem_zalloc(strtlen, kmem_flag)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_isoc_transfer_wrapper: kmem_zalloc "
		    "isoc buffer failed");
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
	dma_attr.dma_attr_sgllen = 1;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Store the transfer length */
	tw->tw_length = length;

	for (i = 0; i < tmp_req->isoc_pkts_count; i++) {
		tw->tw_isoc_bufs[i].index = i;

		/* Allocate the DMA handle */
		if ((result = ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr,
		    dmamem_wait, 0, &tw->tw_isoc_bufs[i].dma_handle)) !=
		    DDI_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS,
uhcip->uhci_log_hdl, 3851 "uhci_create_isoc_transfer_wrapper: " 3852 "Alloc handle %d failed", i); 3853 3854 for (j = 0; j < i; j++) { 3855 result = ddi_dma_unbind_handle( 3856 tw->tw_isoc_bufs[j].dma_handle); 3857 ASSERT(result == USB_SUCCESS); 3858 ddi_dma_mem_free(&tw->tw_isoc_bufs[j]. 3859 mem_handle); 3860 ddi_dma_free_handle(&tw->tw_isoc_bufs[j]. 3861 dma_handle); 3862 } 3863 kmem_free(tw->tw_isoc_bufs, strtlen); 3864 kmem_free(tw, sizeof (uhci_trans_wrapper_t)); 3865 3866 return (NULL); 3867 } 3868 3869 /* Allocate the memory */ 3870 xfer_size = tmp_req->isoc_pkt_descr[i].isoc_pkt_length; 3871 if ((result = ddi_dma_mem_alloc(tw->tw_isoc_bufs[i].dma_handle, 3872 xfer_size, &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait, 3873 NULL, (caddr_t *)&tw->tw_isoc_bufs[i].buf_addr, 3874 &real_length, &tw->tw_isoc_bufs[i].mem_handle)) != 3875 DDI_SUCCESS) { 3876 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3877 "uhci_create_isoc_transfer_wrapper: " 3878 "dma_mem_alloc %d fail", i); 3879 ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle); 3880 3881 for (j = 0; j < i; j++) { 3882 result = ddi_dma_unbind_handle( 3883 tw->tw_isoc_bufs[j].dma_handle); 3884 ASSERT(result == USB_SUCCESS); 3885 ddi_dma_mem_free(&tw->tw_isoc_bufs[j]. 3886 mem_handle); 3887 ddi_dma_free_handle(&tw->tw_isoc_bufs[j]. 3888 dma_handle); 3889 } 3890 kmem_free(tw->tw_isoc_bufs, strtlen); 3891 kmem_free(tw, sizeof (uhci_trans_wrapper_t)); 3892 3893 return (NULL); 3894 } 3895 3896 ASSERT(real_length >= xfer_size); 3897 3898 /* Bind the handle */ 3899 result = ddi_dma_addr_bind_handle( 3900 tw->tw_isoc_bufs[i].dma_handle, NULL, 3901 (caddr_t)tw->tw_isoc_bufs[i].buf_addr, real_length, 3902 DDI_DMA_RDWR|DDI_DMA_CONSISTENT, dmamem_wait, NULL, 3903 &tw->tw_isoc_bufs[i].cookie, &ccount); 3904 3905 if ((result == DDI_DMA_MAPPED) && (ccount == 1)) { 3906 tw->tw_isoc_bufs[i].length = xfer_size; 3907 3908 continue; 3909 } else { 3910 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3911 "uhci_create_isoc_transfer_wrapper: " 3912 "Bind handle %d failed", i); 3913 if (result == DDI_DMA_MAPPED) { 3914 result = ddi_dma_unbind_handle( 3915 tw->tw_isoc_bufs[i].dma_handle); 3916 ASSERT(result == USB_SUCCESS); 3917 } 3918 ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle); 3919 ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle); 3920 3921 for (j = 0; j < i; j++) { 3922 result = ddi_dma_unbind_handle( 3923 tw->tw_isoc_bufs[j].dma_handle); 3924 ASSERT(result == USB_SUCCESS); 3925 ddi_dma_mem_free(&tw->tw_isoc_bufs[j]. 3926 mem_handle); 3927 ddi_dma_free_handle(&tw->tw_isoc_bufs[j]. 3928 dma_handle); 3929 } 3930 kmem_free(tw->tw_isoc_bufs, strtlen); 3931 kmem_free(tw, sizeof (uhci_trans_wrapper_t)); 3932 3933 return (NULL); 3934 } 3935 } 3936 3937 tw->tw_ncookies = tmp_req->isoc_pkts_count; 3938 tw->tw_isoc_strtlen = strtlen; 3939 3940 /* 3941 * Only allow one wrapper to be added at a time. Insert the 3942 * new transaction wrapper into the list for this pipe. 
 */
	if (pp->pp_tw_head == NULL) {
		pp->pp_tw_head = tw;
		pp->pp_tw_tail = tw;
	} else {
		pp->pp_tw_tail->tw_next = tw;
		pp->pp_tw_tail = tw;
		ASSERT(tw->tw_next == NULL);
	}

	/* Store a back pointer to the pipe private structure */
	tw->tw_pipe_private = pp;

	/* Store the transfer type - synchronous or asynchronous */
	tw->tw_flags = usb_flags;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_isoc_transfer_wrapper: tw = 0x%p, ncookies = %u",
	    tw, tw->tw_ncookies);

	return (tw);
}

/*
 * uhci_insert_isoc_td:
 *	- Create transfer wrapper
 *	- Allocate memory for the isoc td's
 *	- Fill up all the TD's and submit to the HC
 *	- Update all the linked lists
 */
int
uhci_insert_isoc_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_isoc_req_t		*isoc_req,
	size_t			length,
	usb_flags_t		flags)
{
	int			rval = USB_SUCCESS;
	int			error;
	uint_t			ddic;
	uint32_t		i, j, index;
	uint32_t		bytes_to_xfer;
	uint32_t		expired_frames = 0;
	usb_frame_number_t	start_frame, end_frame, current_frame;
	uhci_td_t		*td_ptr;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: ph = 0x%p isoc req = %p length = %lu",
	    ph, (void *)isoc_req, length);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Allocate a transfer wrapper */
	if ((tw = uhci_create_isoc_transfer_wrapper(uhcip, pp, isoc_req,
	    length, flags)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/* Save the current isochronous request pointer */
	tw->tw_curr_xfer_reqp = (usb_opaque_t)isoc_req;

	/*
	 * Initialize the transfer wrapper. These values are useful
	 * for sending back the reply.
	 */
	tw->tw_handle_td = uhci_handle_isoc_td;
	tw->tw_handle_callback_value = NULL;
	tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
	    PID_OUT : PID_IN;

	/*
	 * If the transfer is an isoc send (OUT), copy the data from the
	 * request into the transfer wrapper.
 */
	if ((tw->tw_direction == PID_OUT) && length) {
		uchar_t *p;

		ASSERT(isoc_req->isoc_data != NULL);
		p = isoc_req->isoc_data->b_rptr;

		/* Copy the data into the message */
		for (i = 0; i < isoc_req->isoc_pkts_count; i++) {
			ddi_rep_put8(tw->tw_isoc_bufs[i].mem_handle,
			    p, (uint8_t *)tw->tw_isoc_bufs[i].buf_addr,
			    isoc_req->isoc_pkt_descr[i].isoc_pkt_length,
			    DDI_DEV_AUTOINCR);
			p += isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		}
	}

	if (tw->tw_direction == PID_IN) {
		if ((rval = uhci_allocate_periodic_in_resource(uhcip, pp, tw,
		    flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
			    "uhci_insert_isoc_td: isoc_req_t alloc failed");
			uhci_deallocate_tw(uhcip, pp, tw);

			return (rval);
		}

		isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;
	}

	tw->tw_isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;

	/* Get the pointer to the isoc_xfer_info structure */
	isoc_xfer_info = (uhci_bulk_isoc_xfer_t *)&tw->tw_xfer_info;
	isoc_xfer_info->num_tds = isoc_req->isoc_pkts_count;

	/*
	 * Allocate memory for the isoc tds
	 */
	if ((rval = uhci_alloc_bulk_isoc_tds(uhcip, isoc_req->isoc_pkts_count,
	    isoc_xfer_info)) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: isoc TD memory allocation failure");

		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}
		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	/*
	 * Get the address of the first isoc TD pool and of its first TD.
	 */
	td_pool_ptr = &isoc_xfer_info->td_pools[0];
	td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
	index = 0;

	/*
	 * Fill up the isoc tds
	 */
	USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td : isoc pkts %d", isoc_req->isoc_pkts_count);

	for (i = 0; i < isoc_xfer_info->num_pools; i++) {
		for (j = 0; j < td_pool_ptr->num_tds; j++) {
			bytes_to_xfer =
			    isoc_req->isoc_pkt_descr[index].isoc_pkt_length;

			uhci_fill_in_bulk_isoc_td(uhcip, &td_ptr[j],
			    (uhci_td_t *)NULL, HC_END_OF_LIST, ph, index,
			    bytes_to_xfer, tw);
			td_ptr[j].isoc_pkt_index = index;
			index++;
		}

		if (i < (isoc_xfer_info->num_pools - 1)) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i + 1];
			td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
		}
	}
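
	/*
	 * An illustrative walk-through of the fill loop above (the pool
	 * sizes here are hypothetical): if a 128-packet request were
	 * backed by two TD pools holding 100 and 28 TDs, the outer loop
	 * would visit pool 0 and assign isoc_pkt_index values 0..99 to
	 * its TDs, then advance to pool 1 and assign 100..127.
	 * uhci_get_isoc_td_by_index() below inverts this mapping by
	 * walking the same cumulative per-pool TD counts.
	 */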
	/*
	 * Get the starting frame number.
	 * The client driver sets the USB_ATTRS_ISOC_XFER_ASAP flag to ask
	 * the HCD to take care of the starting frame number.
	 *
	 * The following code is time critical, so execute it atomically.
	 */
	ddic = ddi_enter_critical();
	current_frame = uhci_get_sw_frame_number(uhcip);

	if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_START_FRAME) {
		start_frame = isoc_req->isoc_frame_no;
		end_frame = start_frame + isoc_req->isoc_pkts_count;

		/* Check available frames */
		if ((end_frame - current_frame) < UHCI_MAX_ISOC_FRAMES) {
			if (current_frame > start_frame) {
				if ((current_frame + FRNUM_OFFSET) <
				    end_frame) {
					expired_frames = current_frame +
					    FRNUM_OFFSET - start_frame;
					start_frame = current_frame +
					    FRNUM_OFFSET;
				} else {
					rval = USB_INVALID_START_FRAME;
				}
			}
		} else {
			rval = USB_INVALID_START_FRAME;
		}

	} else if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_XFER_ASAP) {
		start_frame = pp->pp_frame_num;

		if (start_frame == INVALID_FRNUM) {
			start_frame = current_frame + FRNUM_OFFSET;
		} else if (current_frame > start_frame) {
			start_frame = current_frame + FRNUM_OFFSET;
		}

		end_frame = start_frame + isoc_req->isoc_pkts_count;
		isoc_req->isoc_frame_no = start_frame;

	}

	if (rval != USB_SUCCESS) {

		/* Exit the critical section */
		ddi_exit_critical(ddic);

		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: Invalid starting frame number");

		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}

		while (tw->tw_hctd_head) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		for (i = 0; i < isoc_xfer_info->num_pools; i++) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i];
			error = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
			ASSERT(error == DDI_SUCCESS);
			ddi_dma_mem_free(&td_pool_ptr->mem_handle);
			ddi_dma_free_handle(&td_pool_ptr->dma_handle);
		}
		kmem_free(isoc_xfer_info->td_pools,
		    (sizeof (uhci_bulk_isoc_td_pool_t) *
		    isoc_xfer_info->num_pools));

		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	for (i = 0; i < expired_frames; i++) {
		isoc_req->isoc_pkt_descr[i].isoc_pkt_status =
		    USB_CR_NOT_ACCESSED;
		isoc_req->isoc_pkt_descr[i].isoc_pkt_actual_length =
		    isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		uhci_delete_td(uhcip, td_ptr);
		--isoc_xfer_info->num_tds;
	}

	/*
	 * Add the TDs to the HC list
	 */
	start_frame = (start_frame & 0x3ff);
	for (; i < isoc_req->isoc_pkts_count; i++) {
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		if (uhcip->uhci_isoc_q_tailp[start_frame]) {
			td_ptr->isoc_prev =
			    uhcip->uhci_isoc_q_tailp[start_frame];
			td_ptr->isoc_next = NULL;
			td_ptr->link_ptr =
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr;
			uhcip->uhci_isoc_q_tailp[start_frame]->isoc_next =
			    td_ptr;
			SetTD32(uhcip,
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr,
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
		} else {
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
			td_ptr->isoc_next = NULL;
			td_ptr->isoc_prev = NULL;
			SetTD32(uhcip, td_ptr->link_ptr,
			    GetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame]));
			SetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame],
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
		}
		td_ptr->starting_frame = start_frame;

		if (++start_frame == NUM_FRAME_LST_ENTRIES)
			start_frame = 0;
	}

	ddi_exit_critical(ddic);
	pp->pp_frame_num = end_frame;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: current frame number 0x%llx, pipe frame num"
	    " 0x%llx", current_frame, pp->pp_frame_num);

	return (rval);
}
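

/*
 * The following is a minimal, non-compiled sketch (guarded by #if 0)
 * of the per-frame list insertion performed at the end of
 * uhci_insert_isoc_td() above. All names here are hypothetical, and
 * plain pointers stand in for the physical-address link pointers that
 * the real code programs through SetTD32()/SetFL32().
 */
#if 0
#define	EX_NUM_FRAMES	1024		/* size of the HC frame list */

struct ex_td {
	struct ex_td	*isoc_next;	/* software forward/back links */
	struct ex_td	*isoc_prev;
	struct ex_td	*link;		/* stands in for link_ptr */
	uint_t		frame;		/* frame this TD is queued on */
};

static struct ex_td	*ex_frame_list[EX_NUM_FRAMES];	/* HC scan list */
static struct ex_td	*ex_isoc_tail[EX_NUM_FRAMES];	/* per-frame tails */

static void
ex_append_isoc_td(struct ex_td *td, uint_t frame)
{
	td->frame = frame;
	td->isoc_next = NULL;

	if (ex_isoc_tail[frame] != NULL) {
		/* Chain the TD after the current tail for this frame. */
		td->isoc_prev = ex_isoc_tail[frame];
		td->link = ex_isoc_tail[frame]->link;
		ex_isoc_tail[frame]->isoc_next = td;
		ex_isoc_tail[frame]->link = td;
	} else {
		/* First isoc TD in this frame: link at the list head. */
		td->isoc_prev = NULL;
		td->link = ex_frame_list[frame];
		ex_frame_list[frame] = td;
	}
	ex_isoc_tail[frame] = td;
}
#endif
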
/*
 * uhci_get_isoc_td_by_index:
 *	Obtain the addresses of the TD pool and the TD at the index.
 *
 * tdpp - pointer to the address of the TD at the isoc packet index
 * td_pool_pp - pointer to the address of the TD pool containing
 *              the specified TD
 */
/* ARGSUSED */
static void
uhci_get_isoc_td_by_index(
	uhci_state_t		*uhcip,
	uhci_bulk_isoc_xfer_t	*info,
	uint_t			index,
	uhci_td_t		**tdpp,
	uhci_bulk_isoc_td_pool_t **td_pool_pp)
{
	uint_t			i = 0, j = 0;
	uhci_td_t		*td_ptr;

	while (j < info->num_pools) {
		if ((i + info->td_pools[j].num_tds) <= index) {
			i += info->td_pools[j].num_tds;
			j++;
		} else {
			i = index - i;

			break;
		}
	}

	ASSERT(j < info->num_pools);
	*td_pool_pp = &info->td_pools[j];
	td_ptr = (uhci_td_t *)((*td_pool_pp)->pool_addr);
	*tdpp = &td_ptr[i];
}


/*
 * uhci_handle_isoc_td:
 *	Handles the completed isoc tds
 */
void
uhci_handle_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint_t			rval, i;
	uint32_t		pkt_index = td->isoc_pkt_index;
	usb_cr_t		cr;
	uhci_trans_wrapper_t	*tw = td->tw;
	usb_isoc_req_t		*isoc_req = (usb_isoc_req_t *)tw->tw_isoc_req;
	uhci_pipe_private_t	*pp = tw->tw_pipe_private;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info = &tw->tw_xfer_info;
	usba_pipe_handle_data_t	*usb_pp;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_handle_isoc_td: td = 0x%p, pp = 0x%p, tw = 0x%p, req = 0x%p, "
	    "index = %x", td, pp, tw, isoc_req, pkt_index);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	usb_pp = pp->pp_pipe_handle;

	/*
	 * Check whether any errors occurred. If so, update the error
	 * count, but never hand a non-zero completion reason up to the
	 * client.
	 */
	cr = USB_CR_OK;
	if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_handle_isoc_td: Error Occurred: TD Status = %x",
		    GetTD_status(uhcip, td));
		if (isoc_req != NULL) {
			isoc_req->isoc_error_count++;
		}
	}

	if (isoc_req != NULL) {
		isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_status = cr;
		isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_actual_length =
		    (GetTD_alen(uhcip, td) == ZERO_LENGTH) ? 0 :
		    GetTD_alen(uhcip, td) + 1;
	}

	uhci_delete_isoc_td(uhcip, td);

	if (--isoc_xfer_info->num_tds != 0) {
		USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_handle_isoc_td: Number of TDs %d",
		    isoc_xfer_info->num_tds);

		return;
	}

	tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;
	if (tw->tw_direction == PID_IN) {
		uhci_sendup_td_message(uhcip, cr, tw);

		if ((uhci_handle_isoc_receive(uhcip, pp, tw)) != USB_SUCCESS) {
			USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
			    "uhci_handle_isoc_td: Drop message");
		}

	} else {
		/*
		 * Update kstats only for OUT;
		 * uhci_sendup_td_message() does it for IN.
		 */
		uhci_do_byte_stats(uhcip, tw->tw_length,
		    usb_pp->p_ep.bmAttributes, usb_pp->p_ep.bEndpointAddress);

		uhci_hcdi_callback(uhcip, pp, usb_pp, tw, USB_CR_OK);
	}

	for (i = 0; i < isoc_xfer_info->num_pools; i++) {
		td_pool_ptr = &isoc_xfer_info->td_pools[i];
		rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
		ASSERT(rval == DDI_SUCCESS);
		ddi_dma_mem_free(&td_pool_ptr->mem_handle);
		ddi_dma_free_handle(&td_pool_ptr->dma_handle);
	}
	kmem_free(isoc_xfer_info->td_pools,
	    (sizeof (uhci_bulk_isoc_td_pool_t) *
	    isoc_xfer_info->num_pools));
	uhci_deallocate_tw(uhcip, pp, tw);
}


/*
 * uhci_handle_isoc_receive:
 *	- Sends the isoc data to the client
 *	- Inserts another isoc receive request
 */
static int
uhci_handle_isoc_receive(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw)
{
	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_handle_isoc_receive: tw = 0x%p", tw);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * -- check that the pipe state is polling before inserting a
	 * new request. Also check when TD de-allocation is done (so
	 * that the same TD can be reused).
	 */
	if (uhci_start_isoc_receive_polling(uhcip,
	    pp->pp_pipe_handle, (usb_isoc_req_t *)tw->tw_curr_xfer_reqp,
	    0) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_handle_isoc_receive: receive polling failed");

		return (USB_FAILURE);
	}

	return (USB_SUCCESS);
}


/*
 * uhci_delete_isoc_td:
 *	- Delete from the outstanding command queue
 *	- Delete from the tw queue
 *	- Delete from the isoc queue
 *	- Delete from the HOST CONTROLLER list
 */
static void
uhci_delete_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint32_t	starting_frame = td->starting_frame;

	if ((td->isoc_next == NULL) && (td->isoc_prev == NULL)) {
		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
		    GetTD32(uhcip, td->link_ptr));
		uhcip->uhci_isoc_q_tailp[starting_frame] = 0;
	} else if (td->isoc_next == NULL) {
		td->isoc_prev->link_ptr = td->link_ptr;
		td->isoc_prev->isoc_next = NULL;
		uhcip->uhci_isoc_q_tailp[starting_frame] = td->isoc_prev;
	} else if (td->isoc_prev == NULL) {
		td->isoc_next->isoc_prev = NULL;
		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
		    GetTD32(uhcip, td->link_ptr));
	} else {
		td->isoc_prev->isoc_next = td->isoc_next;
		td->isoc_next->isoc_prev = td->isoc_prev;
		td->isoc_prev->link_ptr = td->link_ptr;
	}

	uhci_delete_td(uhcip, td);
}


/*
 * uhci_start_isoc_receive_polling:
 *	- Allocates usb_isoc_request
 *	- Updates the isoc request
 *	- Inserts the isoc td's into the HC processing list.
4440 */ 4441 int 4442 uhci_start_isoc_receive_polling( 4443 uhci_state_t *uhcip, 4444 usba_pipe_handle_data_t *ph, 4445 usb_isoc_req_t *isoc_req, 4446 usb_flags_t usb_flags) 4447 { 4448 int ii, error; 4449 size_t max_isoc_xfer_size, length; 4450 ushort_t isoc_pkt_count; 4451 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 4452 usb_isoc_pkt_descr_t *isoc_pkt_descr; 4453 4454 USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 4455 "uhci_start_isoc_receive_polling: usb_flags = %x", usb_flags); 4456 4457 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 4458 4459 max_isoc_xfer_size = ph->p_ep.wMaxPacketSize * UHCI_MAX_ISOC_PKTS; 4460 4461 if (isoc_req) { 4462 isoc_pkt_descr = isoc_req->isoc_pkt_descr; 4463 isoc_pkt_count = isoc_req->isoc_pkts_count; 4464 } else { 4465 isoc_pkt_descr = ((usb_isoc_req_t *) 4466 pp->pp_client_periodic_in_reqp)->isoc_pkt_descr; 4467 isoc_pkt_count = ((usb_isoc_req_t *) 4468 pp->pp_client_periodic_in_reqp)->isoc_pkts_count; 4469 } 4470 4471 for (ii = 0, length = 0; ii < isoc_pkt_count; ii++) { 4472 length += isoc_pkt_descr->isoc_pkt_length; 4473 isoc_pkt_descr++; 4474 } 4475 4476 /* Check the size of isochronous request */ 4477 if (length > max_isoc_xfer_size) { 4478 USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 4479 "uhci_start_isoc_receive_polling: " 4480 "Max isoc request size = %lx, Given isoc req size = %lx", 4481 max_isoc_xfer_size, length); 4482 4483 return (USB_FAILURE); 4484 } 4485 4486 /* Add the TD into the Host Controller's isoc list */ 4487 error = uhci_insert_isoc_td(uhcip, ph, isoc_req, length, usb_flags); 4488 4489 return (error); 4490 } 4491 4492 4493 /* 4494 * uhci_remove_isoc_tds_tws 4495 * This routine scans the pipe and removes all the td's 4496 * and transfer wrappers and deallocates the memory 4497 * associated with those td's and tw's. 
 */
void
uhci_remove_isoc_tds_tws(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uint_t			rval, i;
	uhci_td_t		*tmp_td, *td_head;
	usb_isoc_req_t		*isoc_req;
	uhci_trans_wrapper_t	*tmp_tw, *tw_head;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_remove_isoc_tds_tws: pp = %p", (void *)pp);

	tw_head = pp->pp_tw_head;
	while (tw_head) {
		tmp_tw = tw_head;
		tw_head = tw_head->tw_next;
		td_head = tmp_tw->tw_hctd_head;
		if (tmp_tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp,
			    tmp_tw);
		} else if (tmp_tw->tw_direction == PID_OUT) {
			uhci_hcdi_callback(uhcip, pp, pp->pp_pipe_handle,
			    tmp_tw, USB_CR_FLUSHED);
		}

		while (td_head) {
			tmp_td = td_head;
			td_head = td_head->tw_td_next;
			uhci_delete_isoc_td(uhcip, tmp_td);
		}

		isoc_req = (usb_isoc_req_t *)tmp_tw->tw_isoc_req;
		if (isoc_req) {
			usb_free_isoc_req(isoc_req);
		}

		ASSERT(tmp_tw->tw_hctd_head == NULL);

		if (tmp_tw->tw_xfer_info.td_pools) {
			isoc_xfer_info =
			    (uhci_bulk_isoc_xfer_t *)&tmp_tw->tw_xfer_info;
			for (i = 0; i < isoc_xfer_info->num_pools; i++) {
				td_pool_ptr = &isoc_xfer_info->td_pools[i];
				rval = ddi_dma_unbind_handle(
				    td_pool_ptr->dma_handle);
				ASSERT(rval == DDI_SUCCESS);
				ddi_dma_mem_free(&td_pool_ptr->mem_handle);
				ddi_dma_free_handle(&td_pool_ptr->dma_handle);
			}
			kmem_free(isoc_xfer_info->td_pools,
			    (sizeof (uhci_bulk_isoc_td_pool_t) *
			    isoc_xfer_info->num_pools));
		}

		uhci_deallocate_tw(uhcip, pp, tmp_tw);
	}
}


/*
 * uhci_isoc_update_sw_frame_number()
 *	Update the software frame number; to avoid code duplication,
 *	simply call uhci_get_sw_frame_number().
 */
void
uhci_isoc_update_sw_frame_number(uhci_state_t *uhcip)
{
	(void) uhci_get_sw_frame_number(uhcip);
}


/*
 * uhci_get_sw_frame_number:
 *	The caller must hold uhci_int_mutex before calling this routine.
 */
uint64_t
uhci_get_sw_frame_number(uhci_state_t *uhcip)
{
	uint64_t sw_frnum, hw_frnum, current_frnum;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	sw_frnum = uhcip->uhci_sw_frnum;
	hw_frnum = Get_OpReg16(FRNUM);

	/*
	 * Check bit 10 of the software counter and the hardware frame
	 * counter. If both are the same, don't increment the software
	 * frame counter (bit 10 of the hw frame counter toggles every
	 * 1024 frames). The lower 11 bits of the software counter
	 * contain the hardware frame counter value; the MSB (bit 10)
	 * of the software counter is incremented for every 1024 frames,
	 * either here or in the get-frame-number routine.
	 */
	if ((sw_frnum & UHCI_BIT_10_MASK) == (hw_frnum & UHCI_BIT_10_MASK)) {
		/* Bit 10 of the hw counter did not toggle */
		current_frnum = ((sw_frnum & (SW_FRNUM_MASK)) | hw_frnum);
	} else {
		/*
		 * The hw counter wrapped around and the interrupt handler
		 * did not get a chance to update the sw frame counter, so
		 * update the sw frame counter here and return the correct
		 * frame number.
		 */
		sw_frnum >>= UHCI_SIZE_OF_HW_FRNUM - 1;
		current_frnum =
		    ((++sw_frnum << (UHCI_SIZE_OF_HW_FRNUM - 1)) | hw_frnum);
	}
	uhcip->uhci_sw_frnum = current_frnum;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_get_sw_frame_number: sw=%ld hw=%ld",
	    uhcip->uhci_sw_frnum, hw_frnum);

	return (current_frnum);
}
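

/*
 * A minimal, non-compiled sketch (guarded by #if 0) of the counter
 * extension performed by uhci_get_sw_frame_number() above, assuming
 * an 11-bit hardware FRNUM register whose bit 10 toggles every 1024
 * frames. The function name and the explicit masks are hypothetical
 * stand-ins for the driver's UHCI_BIT_10_MASK/SW_FRNUM_MASK macros.
 */
#if 0
static uint64_t
ex_extend_frame_number(uint64_t sw, uint64_t hw)
{
	if ((sw & 0x400) == (hw & 0x400)) {
		/* Bit 10 did not toggle: splice the hw bits into sw. */
		return ((sw & ~(uint64_t)0x7ff) | hw);
	}

	/*
	 * Bit 10 toggled before the SOF handler ran: add a carry of
	 * 1024 frames, then splice in the hw bits.
	 */
	return ((((sw >> 10) + 1) << 10) | hw);
}
#endif
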
/*
 * uhci_cmd_timeout_hdlr:
 *	This routine is called once every second. It checks for
 *	timed-out control/bulk commands and times out any command that
 *	has exceeded the timeout period specified by the pipe policy.
 */
void
uhci_cmd_timeout_hdlr(void *arg)
{
	uint_t			flag = B_FALSE;
	uhci_td_t		*head, *tmp_td;
	uhci_state_t		*uhcip = (uhci_state_t *)arg;
	uhci_pipe_private_t	*pp;

	/*
	 * Check whether any of the control xfers are timed out.
	 * If so, complete those commands with time out as reason.
	 */
	mutex_enter(&uhcip->uhci_int_mutex);
	head = uhcip->uhci_outst_tds_head;

	while (head) {
		/*
		 * If the timeout is zero, then don't time out the command.
		 */
		if (head->tw->tw_timeout_cnt == 0) {
			head = head->outst_td_next;
			continue;
		}

		if (!(head->tw->tw_flags & TW_TIMEOUT_FLAG)) {
			head->tw->tw_flags |= TW_TIMEOUT_FLAG;
			--head->tw->tw_timeout_cnt;
		}

		/* only do it for bulk and control TDs */
		if ((head->tw->tw_timeout_cnt == 0) &&
		    (head->tw->tw_handle_td != uhci_handle_isoc_td)) {

			USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "Command timed out: td = %p", (void *)head);

			head->tw->tw_claim = UHCI_TIMEOUT_HDLR_CLAIMED;

			/*
			 * Finally, check whether the command completed.
			 */
			if (GetTD_status(uhcip, head) & UHCI_TD_ACTIVE) {
				SetTD32(uhcip, head->link_ptr,
				    GetTD32(uhcip, head->link_ptr) |
				    HC_END_OF_LIST);
				pp = head->tw->tw_pipe_private;
				SetQH32(uhcip, pp->pp_qh->element_ptr,
				    GetQH32(uhcip, pp->pp_qh->element_ptr) |
				    HC_END_OF_LIST);
			}

			flag = B_TRUE;
		}

		head = head->outst_td_next;
	}

	if (flag) {
		(void) uhci_wait_for_sof(uhcip);
	}

	head = uhcip->uhci_outst_tds_head;
	while (head) {
		if (head->tw->tw_flags & TW_TIMEOUT_FLAG) {
			head->tw->tw_flags &= ~TW_TIMEOUT_FLAG;
		}
		if (head->tw->tw_claim == UHCI_TIMEOUT_HDLR_CLAIMED) {
			head->tw->tw_claim = UHCI_NOT_CLAIMED;
			tmp_td = head->tw->tw_hctd_head;
			while (tmp_td) {
				SetTD_status(uhcip, tmp_td,
				    UHCI_TD_CRC_TIMEOUT);
				tmp_td = tmp_td->tw_td_next;
			}
		}
		head = head->outst_td_next;
	}

	/*
	 * Process the td which was completed before shifting from normal
	 * mode to polled mode
	 */
	if (uhcip->uhci_polled_flag == UHCI_POLLED_FLAG_TRUE) {
		uhci_process_submitted_td_queue(uhcip);
		uhcip->uhci_polled_flag = UHCI_POLLED_FLAG_FALSE;
	} else if (flag) {
		/* Process the completed/timed out commands */
		uhci_process_submitted_td_queue(uhcip);
	}

	/* Re-register the control/bulk/intr commands' timeout handler */
	if (uhcip->uhci_cmd_timeout_id) {
		uhcip->uhci_cmd_timeout_id = timeout(uhci_cmd_timeout_hdlr,
		    (void *)uhcip, UHCI_ONE_SECOND);
	}

	mutex_exit(&uhcip->uhci_int_mutex);
}
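

/*
 * A minimal, non-compiled sketch (guarded by #if 0) of the
 * TW_TIMEOUT_FLAG technique used by uhci_cmd_timeout_hdlr() above.
 * The outstanding list holds TDs, and several TDs may share one
 * transfer wrapper; the per-sweep flag ensures each wrapper's counter
 * is decremented only once per second. All names are hypothetical.
 */
#if 0
struct ex_tw {
	uint_t	timeout_cnt;		/* seconds left; 0 = no timeout */
	uint_t	flags;
};

struct ex_outst_td {
	struct ex_outst_td	*next;
	struct ex_tw		*tw;
};

#define	EX_TIMEOUT_FLAG		0x01

static void
ex_timeout_sweep(struct ex_outst_td *list)
{
	struct ex_outst_td *td;

	/* First pass: decrement each wrapper at most once. */
	for (td = list; td != NULL; td = td->next) {
		if (td->tw->timeout_cnt != 0 &&
		    !(td->tw->flags & EX_TIMEOUT_FLAG)) {
			td->tw->flags |= EX_TIMEOUT_FLAG;
			if (--td->tw->timeout_cnt == 0) {
				/* ... time the command out here ... */
			}
		}
	}

	/* Second pass: clear the per-sweep markers for the next sweep. */
	for (td = list; td != NULL; td = td->next)
		td->tw->flags &= ~EX_TIMEOUT_FLAG;
}
#endif
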
/*
 * uhci_wait_for_sof:
 *	Wait for the start of the next frame (implying any changes made in the
 *	lattice have now taken effect).
 *	To be sure this is the case, we wait for the completion of the current
 *	frame (which might have already been pending), then another complete
 *	frame to ensure everything has taken effect.
 */
int
uhci_wait_for_sof(uhci_state_t *uhcip)
{
	int			n;
	ushort_t		cmd_reg;
	usb_frame_number_t	before_frame_number, after_frame_number;
	clock_t			time, rval;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_wait_for_sof: uhcip = %p", uhcip);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	before_frame_number = uhci_get_sw_frame_number(uhcip);
	for (n = 0; n < MAX_SOF_WAIT_COUNT; n++) {
		SetTD_ioc(uhcip, uhcip->uhci_sof_td, 1);
		uhcip->uhci_cv_signal = B_TRUE;

		time = ddi_get_lbolt() + UHCI_ONE_SECOND;
		rval = cv_timedwait(&uhcip->uhci_cv_SOF,
		    &uhcip->uhci_int_mutex, time);

		after_frame_number = uhci_get_sw_frame_number(uhcip);
		if ((rval == -1) &&
		    (after_frame_number <= before_frame_number)) {
			cmd_reg = Get_OpReg16(USBCMD);
			Set_OpReg16(USBCMD, (cmd_reg | USBCMD_REG_HC_RUN));
			Set_OpReg16(USBINTR, ENABLE_ALL_INTRS);
			after_frame_number = uhci_get_sw_frame_number(uhcip);
		}
		before_frame_number = after_frame_number;
	}

	SetTD_ioc(uhcip, uhcip->uhci_sof_td, 0);

	return (uhcip->uhci_cv_signal ? USB_FAILURE : USB_SUCCESS);
}


/*
 * uhci_allocate_periodic_in_resource:
 *	Allocate interrupt/isochronous request structure for the
 *	interrupt/isochronous IN transfer.
 */
int
uhci_allocate_periodic_in_resource(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw,
	usb_flags_t		flags)
{
	size_t			length = 0;
	usb_opaque_t		client_periodic_in_reqp;
	usb_intr_req_t		*cur_intr_req;
	usb_isoc_req_t		*curr_isoc_reqp;
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_allocate_periodic_in_resource:\n\t"
	    "ph = 0x%p, pp = 0x%p, tw = 0x%p, flags = 0x%x", ph, pp, tw, flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Check the current periodic in request pointer */
	if (tw->tw_curr_xfer_reqp) {
		USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
		    "uhci_allocate_periodic_in_resource: a request "
		    "structure already exists: skipping allocation");

		return (USB_SUCCESS);
	}

	/* Get the client periodic in request pointer */
	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;

	/*
	 * If it is a periodic IN request and the periodic request is NULL,
	 * allocate a corresponding usb periodic IN request for the
	 * current periodic polling request and copy the information
	 * from the saved periodic request structure.
4808 */ 4809 if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_INTR) { 4810 /* Get the interrupt transfer length */ 4811 length = ((usb_intr_req_t *)client_periodic_in_reqp)-> 4812 intr_len; 4813 4814 cur_intr_req = usba_hcdi_dup_intr_req(ph->p_dip, 4815 (usb_intr_req_t *)client_periodic_in_reqp, length, flags); 4816 if (cur_intr_req == NULL) { 4817 USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl, 4818 "uhci_allocate_periodic_in_resource: Interrupt " 4819 "request structure allocation failed"); 4820 4821 return (USB_NO_RESOURCES); 4822 } 4823 4824 /* Check and save the timeout value */ 4825 tw->tw_timeout_cnt = (cur_intr_req->intr_attributes & 4826 USB_ATTRS_ONE_XFER) ? cur_intr_req->intr_timeout: 0; 4827 tw->tw_curr_xfer_reqp = (usb_opaque_t)cur_intr_req; 4828 tw->tw_length = cur_intr_req->intr_len; 4829 } else { 4830 ASSERT(client_periodic_in_reqp != NULL); 4831 4832 if ((curr_isoc_reqp = usba_hcdi_dup_isoc_req(ph->p_dip, 4833 (usb_isoc_req_t *)client_periodic_in_reqp, flags)) == 4834 NULL) { 4835 USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl, 4836 "uhci_allocate_periodic_in_resource: Isochronous " 4837 "request structure allocation failed"); 4838 4839 return (USB_NO_RESOURCES); 4840 } 4841 4842 /* 4843 * Save the client's isochronous request pointer and 4844 * length of isochronous transfer in transfer wrapper. 4845 * The dup'ed request is saved in pp_client_periodic_in_reqp 4846 */ 4847 tw->tw_curr_xfer_reqp = 4848 (usb_opaque_t)pp->pp_client_periodic_in_reqp; 4849 pp->pp_client_periodic_in_reqp = (usb_opaque_t)curr_isoc_reqp; 4850 tw->tw_length = curr_isoc_reqp->isoc_pkts_length; 4851 } 4852 4853 mutex_enter(&ph->p_mutex); 4854 ph->p_req_count++; 4855 mutex_exit(&ph->p_mutex); 4856 4857 return (USB_SUCCESS); 4858 } 4859 4860 4861 /* 4862 * uhci_deallocate_periodic_in_resource: 4863 * Deallocate interrupt/isochronous request structure for the 4864 * interrupt/isochronous IN transfer. 4865 */ 4866 void 4867 uhci_deallocate_periodic_in_resource( 4868 uhci_state_t *uhcip, 4869 uhci_pipe_private_t *pp, 4870 uhci_trans_wrapper_t *tw) 4871 { 4872 usb_opaque_t curr_xfer_reqp; 4873 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle; 4874 4875 USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl, 4876 "uhci_deallocate_periodic_in_resource: " 4877 "pp = 0x%p tw = 0x%p", pp, tw); 4878 4879 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 4880 4881 curr_xfer_reqp = tw->tw_curr_xfer_reqp; 4882 if (curr_xfer_reqp) { 4883 /* 4884 * Reset periodic in request usb isoch 4885 * packet request pointers to null. 4886 */ 4887 tw->tw_curr_xfer_reqp = NULL; 4888 tw->tw_isoc_req = NULL; 4889 4890 mutex_enter(&ph->p_mutex); 4891 ph->p_req_count--; 4892 mutex_exit(&ph->p_mutex); 4893 4894 /* 4895 * Free pre-allocated interrupt or isochronous requests. 
4896 */ 4897 switch (UHCI_XFER_TYPE(&ph->p_ep)) { 4898 case USB_EP_ATTR_INTR: 4899 usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp); 4900 break; 4901 case USB_EP_ATTR_ISOCH: 4902 usb_free_isoc_req((usb_isoc_req_t *)curr_xfer_reqp); 4903 break; 4904 } 4905 } 4906 } 4907 4908 4909 /* 4910 * uhci_hcdi_callback() 4911 * convenience wrapper around usba_hcdi_callback() 4912 */ 4913 void 4914 uhci_hcdi_callback(uhci_state_t *uhcip, uhci_pipe_private_t *pp, 4915 usba_pipe_handle_data_t *ph, uhci_trans_wrapper_t *tw, usb_cr_t cr) 4916 { 4917 usb_opaque_t curr_xfer_reqp; 4918 4919 USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl, 4920 "uhci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x", ph, tw, cr); 4921 4922 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 4923 4924 if (tw && tw->tw_curr_xfer_reqp) { 4925 curr_xfer_reqp = tw->tw_curr_xfer_reqp; 4926 tw->tw_curr_xfer_reqp = NULL; 4927 tw->tw_isoc_req = NULL; 4928 } else { 4929 ASSERT(pp->pp_client_periodic_in_reqp != NULL); 4930 4931 curr_xfer_reqp = pp->pp_client_periodic_in_reqp; 4932 pp->pp_client_periodic_in_reqp = NULL; 4933 } 4934 4935 ASSERT(curr_xfer_reqp != NULL); 4936 4937 mutex_exit(&uhcip->uhci_int_mutex); 4938 usba_hcdi_cb(ph, curr_xfer_reqp, cr); 4939 mutex_enter(&uhcip->uhci_int_mutex); 4940 } 4941 4942 4943 #ifdef DEBUG 4944 static void 4945 uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td) 4946 { 4947 uint_t *ptr = (uint_t *)td; 4948 4949 #ifndef lint 4950 _NOTE(NO_COMPETING_THREADS_NOW); 4951 #endif 4952 USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl, 4953 "\tDWORD 1 0x%x\t DWORD 2 0x%x", ptr[0], ptr[1]); 4954 USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl, 4955 "\tDWORD 3 0x%x\t DWORD 4 0x%x", ptr[2], ptr[3]); 4956 USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl, 4957 "\tBytes xfered = %d", td->tw->tw_bytes_xfered); 4958 USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl, 4959 "\tBytes Pending = %d", td->tw->tw_bytes_pending); 4960 USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl, 4961 "Queue Head Details:"); 4962 uhci_print_qh(uhcip, td->tw->tw_pipe_private->pp_qh); 4963 4964 #ifndef lint 4965 _NOTE(COMPETING_THREADS_NOW); 4966 #endif 4967 } 4968 4969 4970 static void 4971 uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh) 4972 { 4973 uint_t *ptr = (uint_t *)qh; 4974 4975 USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl, 4976 "\tLink Ptr = %x Element Ptr = %x", ptr[0], ptr[1]); 4977 } 4978 #endif 4979
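

/*
 * A minimal, non-compiled sketch (guarded by #if 0) of the callback
 * pattern used by uhci_hcdi_callback() above: the request is detached
 * from its owner while uhci_int_mutex is held, and the mutex is
 * dropped around the client callback so that the client may re-enter
 * the HCD (for example, to start the next transfer) without
 * deadlocking. All names here are hypothetical.
 */
#if 0
static void
ex_do_callback(kmutex_t *int_mutex, usb_opaque_t *reqpp,
    void (*cb)(usb_opaque_t, usb_cr_t), usb_cr_t cr)
{
	usb_opaque_t req;

	ASSERT(mutex_owned(int_mutex));

	/* Claim the request under the lock so nobody else completes it. */
	req = *reqpp;
	*reqpp = NULL;

	mutex_exit(int_mutex);
	cb(req, cr);			/* may call back into the HCD */
	mutex_enter(int_mutex);
}
#endif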