/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * Universal Host Controller Driver (UHCI)
 *
 * The UHCI driver interfaces the Universal Serial Bus Driver (USBA)
 * to the Host Controller (HC). The interface to the Host Controller
 * is defined by the UHCI specification.
 * This file contains miscellaneous functions.
 */
#include <sys/usb/hcd/uhci/uhcid.h>
#include <sys/usb/hcd/uhci/uhciutil.h>
#include <sys/usb/hcd/uhci/uhcipolled.h>

#include <sys/disp.h>

/* Globals */
extern uint_t	uhci_td_pool_size;	/* Num TDs */
extern uint_t	uhci_qh_pool_size;	/* Num QHs */
extern ushort_t	uhci_tree_bottom_nodes[];
extern void	*uhci_statep;

/* function prototypes */
static void	uhci_build_interrupt_lattice(uhci_state_t *uhcip);
static int	uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip);

static uint_t	uhci_lattice_height(uint_t bandwidth);
static uint_t	uhci_lattice_parent(uint_t node);
static uint_t	uhci_leftmost_leaf(uint_t node, uint_t height);
static uint_t	uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
		    usb_port_status_t port_status);

static int	uhci_bandwidth_adjust(uhci_state_t *uhcip,
		    usb_ep_descr_t *endpoint, usb_port_status_t port_status);

static uhci_td_t *uhci_allocate_td_from_pool(uhci_state_t *uhcip);
static void	uhci_fill_in_td(uhci_state_t *uhcip,
		    uhci_td_t *td, uhci_td_t *current_dummy,
		    uint32_t buffer_offset, size_t length,
		    uhci_pipe_private_t *pp, uchar_t PID,
		    usb_req_attrs_t attrs, uhci_trans_wrapper_t *tw);
static uint32_t	uhci_get_tw_paddr_by_offs(uhci_state_t *uhcip,
		    uint32_t buffer_offset, size_t length,
		    uhci_trans_wrapper_t *tw);
static uhci_trans_wrapper_t *uhci_create_transfer_wrapper(
		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
		    size_t length, usb_flags_t usb_flags);
static uhci_trans_wrapper_t *uhci_create_isoc_transfer_wrapper(
		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
		    usb_isoc_req_t *req, size_t length,
		    usb_flags_t usb_flags);

static int	uhci_create_setup_pkt(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
static void	uhci_insert_ctrl_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_ctrl_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_insert_intr_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_intr_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_bulk_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_insert_bulk_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td);
static int	uhci_alloc_memory_for_tds(uhci_state_t *uhcip, uint_t num_tds,
		    uhci_bulk_isoc_xfer_t *info);
static int	uhci_alloc_bulk_isoc_tds(uhci_state_t *uhcip, uint_t num_tds,
		    uhci_bulk_isoc_xfer_t *info);
static void	uhci_get_isoc_td_by_index(uhci_state_t *uhcip,
		    uhci_bulk_isoc_xfer_t *info, uint_t index,
		    uhci_td_t **tdpp, uhci_bulk_isoc_td_pool_t **td_pool_pp);
static void	uhci_get_bulk_td_by_paddr(uhci_state_t *uhcip,
		    uhci_bulk_isoc_xfer_t *info, uint32_t paddr,
		    uhci_bulk_isoc_td_pool_t **td_pool_pp);

static int	uhci_handle_isoc_receive(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
static void	uhci_delete_isoc_td(uhci_state_t *uhcip,
		    uhci_td_t *td);
#ifdef DEBUG
static void	uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td);
static void	uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh);
#endif


/*
 * uhci_build_interrupt_lattice:
 *
 * Construct the interrupt lattice tree using static Queue Head pointers.
 * This interrupt lattice tree will have a total of 63 queue heads and the
 * Host Controller (HC) processes queue heads every frame.
 */
static void
uhci_build_interrupt_lattice(uhci_state_t *uhcip)
{
        int                     half_list = NUM_INTR_QH_LISTS / 2;
        uint16_t                i, j, k;
        uhci_td_t               *sof_td, *isoc_td;
        uintptr_t               addr;
        queue_head_t            *list_array = uhcip->uhci_qh_pool_addr;
        queue_head_t            *tmp_qh;
        frame_lst_table_t       *frame_lst_tablep =
                                    uhcip->uhci_frame_lst_tablep;

        USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_build_interrupt_lattice:");

        /*
         * Reserve the first 63 queue head structures in the pool as static
         * queue heads; these are required for constructing the interrupt
         * lattice tree.
         */
        for (i = 0; i < NUM_INTR_QH_LISTS; i++) {
                SetQH32(uhcip, list_array[i].link_ptr, HC_END_OF_LIST);
                SetQH32(uhcip, list_array[i].element_ptr, HC_END_OF_LIST);
                list_array[i].qh_flag = QUEUE_HEAD_FLAG_STATIC;
                list_array[i].node = i;
        }

        /* Build the interrupt lattice tree */
        for (i = 0; i < half_list - 1; i++) {
                /*
                 * The next pointer in the host controller queue head
                 * descriptor must contain an iommu address. Calculate
                 * the offset into the cpu address and add this to the
                 * starting iommu address.
                 */
                addr = QH_PADDR(&list_array[i]) | HC_QUEUE_HEAD;

                SetQH32(uhcip, list_array[2*i + 1].link_ptr, addr);
                SetQH32(uhcip, list_array[2*i + 2].link_ptr, addr);
        }

        /*
         * Initialize the interrupt list in the Frame list Table
         * so that it points to the bottom of the tree.
         */
        for (i = 0, j = 0; i < pow_2(TREE_HEIGHT); i++) {
                addr = QH_PADDR(&list_array[half_list + i - 1]);
                for (k = 0; k < pow_2(VIRTUAL_TREE_HEIGHT); k++) {
                        SetFL32(uhcip,
                            frame_lst_tablep[uhci_tree_bottom_nodes[j++]],
                            addr | HC_QUEUE_HEAD);
                }
        }

        /*
         * Create the control and bulk Queue Heads
         */
        uhcip->uhci_ctrl_xfers_q_head = uhci_alloc_queue_head(uhcip);
        tmp_qh = uhcip->uhci_ctrl_xfers_q_tail = uhcip->uhci_ctrl_xfers_q_head;

        SetQH32(uhcip, list_array[0].link_ptr,
            (QH_PADDR(tmp_qh) | HC_QUEUE_HEAD));

        uhcip->uhci_bulk_xfers_q_head = uhci_alloc_queue_head(uhcip);
        uhcip->uhci_bulk_xfers_q_tail = uhcip->uhci_bulk_xfers_q_head;
        SetQH32(uhcip, tmp_qh->link_ptr,
            (QH_PADDR(uhcip->uhci_bulk_xfers_q_head)|HC_QUEUE_HEAD));

        SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_head->link_ptr, HC_END_OF_LIST);

        /*
         * Add a dummy TD to the static queue head 0. This is used
         * to generate an interrupt at the end of the frame.
         */
        sof_td = uhci_allocate_td_from_pool(uhcip);

        SetQH32(uhcip, list_array[0].element_ptr,
            TD_PADDR(sof_td) | HC_TD_HEAD);
        SetTD32(uhcip, sof_td->link_ptr, HC_END_OF_LIST);
        uhcip->uhci_sof_td = sof_td;

        /*
         * Add a dummy td that is used to generate an interrupt for
         * every 1024 frames.
         */
        isoc_td = uhci_allocate_td_from_pool(uhcip);
        SetTD32(uhcip, isoc_td->link_ptr, HC_END_OF_LIST);
        uhcip->uhci_isoc_td = isoc_td;

        uhcip->uhci_isoc_qh = uhci_alloc_queue_head(uhcip);
        SetQH32(uhcip, uhcip->uhci_isoc_qh->link_ptr,
            GetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM]));
        SetQH32(uhcip, uhcip->uhci_isoc_qh->element_ptr, TD_PADDR(isoc_td));
        SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM],
            QH_PADDR(uhcip->uhci_isoc_qh) | HC_QUEUE_HEAD);
}
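

/*
 * Illustrative note (a sketch, not part of the original source; it
 * assumes TREE_HEIGHT == 5, VIRTUAL_TREE_HEIGHT == 5 and a 1024-entry
 * frame list): the 63 static queue heads above form a complete binary
 * tree with 32 leaves, and the frame list entries are spread over the
 * leaves 32 apiece via uhci_tree_bottom_nodes[]. Because the children
 * 2n+1 and 2n+2 of node n both link up to n, traversal runs from a
 * leaf toward node 0, so an interrupt QH spliced in at node 0 is
 * reached every frame while one spliced in at a leaf is reached once
 * every 32 frames; each level up the tree doubles the polling rate.
 */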


/*
 * uhci_allocate_pools:
 *	Allocate the system memory for the Queue Heads Descriptor and
 *	for the Transfer Descriptor (TD) pools. Both QH and TD structures
 *	must be aligned to a 16 byte boundary.
 */
int
uhci_allocate_pools(uhci_state_t *uhcip)
{
        dev_info_t              *dip = uhcip->uhci_dip;
        size_t                  real_length;
        int                     i, result;
        uint_t                  ccount;
        ddi_device_acc_attr_t   dev_attr;

        USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_allocate_pools:");

        /* The host controller will be little endian */
        dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
        dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
        dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

        /* Allocate the TD pool DMA handle */
        if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
            &uhcip->uhci_td_pool_dma_handle) != DDI_SUCCESS) {

                return (USB_FAILURE);
        }

        /* Allocate the memory for the TD pool */
        if (ddi_dma_mem_alloc(uhcip->uhci_td_pool_dma_handle,
            uhci_td_pool_size * sizeof (uhci_td_t),
            &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
            (caddr_t *)&uhcip->uhci_td_pool_addr, &real_length,
            &uhcip->uhci_td_pool_mem_handle)) {

                return (USB_FAILURE);
        }

        /* Map the TD pool into the I/O address space */
        result = ddi_dma_addr_bind_handle(uhcip->uhci_td_pool_dma_handle,
            NULL, (caddr_t)uhcip->uhci_td_pool_addr, real_length,
            DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
            NULL, &uhcip->uhci_td_pool_cookie, &ccount);

        bzero((void *)uhcip->uhci_td_pool_addr,
            uhci_td_pool_size * sizeof (uhci_td_t));

        /* Process the result */
        if (result == DDI_DMA_MAPPED) {
                /* The cookie count should be 1 */
                if (ccount != 1) {
                        USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                            "uhci_allocate_pools: More than 1 cookie");

                        return (USB_FAILURE);
                }
        } else {
                USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                    "uhci_allocate_pools: Result = %d", result);

                uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

                return (USB_FAILURE);
        }

        uhcip->uhci_dma_addr_bind_flag |= UHCI_TD_POOL_BOUND;

        /* Initialize the TD pool */
        for (i = 0; i < uhci_td_pool_size; i++) {
                uhcip->uhci_td_pool_addr[i].flag = TD_FLAG_FREE;
        }

        /* Allocate the QH pool DMA handle */
        if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
            0, &uhcip->uhci_qh_pool_dma_handle) != DDI_SUCCESS) {

                return (USB_FAILURE);
        }

        /* Allocate the memory for the QH pool */
        if (ddi_dma_mem_alloc(uhcip->uhci_qh_pool_dma_handle,
            uhci_qh_pool_size * sizeof (queue_head_t),
            &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
            (caddr_t *)&uhcip->uhci_qh_pool_addr, &real_length,
            &uhcip->uhci_qh_pool_mem_handle) != DDI_SUCCESS) {

                return (USB_FAILURE);
        }

        result = ddi_dma_addr_bind_handle(uhcip->uhci_qh_pool_dma_handle,
            NULL, (caddr_t)uhcip->uhci_qh_pool_addr, real_length,
            DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
            &uhcip->uhci_qh_pool_cookie, &ccount);

        /* Process the result */
        if (result == DDI_DMA_MAPPED) {
                /* The cookie count should be 1 */
                if (ccount != 1) {
                        USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                            "uhci_allocate_pools: More than 1 cookie");

                        return (USB_FAILURE);
                }
        } else {
                uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

                return (USB_FAILURE);
        }

        uhcip->uhci_dma_addr_bind_flag |= UHCI_QH_POOL_BOUND;

        bzero((void *)uhcip->uhci_qh_pool_addr,
            uhci_qh_pool_size * sizeof (queue_head_t));

        /* Initialize the QH pool */
        for (i = 0; i < uhci_qh_pool_size; i++) {
                uhcip->uhci_qh_pool_addr[i].qh_flag = QUEUE_HEAD_FLAG_FREE;
        }

        USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_allocate_pools: Completed");

        return (USB_SUCCESS);
}


/*
 * uhci_free_pools:
 *	Cleanup on attach failure or detach
 */
void
uhci_free_pools(uhci_state_t *uhcip)
{
        int                     i, flag, rval;
        uhci_td_t               *td;
        uhci_trans_wrapper_t    *tw;

        USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_free_pools:");

        if (uhcip->uhci_td_pool_addr && uhcip->uhci_td_pool_mem_handle) {
                for (i = 0; i < uhci_td_pool_size; i++) {
                        td = &uhcip->uhci_td_pool_addr[i];

                        flag = uhcip->uhci_td_pool_addr[i].flag;
                        if ((flag != TD_FLAG_FREE) &&
                            (flag != TD_FLAG_DUMMY) && (td->tw != NULL)) {
                                tw = td->tw;
                                uhci_free_tw(uhcip, tw);
                        }
                }

                if (uhcip->uhci_dma_addr_bind_flag & UHCI_TD_POOL_BOUND) {
                        rval = ddi_dma_unbind_handle(
                            uhcip->uhci_td_pool_dma_handle);
                        ASSERT(rval == DDI_SUCCESS);
                }

                ddi_dma_mem_free(&uhcip->uhci_td_pool_mem_handle);
        }

        /* Free the TD pool */
        if (uhcip->uhci_td_pool_dma_handle) {
                ddi_dma_free_handle(&uhcip->uhci_td_pool_dma_handle);
        }

        if (uhcip->uhci_qh_pool_addr && uhcip->uhci_qh_pool_mem_handle) {
                if (uhcip->uhci_dma_addr_bind_flag & UHCI_QH_POOL_BOUND) {
                        rval = ddi_dma_unbind_handle(
                            uhcip->uhci_qh_pool_dma_handle);
                        ASSERT(rval == DDI_SUCCESS);
                }
                ddi_dma_mem_free(&uhcip->uhci_qh_pool_mem_handle);
        }

        /* Free the QH pool */
        if (uhcip->uhci_qh_pool_dma_handle) {
                ddi_dma_free_handle(&uhcip->uhci_qh_pool_dma_handle);
        }

        /* Free the Frame list Table area */
        if (uhcip->uhci_frame_lst_tablep && uhcip->uhci_flt_mem_handle) {
                if (uhcip->uhci_dma_addr_bind_flag & UHCI_FLA_POOL_BOUND) {
                        rval = ddi_dma_unbind_handle(
                            uhcip->uhci_flt_dma_handle);
                        ASSERT(rval == DDI_SUCCESS);
                }
                ddi_dma_mem_free(&uhcip->uhci_flt_mem_handle);
        }

        if (uhcip->uhci_flt_dma_handle) {
                ddi_dma_free_handle(&uhcip->uhci_flt_dma_handle);
        }
}


/*
 * uhci_decode_ddi_dma_addr_bind_handle_result:
 *	Process the return values of ddi_dma_addr_bind_handle()
 */
void
uhci_decode_ddi_dma_addr_bind_handle_result(uhci_state_t *uhcip, int result)
{
        char *msg;

        USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
            "uhci_decode_ddi_dma_addr_bind_handle_result:");

        switch (result) {
        case DDI_DMA_PARTIAL_MAP:
                msg = "Partial transfers not allowed";
                break;
        case DDI_DMA_INUSE:
                msg = "Handle is in use";
                break;
        case DDI_DMA_NORESOURCES:
                msg = "No resources";
                break;
        case DDI_DMA_NOMAPPING:
                msg = "No mapping";
                break;
        case DDI_DMA_TOOBIG:
                msg = "Object is too big";
                break;
        default:
                msg = "Unknown dma error";
        }

        USB_DPRINTF_L4(PRINT_MASK_ALL, uhcip->uhci_log_hdl, "%s", msg);
}


/*
 * uhci_init_ctlr:
 *	Initialize the Host Controller (HC).
 */
int
uhci_init_ctlr(uhci_state_t *uhcip)
{
        dev_info_t      *dip = uhcip->uhci_dip;
        uint_t          cmd_reg;
        uint_t          frame_base_addr;

        mutex_enter(&uhcip->uhci_int_mutex);

        USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_init_ctlr:");

        /*
         * When USB legacy mode is enabled, the BIOS manages the USB keyboard
         * attached to the UHCI controller. It has been observed that
         * sometimes the BIOS does not clear the interrupts in the legacy
         * mode register in the PCI configuration space. So, disable the
         * SMI intrs and route the intrs to PIRQD here.
         */
        pci_config_put16(uhcip->uhci_config_handle,
            LEGACYMODE_REG_OFFSET, LEGACYMODE_REG_INIT_VALUE);

        /*
         * Disable all the interrupts.
         */
        Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

        cmd_reg = Get_OpReg16(USBCMD);
        cmd_reg &= (~USBCMD_REG_HC_RUN);

        /* Stop the controller */
        Set_OpReg16(USBCMD, cmd_reg);

        /* Reset the host controller */
        Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

        /* Wait 10ms for reset to complete */
        mutex_exit(&uhcip->uhci_int_mutex);
        delay(drv_usectohz(UHCI_RESET_DELAY));
        mutex_enter(&uhcip->uhci_int_mutex);

        Set_OpReg16(USBCMD, 0);

        /* Set the frame number to zero */
        Set_OpReg16(FRNUM, 0);

        if (uhcip->uhci_hc_soft_state == UHCI_CTLR_INIT_STATE) {
                /* Initialize the Frame list base address area */
                if (uhci_init_frame_lst_table(dip, uhcip) != USB_SUCCESS) {
                        mutex_exit(&uhcip->uhci_int_mutex);

                        return (USB_FAILURE);
                }
        }

        /* Save the contents of the Frame Interval Registers */
        uhcip->uhci_frame_interval = Get_OpReg8(SOFMOD);

        frame_base_addr = uhcip->uhci_flt_cookie.dmac_address;

        /* Set the Frame list base address */
        Set_OpReg32(FRBASEADD, frame_base_addr);

        /*
         * Begin sending SOFs
         * Set the Host Controller Functional State to Operational
         */
        cmd_reg = Get_OpReg16(USBCMD);
        cmd_reg |= (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
            USBCMD_REG_CONFIG_FLAG);

        Set_OpReg16(USBCMD, cmd_reg);

        /*
         * Verify the Command and interrupt enable registers,
         * a sanity check whether actually initialized or not
         */
        cmd_reg = Get_OpReg16(USBCMD);

        if (!(cmd_reg & (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
            USBCMD_REG_CONFIG_FLAG))) {
                USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                    "uhci_init_ctlr: Controller initialization failed");
                mutex_exit(&uhcip->uhci_int_mutex);

                return (USB_FAILURE);
        }

        /*
         * Set the ioc bit of the isoc intr td. This enables
         * the generation of an interrupt for every 1024 frames.
         */
        SetTD_ioc(uhcip, uhcip->uhci_isoc_td, 1);

        /* Set host controller soft state to operational */
        uhcip->uhci_hc_soft_state = UHCI_CTLR_OPERATIONAL_STATE;
        mutex_exit(&uhcip->uhci_int_mutex);

        USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_init_ctlr: Completed");

        return (USB_SUCCESS);
}


/*
 * uhci_uninit_ctlr:
 *	uninitialize the Host Controller (HC).
 */
void
uhci_uninit_ctlr(uhci_state_t *uhcip)
{
        if (uhcip->uhci_regs_handle) {
                /* Disable all the interrupts. */
                Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

                /* Complete the current transaction and then halt. */
                Set_OpReg16(USBCMD, 0);

                /* Wait for some time */
                mutex_exit(&uhcip->uhci_int_mutex);
                delay(drv_usectohz(UHCI_TIMEWAIT));
                mutex_enter(&uhcip->uhci_int_mutex);
        }
}


/*
 * uhci_map_regs:
 *	The Host Controller (HC) contains a set of on-chip operational
 *	registers which should be mapped into a non-cacheable
 *	portion of the system addressable space.
 */
int
uhci_map_regs(uhci_state_t *uhcip)
{
        dev_info_t              *dip = uhcip->uhci_dip;
        int                     index;
        uint32_t                regs_prop_len;
        int32_t                 *regs_list;
        uint16_t                command_reg;
        ddi_device_acc_attr_t   attr;

        USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_map_regs:");

        /* The host controller will be little endian */
        attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
        attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
        attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, uhcip->uhci_dip,
            DDI_PROP_DONTPASS, "reg", &regs_list, &regs_prop_len) !=
            DDI_PROP_SUCCESS) {

                return (USB_FAILURE);
        }

        for (index = 0; index * 5 < regs_prop_len; index++) {
                if (regs_list[index * 5] & UHCI_PROP_MASK) {
                        break;
                }
        }

        /*
         * Deallocate the memory allocated by ddi_prop_lookup_int_array().
         */
        ddi_prop_free(regs_list);

        if (index * 5 >= regs_prop_len) {

                return (USB_FAILURE);
        }

        /* Map in operational registers */
        if (ddi_regs_map_setup(dip, index, (caddr_t *)&uhcip->uhci_regsp,
            0, sizeof (hc_regs_t), &attr, &uhcip->uhci_regs_handle) !=
            DDI_SUCCESS) {
                USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                    "ddi_regs_map_setup: failed");

                return (USB_FAILURE);
        }

        if (pci_config_setup(dip, &uhcip->uhci_config_handle) != DDI_SUCCESS) {
                USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                    "uhci_map_regs: Config error");

                return (USB_FAILURE);
        }

        /* Make sure Memory Access Enable and Master Enable are set */
        command_reg = pci_config_get16(uhcip->uhci_config_handle,
            PCI_CONF_COMM);
        if (!(command_reg & (PCI_COMM_MAE | PCI_COMM_ME))) {
                USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                    "uhci_map_regs: No MAE/ME");
        }

        command_reg |= PCI_COMM_MAE | PCI_COMM_ME;
        pci_config_put16(uhcip->uhci_config_handle, PCI_CONF_COMM, command_reg);

        /*
         * Check whether I/O base address is configured and enabled.
         */
        if (!(command_reg & PCI_COMM_IO)) {
                USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                    "I/O Base address access disabled");

                return (USB_FAILURE);
        }

        /*
         * Get the IO base address of the controller
         */
        uhcip->uhci_iobase = (pci_config_get16(uhcip->uhci_config_handle,
            PCI_CONF_IOBASE) & PCI_CONF_IOBASE_MASK);

        USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_map_regs: Completed");

        return (USB_SUCCESS);
}


void
uhci_unmap_regs(uhci_state_t *uhcip)
{
        /* Unmap the UHCI registers */
        if (uhcip->uhci_regs_handle) {
                /* Reset the host controller */
                Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

                ddi_regs_map_free(&uhcip->uhci_regs_handle);
        }

        if (uhcip->uhci_config_handle) {
                pci_config_teardown(&uhcip->uhci_config_handle);
        }
}


/*
 * uhci_set_dma_attributes:
 *	Set the limits in the DMA attributes structure. Most of the values
 *	used in the DMA limit structures are the default values as specified
 *	by the Writing PCI device drivers document.
 */
void
uhci_set_dma_attributes(uhci_state_t *uhcip)
{
        USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_set_dma_attributes:");

        /* Initialize the DMA attributes */
        uhcip->uhci_dma_attr.dma_attr_version = DMA_ATTR_V0;
        uhcip->uhci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
        uhcip->uhci_dma_attr.dma_attr_addr_hi = 0xfffffff0ull;

        /* 32 bit addressing */
        uhcip->uhci_dma_attr.dma_attr_count_max = 0xffffffull;

        /*
         * Setting dma_attr_align to 512 sometimes makes the binding of
         * the handle fail; the reason is not known. Setting it to 16 is
         * right for our case (16 byte alignment is required per the
         * UHCI spec for TD descriptors).
         */

        /* 16 byte alignment */
        uhcip->uhci_dma_attr.dma_attr_align = 0x10;

        /*
         * Since the PCI specification requires only byte alignment, the
         * burstsizes field should be set to 1 for PCI devices.
         */
        uhcip->uhci_dma_attr.dma_attr_burstsizes = 0x1;

        uhcip->uhci_dma_attr.dma_attr_minxfer = 0x1;
        uhcip->uhci_dma_attr.dma_attr_maxxfer = 0xffffffull;
        uhcip->uhci_dma_attr.dma_attr_seg = 0xffffffffull;
        uhcip->uhci_dma_attr.dma_attr_sgllen = 1;
        uhcip->uhci_dma_attr.dma_attr_granular = 1;
        uhcip->uhci_dma_attr.dma_attr_flags = 0;
}


uint_t
pow_2(uint_t x)
{
        return ((x == 0) ? 1 : (1 << x));
}


uint_t
log_2(uint_t x)
{
        int ret_val = 0;

        while (x != 1) {
                ret_val++;
                x = x >> 1;
        }

        return (ret_val);
}
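

/*
 * Worked example (illustrative, not part of the original source):
 * pow_2() and log_2() are exact inverses for the power-of-2 polling
 * intervals used below, e.g. log_2(8) == 3 and pow_2(3) == 8.
 * uhci_bandwidth_adjust() always returns such a power of 2, which is
 * why uhci_lattice_height() can apply log_2() to it directly.
 */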


/*
 * uhci_obtain_state:
 */
uhci_state_t *
uhci_obtain_state(dev_info_t *dip)
{
        int instance = ddi_get_instance(dip);
        uhci_state_t *state = ddi_get_soft_state(uhci_statep, instance);

        ASSERT(state != NULL);

        return (state);
}


/*
 * uhci_alloc_hcdi_ops:
 *	The HCDI interfaces or entry points are the software interfaces
 *	used by the Universal Serial Bus Driver (USBA) to access the
 *	services of the Host Controller Driver (HCD). During HCD
 *	initialization, inform USBA about all available HCDI interfaces
 *	or entry points.
 */
usba_hcdi_ops_t *
uhci_alloc_hcdi_ops(uhci_state_t *uhcip)
{
        usba_hcdi_ops_t *hcdi_ops;

        USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
            "uhci_alloc_hcdi_ops:");

        hcdi_ops = usba_alloc_hcdi_ops();

        hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION_1;

        hcdi_ops->usba_hcdi_pipe_open = uhci_hcdi_pipe_open;
        hcdi_ops->usba_hcdi_pipe_close = uhci_hcdi_pipe_close;
        hcdi_ops->usba_hcdi_pipe_reset = uhci_hcdi_pipe_reset;

        hcdi_ops->usba_hcdi_pipe_ctrl_xfer = uhci_hcdi_pipe_ctrl_xfer;
        hcdi_ops->usba_hcdi_pipe_bulk_xfer = uhci_hcdi_pipe_bulk_xfer;
        hcdi_ops->usba_hcdi_pipe_intr_xfer = uhci_hcdi_pipe_intr_xfer;
        hcdi_ops->usba_hcdi_pipe_isoc_xfer = uhci_hcdi_pipe_isoc_xfer;

        hcdi_ops->usba_hcdi_bulk_transfer_size = uhci_hcdi_bulk_transfer_size;
        hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
            uhci_hcdi_pipe_stop_intr_polling;
        hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
            uhci_hcdi_pipe_stop_isoc_polling;

        hcdi_ops->usba_hcdi_get_current_frame_number =
            uhci_hcdi_get_current_frame_number;
        hcdi_ops->usba_hcdi_get_max_isoc_pkts = uhci_hcdi_get_max_isoc_pkts;

        hcdi_ops->usba_hcdi_console_input_init = uhci_hcdi_polled_input_init;
        hcdi_ops->usba_hcdi_console_input_enter = uhci_hcdi_polled_input_enter;
        hcdi_ops->usba_hcdi_console_read = uhci_hcdi_polled_read;
        hcdi_ops->usba_hcdi_console_input_exit = uhci_hcdi_polled_input_exit;
        hcdi_ops->usba_hcdi_console_input_fini = uhci_hcdi_polled_input_fini;

        hcdi_ops->usba_hcdi_console_output_init = uhci_hcdi_polled_output_init;
        hcdi_ops->usba_hcdi_console_output_enter =
            uhci_hcdi_polled_output_enter;
        hcdi_ops->usba_hcdi_console_write = uhci_hcdi_polled_write;
        hcdi_ops->usba_hcdi_console_output_exit = uhci_hcdi_polled_output_exit;
        hcdi_ops->usba_hcdi_console_output_fini = uhci_hcdi_polled_output_fini;

        return (hcdi_ops);
}


/*
 * uhci_init_frame_lst_table:
 *	Allocate the system memory and initialize the Host Controller
 *	Frame list table area. The start of the Frame list Table area
 *	must be 4096 byte aligned.
 */
static int
uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip)
{
        int                     result;
        uint_t                  ccount;
        size_t                  real_length;
        ddi_device_acc_attr_t   dev_attr;

        ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

        USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
            "uhci_init_frame_lst_table:");

        /* The host controller will be little endian */
        dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
        dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
        dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

        /* 4K alignment required */
        uhcip->uhci_dma_attr.dma_attr_align = 0x1000;

        /* Create space for the HCCA block */
        if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
            0, &uhcip->uhci_flt_dma_handle) != DDI_SUCCESS) {

                return (USB_FAILURE);
        }

        /* Reset to the default 16 bytes */
        uhcip->uhci_dma_attr.dma_attr_align = 0x10;

        if (ddi_dma_mem_alloc(uhcip->uhci_flt_dma_handle,
            SIZE_OF_FRAME_LST_TABLE, &dev_attr, DDI_DMA_CONSISTENT,
            DDI_DMA_SLEEP, 0, (caddr_t *)&uhcip->uhci_frame_lst_tablep,
            &real_length, &uhcip->uhci_flt_mem_handle)) {

                return (USB_FAILURE);
        }

        /* Map the whole Frame list base area into the I/O address space */
        result = ddi_dma_addr_bind_handle(uhcip->uhci_flt_dma_handle,
            NULL, (caddr_t)uhcip->uhci_frame_lst_tablep, real_length,
            DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
            &uhcip->uhci_flt_cookie, &ccount);

        if (result == DDI_DMA_MAPPED) {
                /* The cookie count should be 1 */
                if (ccount != 1) {
                        USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                            "uhci_init_frame_lst_table: More than 1 cookie");

                        return (USB_FAILURE);
                }
        } else {
                uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

                return (USB_FAILURE);
        }

        uhcip->uhci_dma_addr_bind_flag |= UHCI_FLA_POOL_BOUND;

        bzero((void *)uhcip->uhci_frame_lst_tablep, real_length);

        /* Initialize the interrupt lists */
        uhci_build_interrupt_lattice(uhcip);

        return (USB_SUCCESS);
}


/*
 * uhci_alloc_queue_head:
 *	Allocate a queue head
 */
queue_head_t *
uhci_alloc_queue_head(uhci_state_t *uhcip)
{
        int             index;
        uhci_td_t       *dummy_td;
        queue_head_t    *queue_head;

        USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
            "uhci_alloc_queue_head");

        ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

        /* Allocate a dummy td first. */
        if ((dummy_td = uhci_allocate_td_from_pool(uhcip)) == NULL) {

                USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
                    "uhci_alloc_queue_head: allocate td from pool failed");

                return (NULL);
        }

        /*
         * The first 63 queue heads in the Queue Head (QH)
         * buffer pool are reserved for building the interrupt lattice
         * tree. Search for a free Queue Head in the QH buffer pool.
         */
        for (index = NUM_STATIC_NODES; index < uhci_qh_pool_size; index++) {
                if (uhcip->uhci_qh_pool_addr[index].qh_flag ==
                    QUEUE_HEAD_FLAG_FREE) {
                        break;
                }
        }

        USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
            "uhci_alloc_queue_head: Allocated %d", index);

        if (index == uhci_qh_pool_size) {
                USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
                    "uhci_alloc_queue_head: All QH exhausted");

                /* Free the dummy td allocated for this qh. */
                dummy_td->flag = TD_FLAG_FREE;

                return (NULL);
        }

        queue_head = &uhcip->uhci_qh_pool_addr[index];
        USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
            "uhci_alloc_queue_head: Allocated address 0x%p",
            (void *)queue_head);

        bzero((void *)queue_head, sizeof (queue_head_t));
        SetQH32(uhcip, queue_head->link_ptr, HC_END_OF_LIST);
        SetQH32(uhcip, queue_head->element_ptr, HC_END_OF_LIST);
        queue_head->prev_qh = NULL;
        queue_head->qh_flag = QUEUE_HEAD_FLAG_BUSY;

        bzero((char *)dummy_td, sizeof (uhci_td_t));
        queue_head->td_tailp = dummy_td;
        SetQH32(uhcip, queue_head->element_ptr, TD_PADDR(dummy_td));

        return (queue_head);
}


/*
 * uhci_allocate_bandwidth:
 *	Figure out whether or not this interval may be supported. Return
 *	the index into the lattice if it can be supported. Return
 *	allocation failure if it cannot be supported.
 */
int
uhci_allocate_bandwidth(
        uhci_state_t            *uhcip,
        usba_pipe_handle_data_t *pipe_handle,
        uint_t                  *node)
{
        int             bandwidth;      /* Requested bandwidth */
        uint_t          min, min_index;
        uint_t          i;
        uint_t          height;         /* Bandwidth's height in the tree */
        uint_t          leftmost;
        uint_t          length;
        uint32_t        paddr;
        queue_head_t    *tmp_qh;
        usb_ep_descr_t  *endpoint = &pipe_handle->p_ep;

        ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

        /*
         * Calculate the length in bytes of a transaction on this
         * periodic endpoint.
         */
        mutex_enter(&pipe_handle->p_usba_device->usb_mutex);

        length = uhci_compute_total_bandwidth(endpoint,
            pipe_handle->p_usba_device->usb_port_status);
        mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

        /*
         * If the length in bytes plus the allocated bandwidth exceeds
         * the maximum, return bandwidth allocation failure.
         */
        if ((length + uhcip->uhci_bandwidth_intr_min +
            uhcip->uhci_bandwidth_isoch_sum) > (MAX_PERIODIC_BANDWIDTH)) {
                USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                    "uhci_allocate_bandwidth: "
                    "Reached maximum bandwidth value and cannot allocate "
                    "bandwidth for a given Interrupt/Isoch endpoint");

                return (USB_NO_BANDWIDTH);
        }

        /*
         * Isochronous transfers are not scheduled through the lattice;
         * just account for their bandwidth and return.
         */
        if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
                uhcip->uhci_bandwidth_isoch_sum += length;

                return (USB_SUCCESS);
        }

        /*
         * This is an interrupt endpoint.
         * Adjust bandwidth to be a power of 2
         */
        mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
        bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
            pipe_handle->p_usba_device->usb_port_status);
        mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

        /*
         * If this bandwidth can't be supported,
         * return allocation failure.
         */
        if (bandwidth == USB_FAILURE) {

                return (USB_FAILURE);
        }

        USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
            "The new bandwidth is %d", bandwidth);

        /* Find the leaf with the smallest allocated bandwidth */
        min_index = 0;
        min = uhcip->uhci_bandwidth[0];

        for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
                if (uhcip->uhci_bandwidth[i] < min) {
                        min_index = i;
                        min = uhcip->uhci_bandwidth[i];
                }
        }

        USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
            "The leaf with minimal bandwidth %d, "
            "The smallest bandwidth %d", min_index, min);

        /*
         * Find the index into the lattice given the
         * leaf with the smallest allocated bandwidth.
         */
        height = uhci_lattice_height(bandwidth);
        USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
            "The height is %d", height);

        *node = uhci_tree_bottom_nodes[min_index];

        /* Check if there are isoc TDs scheduled for this frame */
        if (uhcip->uhci_isoc_q_tailp[*node]) {
                paddr = (uhcip->uhci_isoc_q_tailp[*node]->link_ptr &
                    FRAME_LST_PTR_MASK);
        } else {
                paddr = (uhcip->uhci_frame_lst_tablep[*node] &
                    FRAME_LST_PTR_MASK);
        }

        tmp_qh = QH_VADDR(paddr);
        *node = tmp_qh->node;
        for (i = 0; i < height; i++) {
                *node = uhci_lattice_parent(*node);
        }

        USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
            "The real node is %d", *node);

        /*
         * Find the leftmost leaf in the subtree specified by the node.
         */
        leftmost = uhci_leftmost_leaf(*node, height);
        USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
            "Leftmost %d", leftmost);

        for (i = leftmost; i < leftmost +
            (NUM_FRAME_LST_ENTRIES/bandwidth); i++) {

                if ((length + uhcip->uhci_bandwidth_isoch_sum +
                    uhcip->uhci_bandwidth[i]) > MAX_PERIODIC_BANDWIDTH) {

                        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                            "uhci_allocate_bandwidth: "
                            "Reached maximum bandwidth value and cannot "
                            "allocate bandwidth for Interrupt endpoint");

                        return (USB_NO_BANDWIDTH);
                }
        }

        /*
         * All the leaves for this node must be updated with the bandwidth.
         */
        for (i = leftmost; i < leftmost +
            (NUM_FRAME_LST_ENTRIES/bandwidth); i++) {
                uhcip->uhci_bandwidth[i] += length;
        }

        /* Find the leaf with the smallest allocated bandwidth */
        min_index = 0;
        min = uhcip->uhci_bandwidth[0];

        for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
                if (uhcip->uhci_bandwidth[i] < min) {
                        min_index = i;
                        min = uhcip->uhci_bandwidth[i];
                }
        }

        /* Save the minimum for later use */
        uhcip->uhci_bandwidth_intr_min = min;

        return (USB_SUCCESS);
}
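

/*
 * Worked example (illustrative, not part of the original source):
 * suppose uhci_bandwidth_adjust() returned 8 (an 8 ms polling
 * interval). Then height = TREE_HEIGHT - log_2(8), the least-loaded
 * leaf is walked up 'height' parents to choose the lattice node, and
 * the transaction length is first checked against, and then added to,
 * the NUM_FRAME_LST_ENTRIES/8 entries of uhci_bandwidth[] starting at
 * that node's leftmost leaf.
 */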


/*
 * uhci_deallocate_bandwidth:
 *	Deallocate bandwidth for the given node in the lattice
 *	and the length of transfer.
 */
void
uhci_deallocate_bandwidth(uhci_state_t *uhcip,
    usba_pipe_handle_data_t *pipe_handle)
{
        uint_t          bandwidth;
        uint_t          height;
        uint_t          leftmost;
        uint_t          i;
        uint_t          min;
        usb_ep_descr_t  *endpoint = &pipe_handle->p_ep;
        uint_t          node, length;
        uhci_pipe_private_t *pp =
            (uhci_pipe_private_t *)pipe_handle->p_hcd_private;

        /* This routine is protected by the uhci_int_mutex */
        ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

        /* Obtain the length */
        mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
        length = uhci_compute_total_bandwidth(endpoint,
            pipe_handle->p_usba_device->usb_port_status);
        mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

        /*
         * If this is an isochronous endpoint, just delete the endpoint's
         * bandwidth from the total allocated isochronous bandwidth.
         */
        if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
                uhcip->uhci_bandwidth_isoch_sum -= length;

                return;
        }

        /* Obtain the node */
        node = pp->pp_node;

        /* Adjust bandwidth to be a power of 2 */
        mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
        bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
            pipe_handle->p_usba_device->usb_port_status);
        mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

        /* Find the height in the tree */
        height = uhci_lattice_height(bandwidth);

        /*
         * Find the leftmost leaf in the subtree specified by the node
         */
        leftmost = uhci_leftmost_leaf(node, height);

        /* Delete the bandwidth from the appropriate lists */
        for (i = leftmost; i < leftmost + (NUM_FRAME_LST_ENTRIES/bandwidth);
            i++) {
                uhcip->uhci_bandwidth[i] -= length;
        }

        min = uhcip->uhci_bandwidth[0];

        /* Recompute the minimum */
        for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
                if (uhcip->uhci_bandwidth[i] < min) {
                        min = uhcip->uhci_bandwidth[i];
                }
        }

        /* Save the minimum for later use */
        uhcip->uhci_bandwidth_intr_min = min;
}


/*
 * uhci_compute_total_bandwidth:
 *
 * Given a periodic endpoint (interrupt or isochronous) determine the total
 * bandwidth for one transaction. The UHCI host controller traverses the
 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
 * services an endpoint, only a single transaction attempt is made. The HC
 * moves to the next Endpoint Descriptor after the first transaction attempt
 * rather than finishing the entire Transfer Descriptor. Therefore, when a
 * Transfer Descriptor is inserted into the lattice, we will only count the
 * number of bytes for one transaction.
 *
 * The following formulas are used for calculating bandwidth in terms of
 * bytes, for a single USB full-speed and low-speed transaction
 * respectively. The protocol overheads differ for each type of USB
 * transfer; the formulas and protocol overheads are derived from section
 * 5.9.3 of the USB Specification and from the Bandwidth Analysis white
 * paper posted on the USB developer forum.
 *
 * Full-Speed:
 *	Protocol overhead + ((MaxPacketSize * 7)/6) + Host_Delay
 *
 * Low-Speed:
 *	Protocol overhead + Hub LS overhead +
 *	(Low-Speed clock * ((MaxPacketSize * 7)/6)) + Host_Delay
 */
static uint_t
uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
    usb_port_status_t port_status)
{
        uint_t          bandwidth;
        ushort_t        MaxPacketSize = endpoint->wMaxPacketSize;

        /* Add Host Controller specific delay to required bandwidth */
        bandwidth = HOST_CONTROLLER_DELAY;

        /* Add bit-stuffing overhead */
        MaxPacketSize = (ushort_t)((MaxPacketSize * 7) / 6);

        if (port_status == USBA_LOW_SPEED_DEV) {
                /* Low Speed interrupt transaction */
                bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
                    HUB_LOW_SPEED_PROTO_OVERHEAD +
                    (LOW_SPEED_CLOCK * MaxPacketSize));
        } else {
                /* Full Speed transaction */
                bandwidth += MaxPacketSize;

                if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_INTR) {
                        /* Full Speed interrupt transaction */
                        bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
                } else {
                        /* Isochronous input transaction */
                        if (UHCI_XFER_DIR(endpoint) == USB_EP_DIR_IN) {
                                bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
                        } else {
                                /* Isochronous output transaction */
                                bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
                        }
                }
        }

        return (bandwidth);
}
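

/*
 * Worked example (illustrative; the overhead constants live in the
 * UHCI headers and their exact values are not shown here): for a
 * full-speed interrupt endpoint with wMaxPacketSize = 8, the
 * bit-stuffing term is (8 * 7) / 6 = 9 bytes, so one transaction
 * costs HOST_CONTROLLER_DELAY + 9 + FS_NON_ISOC_PROTO_OVERHEAD bytes
 * of each 1 ms frame's budget.
 */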


/*
 * uhci_bandwidth_adjust:
 */
static int
uhci_bandwidth_adjust(
        uhci_state_t            *uhcip,
        usb_ep_descr_t          *endpoint,
        usb_port_status_t       port_status)
{
        int     i = 0;
        uint_t  interval;

        /*
         * Get the polling interval from the endpoint descriptor
         */
        interval = endpoint->bInterval;

        /*
         * The bInterval value in the endpoint descriptor can range
         * from 1 to 255ms. The interrupt lattice has 32 leaf nodes,
         * and the host controller cycles through these nodes every
         * 32ms. The longest polling interval that the controller
         * supports is 32ms.
         */

        /*
         * Return an error if the polling interval is less than 1ms
         * or greater than 255ms.
         */
        if ((interval < MIN_POLL_INTERVAL) || (interval > MAX_POLL_INTERVAL)) {
                USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                    "uhci_bandwidth_adjust: Endpoint's poll interval must be "
                    "between %d and %d ms", MIN_POLL_INTERVAL,
                    MAX_POLL_INTERVAL);

                return (USB_FAILURE);
        }

        /*
         * According to the USB Specification, a full-speed endpoint
         * can specify a desired polling interval of 1ms to 255ms,
         * while low speed endpoints are limited to 10ms to 255ms. But
         * some old keyboards and mice use a polling interval of 8ms.
         * For compatibility purposes, we allow polling intervals
         * between 8ms and 255ms for low speed endpoints.
         */
        if ((port_status == USBA_LOW_SPEED_DEV) &&
            (interval < MIN_LOW_SPEED_POLL_INTERVAL)) {
                USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                    "uhci_bandwidth_adjust: Low speed endpoint's poll interval "
                    "must be >= %d ms, adjusted",
                    MIN_LOW_SPEED_POLL_INTERVAL);

                interval = MIN_LOW_SPEED_POLL_INTERVAL;
        }

        /*
         * If the polling interval is greater than 32ms,
         * adjust it to 32ms.
         */
        if (interval > 32) {
                interval = 32;
        }

        /*
         * Find the largest power of 2 that is less than
         * or equal to interval.
         */
        while ((pow_2(i)) <= interval) {
                i++;
        }

        return (pow_2((i - 1)));
}


/*
 * uhci_lattice_height:
 *	Given the requested bandwidth, find the height in the tree at
 *	which the nodes for this bandwidth fall. The height is measured
 *	as the number of nodes from the leaf to the level specified by
 *	bandwidth. The root of the tree is at height TREE_HEIGHT.
 */
static uint_t
uhci_lattice_height(uint_t bandwidth)
{
        return (TREE_HEIGHT - (log_2(bandwidth)));
}


static uint_t
uhci_lattice_parent(uint_t node)
{
        return (((node % 2) == 0) ? ((node/2) - 1) : (node/2));
}


/*
 * uhci_leftmost_leaf:
 *	Find the leftmost leaf in the subtree specified by the node.
 *	Height refers to the number of nodes from the bottom of the tree
 *	to the node, including the node.
 */
static uint_t
uhci_leftmost_leaf(uint_t node, uint_t height)
{
        node = pow_2(height + VIRTUAL_TREE_HEIGHT) * (node+1) -
            NUM_FRAME_LST_ENTRIES;

        return (node);
}
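

/*
 * Worked example (illustrative; constant values are assumed from the
 * 1024-entry frame list): uhci_lattice_parent() inverts the child
 * links set up in uhci_build_interrupt_lattice(), so nodes 2n+1 and
 * 2n+2 both map back to parent n, e.g. nodes 7 and 8 to node 3. For
 * the root, uhci_leftmost_leaf(0, TREE_HEIGHT) works out to
 * pow_2(TREE_HEIGHT + VIRTUAL_TREE_HEIGHT) - NUM_FRAME_LST_ENTRIES,
 * i.e. leaf 0 when the two heights sum to log_2(1024).
 */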


/*
 * uhci_insert_qh:
 *	Add the Queue Head (QH) into the Host Controller's (HC)
 *	appropriate queue head list.
 */
void
uhci_insert_qh(uhci_state_t *uhcip, usba_pipe_handle_data_t *ph)
{
        uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;

        USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_insert_qh:");

        ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

        switch (UHCI_XFER_TYPE(&ph->p_ep)) {
        case USB_EP_ATTR_CONTROL:
                uhci_insert_ctrl_qh(uhcip, pp);
                break;
        case USB_EP_ATTR_BULK:
                uhci_insert_bulk_qh(uhcip, pp);
                break;
        case USB_EP_ATTR_INTR:
                uhci_insert_intr_qh(uhcip, pp);
                break;
        case USB_EP_ATTR_ISOCH:
                USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
                    "uhci_insert_qh: Illegal request");
                break;
        }
}


/*
 * uhci_insert_ctrl_qh:
 *	Insert a control QH into the Host Controller's (HC) control QH list.
 */
static void
uhci_insert_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
        queue_head_t *qh = pp->pp_qh;

        USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_insert_ctrl_qh:");

        ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

        if (uhcip->uhci_ctrl_xfers_q_head == uhcip->uhci_ctrl_xfers_q_tail) {
                uhcip->uhci_ctrl_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
        }

        SetQH32(uhcip, qh->link_ptr,
            GetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr));
        qh->prev_qh = uhcip->uhci_ctrl_xfers_q_tail;
        SetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr,
            QH_PADDR(qh) | HC_QUEUE_HEAD);
        uhcip->uhci_ctrl_xfers_q_tail = qh;
}


/*
 * uhci_insert_bulk_qh:
 *	Insert a bulk QH into the Host Controller's (HC) bulk QH list.
 */
static void
uhci_insert_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
        queue_head_t *qh = pp->pp_qh;

        USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_insert_bulk_qh:");

        ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

        if (uhcip->uhci_bulk_xfers_q_head == uhcip->uhci_bulk_xfers_q_tail) {
                uhcip->uhci_bulk_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
        } else if (uhcip->uhci_bulk_xfers_q_head->link_ptr ==
            uhcip->uhci_bulk_xfers_q_tail->link_ptr) {

                /* If there is already a loop, we should keep the loop. */
                qh->link_ptr = uhcip->uhci_bulk_xfers_q_tail->link_ptr;
        }

        qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
        SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_tail->link_ptr,
            QH_PADDR(qh) | HC_QUEUE_HEAD);
        uhcip->uhci_bulk_xfers_q_tail = qh;
}


/*
 * uhci_insert_intr_qh:
 *	Insert a periodic Queue Head, i.e. an interrupt queue head, into
 *	the Host Controller's (HC) interrupt lattice tree.
 */
static void
uhci_insert_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
        uint_t          node = pp->pp_node;     /* The appropriate node was */
                                                /* found during the opening */
                                                /* of the pipe. */
        queue_head_t    *qh = pp->pp_qh;
        queue_head_t    *next_lattice_qh, *lattice_qh;

        USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_insert_intr_qh:");

        ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

        /* Find the lattice queue head */
        lattice_qh = &uhcip->uhci_qh_pool_addr[node];
        next_lattice_qh =
            QH_VADDR(GetQH32(uhcip, lattice_qh->link_ptr) & QH_LINK_PTR_MASK);

        next_lattice_qh->prev_qh = qh;
        qh->link_ptr = lattice_qh->link_ptr;
        qh->prev_qh = lattice_qh;
        SetQH32(uhcip, lattice_qh->link_ptr, QH_PADDR(qh) | HC_QUEUE_HEAD);
        pp->pp_data_toggle = 0;
}


/*
 * uhci_insert_intr_td:
 *	Create a TD and a data buffer for an interrupt endpoint.
 */
int
uhci_insert_intr_td(
        uhci_state_t            *uhcip,
        usba_pipe_handle_data_t *ph,
        usb_intr_req_t          *req,
        usb_flags_t             flags)
{
        int                     error, pipe_dir;
        uint_t                  length, mps;
        uint32_t                buf_offs;
        uhci_td_t               *tmp_td;
        usb_intr_req_t          *intr_reqp;
        uhci_pipe_private_t     *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
        uhci_trans_wrapper_t    *tw;

        USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_insert_intr_td: req: 0x%p", (void *)req);

        ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

        /* Get the interrupt pipe direction */
        pipe_dir = UHCI_XFER_DIR(&ph->p_ep);

        /* Get the current interrupt request pointer */
        if (req) {
                length = req->intr_len;
        } else {
                ASSERT(pipe_dir == USB_EP_DIR_IN);
                length = (pp->pp_client_periodic_in_reqp) ?
                    (((usb_intr_req_t *)pp->
                    pp_client_periodic_in_reqp)->intr_len) :
                    ph->p_ep.wMaxPacketSize;
        }

        /* Check the size of the interrupt request */
        if (length > UHCI_MAX_TD_XFER_SIZE) {

                /* the length shouldn't exceed 8K */
                USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                    "uhci_insert_intr_td: Intr request size 0x%x is "
                    "more than 0x%x", length, UHCI_MAX_TD_XFER_SIZE);

                return (USB_INVALID_REQUEST);
        }

        USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_insert_intr_td: length: 0x%x", length);

        /* Allocate a transaction wrapper */
        if ((tw = uhci_create_transfer_wrapper(uhcip, pp, length, flags)) ==
            NULL) {

                USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                    "uhci_insert_intr_td: TW allocation failed");

                return (USB_NO_RESOURCES);
        }

        /*
         * Initialize the callback and any callback
         * data for when the td completes.
         */
        tw->tw_handle_td = uhci_handle_intr_td;
        tw->tw_handle_callback_value = NULL;
        tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
            PID_OUT : PID_IN;
        tw->tw_curr_xfer_reqp = (usb_opaque_t)req;

        /*
         * If it is an Interrupt IN request and the interrupt request is
         * NULL, allocate the usb interrupt request structure for the
         * current interrupt polling request.
         */
        if (tw->tw_direction == PID_IN) {
                if ((error = uhci_allocate_periodic_in_resource(uhcip,
                    pp, tw, flags)) != USB_SUCCESS) {
                        USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                            "uhci_insert_intr_td: Interrupt request structure "
                            "allocation failed");

                        /* free the transfer wrapper */
                        uhci_deallocate_tw(uhcip, pp, tw);

                        return (error);
                }
        }

        intr_reqp = (usb_intr_req_t *)tw->tw_curr_xfer_reqp;
        ASSERT(tw->tw_curr_xfer_reqp != NULL);

        tw->tw_timeout_cnt = (intr_reqp->intr_attributes & USB_ATTRS_ONE_XFER) ?
            intr_reqp->intr_timeout : 0;

        /* DATA IN */
        if (tw->tw_direction == PID_IN) {
                /* Insert the td onto the queue head */
                error = uhci_insert_hc_td(uhcip, 0,
                    length, pp, tw, PID_IN, intr_reqp->intr_attributes);

                if (error != USB_SUCCESS) {

                        uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
                        /* free the transfer wrapper */
                        uhci_deallocate_tw(uhcip, pp, tw);

                        return (USB_NO_RESOURCES);
                }
                tw->tw_bytes_xfered = 0;

                return (USB_SUCCESS);
        }

        if (req->intr_len) {
                /* DATA OUT */
                ASSERT(req->intr_data != NULL);

                /* Copy the data into the message */
                ddi_rep_put8(tw->tw_accesshandle, req->intr_data->b_rptr,
                    (uint8_t *)tw->tw_buf, req->intr_len, DDI_DEV_AUTOINCR);
        }

        /* Set the tw->tw_claim flag, so that nobody else works on this tw. */
        tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;

        mps = ph->p_ep.wMaxPacketSize;
        buf_offs = 0;

        /* Insert tds onto the queue head */
        while (length > 0) {

                error = uhci_insert_hc_td(uhcip, buf_offs,
                    (length > mps) ? mps : length,
                    pp, tw, PID_OUT,
                    intr_reqp->intr_attributes);

                if (error != USB_SUCCESS) {
                        /* no resource. */
                        break;
                }

                if (length <= mps) {
                        /* inserted all data. */
                        length = 0;

                } else {

                        buf_offs += mps;
                        length -= mps;
                }
        }

        if (error != USB_SUCCESS) {

                USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
                    "uhci_insert_intr_td: allocate td failed, free resource");

                /* remove all the tds */
                while (tw->tw_hctd_head != NULL) {
                        uhci_delete_td(uhcip, tw->tw_hctd_head);
                }

                tw->tw_claim = UHCI_NOT_CLAIMED;
                uhci_deallocate_tw(uhcip, pp, tw);

                return (error);
        }

        /* allow HC to xfer the tds of this tw */
        tmp_td = tw->tw_hctd_head;
        while (tmp_td != NULL) {

                SetTD_status(uhcip, tmp_td, UHCI_TD_ACTIVE);
                tmp_td = tmp_td->tw_td_next;
        }

        tw->tw_bytes_xfered = 0;
        tw->tw_claim = UHCI_NOT_CLAIMED;

        return (error);
}
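

/*
 * Worked example (illustrative, not part of the original source): an
 * interrupt OUT request of 150 bytes on an endpoint with
 * wMaxPacketSize = 64 is split by the loop above into three TDs of
 * 64, 64 and 22 bytes at buffer offsets 0, 64 and 128. The TDs are
 * created inactive and only marked UHCI_TD_ACTIVE once all of them
 * have been allocated, so the HC never sees a half-built chain.
 */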


/*
 * uhci_create_transfer_wrapper:
 *	Create a Transaction Wrapper (TW) for non-isoc transfer types.
 *	This involves the allocation of DMA resources.
 *
 *	For non-isoc transfers, one DMA handle and one DMA buffer are
 *	allocated per transfer. The DMA buffer may contain multiple
 *	DMA cookies and the cookies should meet certain alignment
 *	requirements to be able to fit in the multiple TDs. The alignment
 *	needs to ensure:
 *	1. the size of a cookie is larger than the max TD length (0x500)
 *	2. the size of a cookie is a multiple of wMaxPacketSize of the
 *	ctrl/bulk pipes
 *
 *	wMaxPacketSize for ctrl and bulk pipes may be 8, 16, 32 or 64 bytes.
 *	So the alignment should be a multiple of 64. wMaxPacketSize for intr
 *	pipes is a little different since it only specifies the max to be
 *	64 bytes, but as long as an intr transfer is limited to the max TD
 *	length, any alignment can work if the cookie size is larger than
 *	the max TD length.
 *
 *	Considering the above conditions, 2K alignment is used. 4K
 *	alignment should also be fine.
 */
static uhci_trans_wrapper_t *
uhci_create_transfer_wrapper(
        uhci_state_t            *uhcip,
        uhci_pipe_private_t     *pp,
        size_t                  length,
        usb_flags_t             usb_flags)
{
        size_t                  real_length;
        uhci_trans_wrapper_t    *tw;
        ddi_device_acc_attr_t   dev_attr;
        ddi_dma_attr_t          dma_attr;
        int                     kmem_flag;
        int                     (*dmamem_wait)(caddr_t);
        usba_pipe_handle_data_t *ph = pp->pp_pipe_handle;

        USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
            "uhci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
            length, usb_flags);

        ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

        /* isochronous pipes should not call into this function */
        if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {

                return (NULL);
        }

        /* The SLEEP flag should not be used in interrupt context */
        if (servicing_interrupt()) {
                kmem_flag = KM_NOSLEEP;
                dmamem_wait = DDI_DMA_DONTWAIT;
        } else {
                kmem_flag = KM_SLEEP;
                dmamem_wait = DDI_DMA_SLEEP;
        }

        /* Allocate space for the transfer wrapper */
        if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
            NULL) {
                USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                    "uhci_create_transfer_wrapper: kmem_alloc failed");

                return (NULL);
        }

        /* A zero-length packet doesn't need any DMA memory */
        if (length == 0) {

                goto dmadone;
        }

        /* allow sg lists for transfer wrapper dma memory */
        bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
        dma_attr.dma_attr_sgllen = UHCI_DMA_ATTR_SGLLEN;
        dma_attr.dma_attr_align = UHCI_DMA_ATTR_ALIGN;

        /* Store the transfer length */
        tw->tw_length = length;

        /* Allocate the DMA handle */
        if (ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr, dmamem_wait,
            0, &tw->tw_dmahandle) != DDI_SUCCESS) {
                USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                    "uhci_create_transfer_wrapper: Alloc handle failed");
                kmem_free(tw, sizeof (uhci_trans_wrapper_t));

                return (NULL);
        }

        dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
        dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
        dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

        /* Allocate the memory */
        if (ddi_dma_mem_alloc(tw->tw_dmahandle, tw->tw_length, &dev_attr,
            DDI_DMA_CONSISTENT, dmamem_wait, NULL, (caddr_t *)&tw->tw_buf,
            &real_length, &tw->tw_accesshandle) != DDI_SUCCESS) {
                USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                    "uhci_create_transfer_wrapper: dma_mem_alloc fail");
                ddi_dma_free_handle(&tw->tw_dmahandle);
                kmem_free(tw, sizeof (uhci_trans_wrapper_t));

                return (NULL);
        }

        ASSERT(real_length >= length);

        /* Bind the handle */
        if (ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
            (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
            dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies) !=
            DDI_DMA_MAPPED) {
                USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
                    "uhci_create_transfer_wrapper: Bind handle failed");
                ddi_dma_mem_free(&tw->tw_accesshandle);
                ddi_dma_free_handle(&tw->tw_dmahandle);
                kmem_free(tw, sizeof (uhci_trans_wrapper_t));

                return (NULL);
        }

        tw->tw_cookie_idx = 0;
        tw->tw_dma_offs = 0;

dmadone:
        /*
         * Only allow one wrapper to be added at a time. Insert the
         * new transaction wrapper into the list for this pipe.
         */
Insert the 1839 * new transaction wrapper into the list for this pipe. 1840 */ 1841 if (pp->pp_tw_head == NULL) { 1842 pp->pp_tw_head = tw; 1843 pp->pp_tw_tail = tw; 1844 } else { 1845 pp->pp_tw_tail->tw_next = tw; 1846 pp->pp_tw_tail = tw; 1847 ASSERT(tw->tw_next == NULL); 1848 } 1849 1850 /* Store a back pointer to the pipe private structure */ 1851 tw->tw_pipe_private = pp; 1852 1853 /* Store the transfer type - synchronous or asynchronous */ 1854 tw->tw_flags = usb_flags; 1855 1856 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1857 "uhci_create_transfer_wrapper: tw = 0x%p, ncookies = %u", 1858 (void *)tw, tw->tw_ncookies); 1859 1860 return (tw); 1861 } 1862 1863 1864 /* 1865 * uhci_insert_hc_td: 1866 * Insert a Transfer Descriptor (TD) on an QH. 1867 */ 1868 int 1869 uhci_insert_hc_td( 1870 uhci_state_t *uhcip, 1871 uint32_t buffer_offset, 1872 size_t hcgtd_length, 1873 uhci_pipe_private_t *pp, 1874 uhci_trans_wrapper_t *tw, 1875 uchar_t PID, 1876 usb_req_attrs_t attrs) 1877 { 1878 uhci_td_t *td, *current_dummy; 1879 queue_head_t *qh = pp->pp_qh; 1880 1881 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 1882 1883 if ((td = uhci_allocate_td_from_pool(uhcip)) == NULL) { 1884 1885 return (USB_NO_RESOURCES); 1886 } 1887 1888 current_dummy = qh->td_tailp; 1889 1890 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 1891 "uhci_insert_hc_td: td %p, attrs = 0x%x", (void *)td, attrs); 1892 1893 /* 1894 * Fill in the current dummy td and 1895 * add the new dummy to the end. 1896 */ 1897 uhci_fill_in_td(uhcip, td, current_dummy, buffer_offset, 1898 hcgtd_length, pp, PID, attrs, tw); 1899 1900 /* 1901 * Allow HC hardware xfer the td, except interrupt out td. 1902 */ 1903 if ((tw->tw_handle_td != uhci_handle_intr_td) || (PID != PID_OUT)) { 1904 1905 SetTD_status(uhcip, current_dummy, UHCI_TD_ACTIVE); 1906 } 1907 1908 /* Insert this td onto the tw */ 1909 1910 if (tw->tw_hctd_head == NULL) { 1911 ASSERT(tw->tw_hctd_tail == NULL); 1912 tw->tw_hctd_head = current_dummy; 1913 tw->tw_hctd_tail = current_dummy; 1914 } else { 1915 /* Add the td to the end of the list */ 1916 tw->tw_hctd_tail->tw_td_next = current_dummy; 1917 tw->tw_hctd_tail = current_dummy; 1918 } 1919 1920 /* 1921 * Insert the TD on to the QH. When this occurs, 1922 * the Host Controller will see the newly filled in TD 1923 */ 1924 current_dummy->outst_td_next = NULL; 1925 current_dummy->outst_td_prev = uhcip->uhci_outst_tds_tail; 1926 if (uhcip->uhci_outst_tds_head == NULL) { 1927 uhcip->uhci_outst_tds_head = current_dummy; 1928 } else { 1929 uhcip->uhci_outst_tds_tail->outst_td_next = current_dummy; 1930 } 1931 uhcip->uhci_outst_tds_tail = current_dummy; 1932 current_dummy->tw = tw; 1933 1934 return (USB_SUCCESS); 1935 } 1936 1937 1938 /* 1939 * uhci_fill_in_td: 1940 * Fill in the fields of a Transfer Descriptor (TD). 
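 *
 * A sketch of the dummy-TD scheme shared with uhci_insert_hc_td()
 * (illustrative only):
 *
 *	before:	QH -> ... -> current_dummy (inactive)
 *	after:	QH -> ... -> current_dummy (filled in as a live TD)
 *		      -> td (the new dummy at the tail)
 *
 * Because the old dummy stays inactive until it is completely
 * filled in, the HC never sees a half-built TD.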
1941 */ 1942 static void 1943 uhci_fill_in_td( 1944 uhci_state_t *uhcip, 1945 uhci_td_t *td, 1946 uhci_td_t *current_dummy, 1947 uint32_t buffer_offset, 1948 size_t length, 1949 uhci_pipe_private_t *pp, 1950 uchar_t PID, 1951 usb_req_attrs_t attrs, 1952 uhci_trans_wrapper_t *tw) 1953 { 1954 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle; 1955 uint32_t buf_addr; 1956 1957 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 1958 "uhci_fill_in_td: td 0x%p buf_offs 0x%x len 0x%lx " 1959 "attrs 0x%x", (void *)td, buffer_offset, length, attrs); 1960 1961 /* 1962 * If this is an isochronous TD, just return 1963 */ 1964 if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) { 1965 1966 return; 1967 } 1968 1969 /* The maximum transfer length of UHCI cannot exceed 0x500 bytes */ 1970 ASSERT(length <= UHCI_MAX_TD_XFER_SIZE); 1971 1972 bzero((char *)td, sizeof (uhci_td_t)); /* Clear the TD */ 1973 SetTD32(uhcip, current_dummy->link_ptr, TD_PADDR(td)); 1974 1975 if (attrs & USB_ATTRS_SHORT_XFER_OK) { 1976 SetTD_spd(uhcip, current_dummy, 1); 1977 } 1978 1979 mutex_enter(&ph->p_usba_device->usb_mutex); 1980 if (ph->p_usba_device->usb_port_status == USBA_LOW_SPEED_DEV) { 1981 SetTD_ls(uhcip, current_dummy, LOW_SPEED_DEVICE); 1982 } 1983 1984 SetTD_c_err(uhcip, current_dummy, UHCI_MAX_ERR_COUNT); 1985 SetTD_mlen(uhcip, current_dummy, 1986 (length == 0) ? ZERO_LENGTH : (length - 1)); 1987 SetTD_dtogg(uhcip, current_dummy, pp->pp_data_toggle); 1988 1989 /* Adjust the data toggle bit */ 1990 ADJ_DATA_TOGGLE(pp); 1991 1992 SetTD_devaddr(uhcip, current_dummy, ph->p_usba_device->usb_addr); 1993 SetTD_endpt(uhcip, current_dummy, 1994 ph->p_ep.bEndpointAddress & END_POINT_ADDRESS_MASK); 1995 SetTD_PID(uhcip, current_dummy, PID); 1996 SetTD_ioc(uhcip, current_dummy, INTERRUPT_ON_COMPLETION); 1997 1998 buf_addr = uhci_get_tw_paddr_by_offs(uhcip, buffer_offset, length, tw); 1999 SetTD32(uhcip, current_dummy->buffer_address, buf_addr); 2000 2001 td->qh_td_prev = current_dummy; 2002 current_dummy->qh_td_prev = NULL; 2003 pp->pp_qh->td_tailp = td; 2004 mutex_exit(&ph->p_usba_device->usb_mutex); 2005 } 2006 2007 /* 2008 * uhci_get_tw_paddr_by_offs: 2009 * Walk through the DMA cookies of a TW buffer to retrieve 2010 * the device address used for a TD. 2011 * 2012 * buffer_offset - the starting offset into the TW buffer, where the 2013 * TD should transfer from. When a TW has more than 2014 * one TD, the TDs must be filled in increasing order. 2015 */ 2016 static uint32_t 2017 uhci_get_tw_paddr_by_offs( 2018 uhci_state_t *uhcip, 2019 uint32_t buffer_offset, 2020 size_t length, 2021 uhci_trans_wrapper_t *tw) 2022 { 2023 uint32_t buf_addr; 2024 int rem_len; 2025 2026 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2027 "uhci_get_tw_paddr_by_offs: buf_offs 0x%x len 0x%lx", 2028 buffer_offset, length); 2029 2030 /* 2031 * TDs must be filled in increasing DMA offset order. 2032 * tw_dma_offs is initialized to be 0 at TW creation and 2033 * is only increased in this function. 2034 */ 2035 ASSERT(length == 0 || buffer_offset >= tw->tw_dma_offs); 2036 2037 if (length == 0) { 2038 buf_addr = 0; 2039 2040 return (buf_addr); 2041 } 2042 2043 /* 2044 * Advance to the next DMA cookie until finding the cookie 2045 * that buffer_offset falls in. 2046 * It is very likely this loop will never repeat more than 2047 * once. It is here just to accommodate the case buffer_offset 2048 * is increased by multiple cookies during two consecutive 2049 * calls into this function. 
In that case, the interim DMA 2050 * buffer is allowed to be skipped. 2051 */ 2052 while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <= 2053 buffer_offset) { 2054 /* 2055 * tw_dma_offs always points to the starting offset 2056 * of a cookie 2057 */ 2058 tw->tw_dma_offs += tw->tw_cookie.dmac_size; 2059 ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie); 2060 tw->tw_cookie_idx++; 2061 ASSERT(tw->tw_cookie_idx < tw->tw_ncookies); 2062 } 2063 2064 /* 2065 * Counting the remained buffer length to be filled in 2066 * the TDs for current DMA cookie 2067 */ 2068 rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) - 2069 buffer_offset; 2070 2071 /* Calculate the beginning address of the buffer */ 2072 ASSERT(length <= rem_len); 2073 buf_addr = (buffer_offset - tw->tw_dma_offs) + 2074 tw->tw_cookie.dmac_address; 2075 2076 USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2077 "uhci_get_tw_paddr_by_offs: dmac_addr 0x%x dmac_size " 2078 "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size, 2079 tw->tw_cookie_idx); 2080 2081 return (buf_addr); 2082 } 2083 2084 2085 /* 2086 * uhci_modify_td_active_bits: 2087 * Sets active bit in all the tds of QH to INACTIVE so that 2088 * the HC stops processing the TD's related to the QH. 2089 */ 2090 void 2091 uhci_modify_td_active_bits( 2092 uhci_state_t *uhcip, 2093 uhci_pipe_private_t *pp) 2094 { 2095 uhci_td_t *td_head; 2096 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep; 2097 uhci_trans_wrapper_t *tw_head = pp->pp_tw_head; 2098 2099 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2100 "uhci_modify_td_active_bits: tw head %p", (void *)tw_head); 2101 2102 while (tw_head != NULL) { 2103 tw_head->tw_claim = UHCI_MODIFY_TD_BITS_CLAIMED; 2104 td_head = tw_head->tw_hctd_head; 2105 2106 while (td_head) { 2107 if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) { 2108 SetTD_status(uhcip, td_head, 2109 GetTD_status(uhcip, td_head) & TD_INACTIVE); 2110 } else { 2111 SetTD32(uhcip, td_head->link_ptr, 2112 GetTD32(uhcip, td_head->link_ptr) | 2113 HC_END_OF_LIST); 2114 } 2115 2116 td_head = td_head->tw_td_next; 2117 } 2118 tw_head = tw_head->tw_next; 2119 } 2120 } 2121 2122 2123 /* 2124 * uhci_insert_ctrl_td: 2125 * Create a TD and a data buffer for a control Queue Head. 2126 */ 2127 int 2128 uhci_insert_ctrl_td( 2129 uhci_state_t *uhcip, 2130 usba_pipe_handle_data_t *ph, 2131 usb_ctrl_req_t *ctrl_reqp, 2132 usb_flags_t flags) 2133 { 2134 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2135 uhci_trans_wrapper_t *tw; 2136 size_t ctrl_buf_size; 2137 2138 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2139 "uhci_insert_ctrl_td: timeout: 0x%x", ctrl_reqp->ctrl_timeout); 2140 2141 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2142 2143 /* 2144 * If we have a control data phase, make the data buffer start 2145 * on the next 64-byte boundary so as to ensure the DMA cookie 2146 * can fit in the multiple TDs. The buffer in the range of 2147 * [SETUP_SIZE, UHCI_CTRL_EPT_MAX_SIZE) is just for padding 2148 * and not to be transferred. 
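 *
 * Illustrative layout of the TW buffer for a control transfer with
 * a data phase (assuming the usual 8-byte USB setup packet for
 * SETUP_SIZE and 64 bytes for UHCI_CTRL_EPT_MAX_SIZE; the driver
 * headers hold the authoritative values):
 *
 *	offset 0..7	the 8-byte SETUP packet
 *	offset 8..63	padding, never transferred
 *	offset 64..	ctrl_wLength bytes of the data phase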
2149 */ 2150 if (ctrl_reqp->ctrl_wLength) { 2151 ctrl_buf_size = UHCI_CTRL_EPT_MAX_SIZE + 2152 ctrl_reqp->ctrl_wLength; 2153 } else { 2154 ctrl_buf_size = SETUP_SIZE; 2155 } 2156 2157 /* Allocate a transaction wrapper */ 2158 if ((tw = uhci_create_transfer_wrapper(uhcip, pp, 2159 ctrl_buf_size, flags)) == NULL) { 2160 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2161 "uhci_insert_ctrl_td: TW allocation failed"); 2162 2163 return (USB_NO_RESOURCES); 2164 } 2165 2166 pp->pp_data_toggle = 0; 2167 2168 tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp; 2169 tw->tw_bytes_xfered = 0; 2170 tw->tw_bytes_pending = ctrl_reqp->ctrl_wLength; 2171 tw->tw_timeout_cnt = max(UHCI_CTRL_TIMEOUT, ctrl_reqp->ctrl_timeout); 2172 2173 /* 2174 * Initialize the callback and any callback 2175 * data for when the td completes. 2176 */ 2177 tw->tw_handle_td = uhci_handle_ctrl_td; 2178 tw->tw_handle_callback_value = NULL; 2179 2180 if ((uhci_create_setup_pkt(uhcip, pp, tw)) != USB_SUCCESS) { 2181 tw->tw_ctrl_state = 0; 2182 2183 /* free the transfer wrapper */ 2184 uhci_deallocate_tw(uhcip, pp, tw); 2185 2186 return (USB_NO_RESOURCES); 2187 } 2188 2189 tw->tw_ctrl_state = SETUP; 2190 2191 return (USB_SUCCESS); 2192 } 2193 2194 2195 /* 2196 * uhci_create_setup_pkt: 2197 * create a setup packet to initiate a control transfer. 2198 * 2199 * OHCI driver has seen the case where devices fail if there is 2200 * more than one control transfer to the device within a frame. 2201 * So, the UHCI ensures that only one TD will be put on the control 2202 * pipe to one device (to be consistent with OHCI driver). 2203 */ 2204 static int 2205 uhci_create_setup_pkt( 2206 uhci_state_t *uhcip, 2207 uhci_pipe_private_t *pp, 2208 uhci_trans_wrapper_t *tw) 2209 { 2210 int sdata; 2211 usb_ctrl_req_t *req = (usb_ctrl_req_t *)tw->tw_curr_xfer_reqp; 2212 2213 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2214 "uhci_create_setup_pkt: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%p", 2215 req->ctrl_bmRequestType, req->ctrl_bRequest, req->ctrl_wValue, 2216 req->ctrl_wIndex, req->ctrl_wLength, (void *)req->ctrl_data); 2217 2218 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2219 ASSERT(tw != NULL); 2220 2221 /* Create the first four bytes of the setup packet */ 2222 sdata = (req->ctrl_bmRequestType | (req->ctrl_bRequest << 8) | 2223 (req->ctrl_wValue << 16)); 2224 ddi_put32(tw->tw_accesshandle, (uint_t *)tw->tw_buf, sdata); 2225 2226 /* Create the second four bytes */ 2227 sdata = (uint32_t)(req->ctrl_wIndex | (req->ctrl_wLength << 16)); 2228 ddi_put32(tw->tw_accesshandle, 2229 (uint_t *)(tw->tw_buf + sizeof (uint_t)), sdata); 2230 2231 /* 2232 * The TD's are placed on the QH one at a time. 2233 * Once this TD is placed on the done list, the 2234 * data or status phase TD will be enqueued. 2235 */ 2236 if ((uhci_insert_hc_td(uhcip, 0, SETUP_SIZE, 2237 pp, tw, PID_SETUP, req->ctrl_attributes)) != USB_SUCCESS) { 2238 2239 return (USB_NO_RESOURCES); 2240 } 2241 2242 USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2243 "Create_setup: pp = 0x%p, attrs = 0x%x", (void *)pp, 2244 req->ctrl_attributes); 2245 2246 /* 2247 * If this control transfer has a data phase, record the 2248 * direction. If the data phase is an OUT transaction , 2249 * copy the data into the buffer of the transfer wrapper. 2250 */ 2251 if (req->ctrl_wLength != 0) { 2252 /* There is a data stage. 
Find the direction */ 2253 if (req->ctrl_bmRequestType & USB_DEV_REQ_DEV_TO_HOST) { 2254 tw->tw_direction = PID_IN; 2255 } else { 2256 tw->tw_direction = PID_OUT; 2257 2258 /* Copy the data into the buffer */ 2259 ddi_rep_put8(tw->tw_accesshandle, 2260 req->ctrl_data->b_rptr, 2261 (uint8_t *)(tw->tw_buf + UHCI_CTRL_EPT_MAX_SIZE), 2262 req->ctrl_wLength, 2263 DDI_DEV_AUTOINCR); 2264 } 2265 } 2266 2267 return (USB_SUCCESS); 2268 } 2269 2270 2271 /* 2272 * uhci_create_stats: 2273 * Allocate and initialize the uhci kstat structures 2274 */ 2275 void 2276 uhci_create_stats(uhci_state_t *uhcip) 2277 { 2278 int i; 2279 char kstatname[KSTAT_STRLEN]; 2280 char *usbtypes[USB_N_COUNT_KSTATS] = 2281 {"ctrl", "isoch", "bulk", "intr"}; 2282 uint_t instance = uhcip->uhci_instance; 2283 const char *dname = ddi_driver_name(uhcip->uhci_dip); 2284 uhci_intrs_stats_t *isp; 2285 2286 if (UHCI_INTRS_STATS(uhcip) == NULL) { 2287 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs", 2288 dname, instance); 2289 UHCI_INTRS_STATS(uhcip) = kstat_create("usba", instance, 2290 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED, 2291 sizeof (uhci_intrs_stats_t) / sizeof (kstat_named_t), 2292 KSTAT_FLAG_PERSISTENT); 2293 2294 if (UHCI_INTRS_STATS(uhcip) != NULL) { 2295 isp = UHCI_INTRS_STATS_DATA(uhcip); 2296 kstat_named_init(&isp->uhci_intrs_hc_halted, 2297 "HC Halted", KSTAT_DATA_UINT64); 2298 kstat_named_init(&isp->uhci_intrs_hc_process_err, 2299 "HC Process Errors", KSTAT_DATA_UINT64); 2300 kstat_named_init(&isp->uhci_intrs_host_sys_err, 2301 "Host Sys Errors", KSTAT_DATA_UINT64); 2302 kstat_named_init(&isp->uhci_intrs_resume_detected, 2303 "Resume Detected", KSTAT_DATA_UINT64); 2304 kstat_named_init(&isp->uhci_intrs_usb_err_intr, 2305 "USB Error", KSTAT_DATA_UINT64); 2306 kstat_named_init(&isp->uhci_intrs_usb_intr, 2307 "USB Interrupts", KSTAT_DATA_UINT64); 2308 kstat_named_init(&isp->uhci_intrs_total, 2309 "Total Interrupts", KSTAT_DATA_UINT64); 2310 kstat_named_init(&isp->uhci_intrs_not_claimed, 2311 "Not Claimed", KSTAT_DATA_UINT64); 2312 2313 UHCI_INTRS_STATS(uhcip)->ks_private = uhcip; 2314 UHCI_INTRS_STATS(uhcip)->ks_update = nulldev; 2315 kstat_install(UHCI_INTRS_STATS(uhcip)); 2316 } 2317 } 2318 2319 if (UHCI_TOTAL_STATS(uhcip) == NULL) { 2320 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total", 2321 dname, instance); 2322 UHCI_TOTAL_STATS(uhcip) = kstat_create("usba", instance, 2323 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1, 2324 KSTAT_FLAG_PERSISTENT); 2325 2326 if (UHCI_TOTAL_STATS(uhcip) != NULL) { 2327 kstat_install(UHCI_TOTAL_STATS(uhcip)); 2328 } 2329 } 2330 2331 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 2332 if (uhcip->uhci_count_stats[i] == NULL) { 2333 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s", 2334 dname, instance, usbtypes[i]); 2335 uhcip->uhci_count_stats[i] = kstat_create("usba", 2336 instance, kstatname, "usb_byte_count", 2337 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 2338 2339 if (uhcip->uhci_count_stats[i] != NULL) { 2340 kstat_install(uhcip->uhci_count_stats[i]); 2341 } 2342 } 2343 } 2344 } 2345 2346 2347 /* 2348 * uhci_destroy_stats: 2349 * Clean up uhci kstat structures 2350 */ 2351 void 2352 uhci_destroy_stats(uhci_state_t *uhcip) 2353 { 2354 int i; 2355 2356 if (UHCI_INTRS_STATS(uhcip)) { 2357 kstat_delete(UHCI_INTRS_STATS(uhcip)); 2358 UHCI_INTRS_STATS(uhcip) = NULL; 2359 } 2360 2361 if (UHCI_TOTAL_STATS(uhcip)) { 2362 kstat_delete(UHCI_TOTAL_STATS(uhcip)); 2363 UHCI_TOTAL_STATS(uhcip) = NULL; 2364 } 2365 2366 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 2367 if 
(uhcip->uhci_count_stats[i]) { 2368 kstat_delete(uhcip->uhci_count_stats[i]); 2369 uhcip->uhci_count_stats[i] = NULL; 2370 } 2371 } 2372 } 2373 2374 2375 void 2376 uhci_do_intrs_stats(uhci_state_t *uhcip, int val) 2377 { 2378 if (UHCI_INTRS_STATS(uhcip) == NULL) { 2379 2380 return; 2381 } 2382 2383 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_total.value.ui64++; 2384 switch (val) { 2385 case USBSTS_REG_HC_HALTED: 2386 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_hc_halted.value.ui64++; 2387 break; 2388 case USBSTS_REG_HC_PROCESS_ERR: 2389 UHCI_INTRS_STATS_DATA(uhcip)-> 2390 uhci_intrs_hc_process_err.value.ui64++; 2391 break; 2392 case USBSTS_REG_HOST_SYS_ERR: 2393 UHCI_INTRS_STATS_DATA(uhcip)-> 2394 uhci_intrs_host_sys_err.value.ui64++; 2395 break; 2396 case USBSTS_REG_RESUME_DETECT: 2397 UHCI_INTRS_STATS_DATA(uhcip)-> 2398 uhci_intrs_resume_detected.value.ui64++; 2399 break; 2400 case USBSTS_REG_USB_ERR_INTR: 2401 UHCI_INTRS_STATS_DATA(uhcip)-> 2402 uhci_intrs_usb_err_intr.value.ui64++; 2403 break; 2404 case USBSTS_REG_USB_INTR: 2405 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_usb_intr.value.ui64++; 2406 break; 2407 default: 2408 UHCI_INTRS_STATS_DATA(uhcip)-> 2409 uhci_intrs_not_claimed.value.ui64++; 2410 break; 2411 } 2412 } 2413 2414 2415 void 2416 uhci_do_byte_stats(uhci_state_t *uhcip, size_t len, uint8_t attr, uint8_t addr) 2417 { 2418 uint8_t type = attr & USB_EP_ATTR_MASK; 2419 uint8_t dir = addr & USB_EP_DIR_MASK; 2420 2421 switch (dir) { 2422 case USB_EP_DIR_IN: 2423 UHCI_TOTAL_STATS_DATA(uhcip)->reads++; 2424 UHCI_TOTAL_STATS_DATA(uhcip)->nread += len; 2425 switch (type) { 2426 case USB_EP_ATTR_CONTROL: 2427 UHCI_CTRL_STATS(uhcip)->reads++; 2428 UHCI_CTRL_STATS(uhcip)->nread += len; 2429 break; 2430 case USB_EP_ATTR_BULK: 2431 UHCI_BULK_STATS(uhcip)->reads++; 2432 UHCI_BULK_STATS(uhcip)->nread += len; 2433 break; 2434 case USB_EP_ATTR_INTR: 2435 UHCI_INTR_STATS(uhcip)->reads++; 2436 UHCI_INTR_STATS(uhcip)->nread += len; 2437 break; 2438 case USB_EP_ATTR_ISOCH: 2439 UHCI_ISOC_STATS(uhcip)->reads++; 2440 UHCI_ISOC_STATS(uhcip)->nread += len; 2441 break; 2442 } 2443 break; 2444 case USB_EP_DIR_OUT: 2445 UHCI_TOTAL_STATS_DATA(uhcip)->writes++; 2446 UHCI_TOTAL_STATS_DATA(uhcip)->nwritten += len; 2447 switch (type) { 2448 case USB_EP_ATTR_CONTROL: 2449 UHCI_CTRL_STATS(uhcip)->writes++; 2450 UHCI_CTRL_STATS(uhcip)->nwritten += len; 2451 break; 2452 case USB_EP_ATTR_BULK: 2453 UHCI_BULK_STATS(uhcip)->writes++; 2454 UHCI_BULK_STATS(uhcip)->nwritten += len; 2455 break; 2456 case USB_EP_ATTR_INTR: 2457 UHCI_INTR_STATS(uhcip)->writes++; 2458 UHCI_INTR_STATS(uhcip)->nwritten += len; 2459 break; 2460 case USB_EP_ATTR_ISOCH: 2461 UHCI_ISOC_STATS(uhcip)->writes++; 2462 UHCI_ISOC_STATS(uhcip)->nwritten += len; 2463 break; 2464 } 2465 break; 2466 } 2467 } 2468 2469 2470 /* 2471 * uhci_free_tw: 2472 * Free the Transfer Wrapper (TW). 
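 *
 * Teardown mirrors allocation in reverse order: each bound DMA
 * handle is unbound, its memory freed and the handle released
 * (once per isoc buffer, or once for the single non-isoc buffer)
 * before the TW structure itself is freed.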
2473 */
2474 void
2475 uhci_free_tw(uhci_state_t *uhcip, uhci_trans_wrapper_t *tw)
2476 {
2477 int rval, i;
2478 
2479 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, "uhci_free_tw:");
2480 
2481 ASSERT(tw != NULL);
2482 
2483 if (tw->tw_isoc_strtlen > 0) {
2484 ASSERT(tw->tw_isoc_bufs != NULL);
2485 for (i = 0; i < tw->tw_ncookies; i++) {
2486 rval = ddi_dma_unbind_handle(
2487 tw->tw_isoc_bufs[i].dma_handle);
2488 ASSERT(rval == USB_SUCCESS);
2489 ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
2490 ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
2491 }
2492 kmem_free(tw->tw_isoc_bufs, tw->tw_isoc_strtlen);
2493 } else if (tw->tw_dmahandle != NULL) {
2494 rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
2495 ASSERT(rval == DDI_SUCCESS);
2496 
2497 ddi_dma_mem_free(&tw->tw_accesshandle);
2498 ddi_dma_free_handle(&tw->tw_dmahandle);
2499 }
2500 
2501 kmem_free(tw, sizeof (uhci_trans_wrapper_t));
2502 }
2503 
2504 
2505 /*
2506 * uhci_deallocate_tw:
2507 * Deallocate a Transaction Wrapper (TW); this involves
2508 * freeing its DMA resources.
2509 */
2510 void
2511 uhci_deallocate_tw(uhci_state_t *uhcip,
2512 uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw)
2513 {
2514 uhci_trans_wrapper_t *head;
2515 
2516 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2517 "uhci_deallocate_tw:");
2518 
2519 /*
2520 * If the transfer wrapper has no Host Controller (HC)
2521 * Transfer Descriptors (TD) associated with it, then
2522 * remove the transfer wrapper. The transfers are done
2523 * in FIFO order, so this should be the first transfer
2524 * wrapper on the list.
2525 */
2526 if (tw->tw_hctd_head != NULL) {
2527 ASSERT(tw->tw_hctd_tail != NULL);
2528 
2529 return;
2530 }
2531 
2532 ASSERT(tw->tw_hctd_tail == NULL);
2533 ASSERT(pp->pp_tw_head != NULL);
2534 
2535 /*
2536 * Unlink the TW from the pipe's TW list; if the removal empties
 * the list, clear the tail pointer as well.
2537 */ 2538 head = pp->pp_tw_head; 2539 2540 if (head == tw) { 2541 pp->pp_tw_head = head->tw_next; 2542 if (pp->pp_tw_head == NULL) { 2543 pp->pp_tw_tail = NULL; 2544 } 2545 } else { 2546 while (head->tw_next != tw) 2547 head = head->tw_next; 2548 head->tw_next = tw->tw_next; 2549 if (tw->tw_next == NULL) { 2550 pp->pp_tw_tail = head; 2551 } 2552 } 2553 uhci_free_tw(uhcip, tw); 2554 } 2555 2556 2557 void 2558 uhci_delete_td(uhci_state_t *uhcip, uhci_td_t *td) 2559 { 2560 uhci_td_t *tmp_td; 2561 uhci_trans_wrapper_t *tw = td->tw; 2562 2563 if ((td->outst_td_next == NULL) && (td->outst_td_prev == NULL)) { 2564 uhcip->uhci_outst_tds_head = NULL; 2565 uhcip->uhci_outst_tds_tail = NULL; 2566 } else if (td->outst_td_next == NULL) { 2567 td->outst_td_prev->outst_td_next = NULL; 2568 uhcip->uhci_outst_tds_tail = td->outst_td_prev; 2569 } else if (td->outst_td_prev == NULL) { 2570 td->outst_td_next->outst_td_prev = NULL; 2571 uhcip->uhci_outst_tds_head = td->outst_td_next; 2572 } else { 2573 td->outst_td_prev->outst_td_next = td->outst_td_next; 2574 td->outst_td_next->outst_td_prev = td->outst_td_prev; 2575 } 2576 2577 tmp_td = tw->tw_hctd_head; 2578 2579 if (tmp_td != td) { 2580 while (tmp_td->tw_td_next != td) { 2581 tmp_td = tmp_td->tw_td_next; 2582 } 2583 ASSERT(tmp_td); 2584 tmp_td->tw_td_next = td->tw_td_next; 2585 if (td->tw_td_next == NULL) { 2586 tw->tw_hctd_tail = tmp_td; 2587 } 2588 } else { 2589 tw->tw_hctd_head = tw->tw_hctd_head->tw_td_next; 2590 if (tw->tw_hctd_head == NULL) { 2591 tw->tw_hctd_tail = NULL; 2592 } 2593 } 2594 2595 td->flag = TD_FLAG_FREE; 2596 } 2597 2598 2599 void 2600 uhci_remove_tds_tws( 2601 uhci_state_t *uhcip, 2602 usba_pipe_handle_data_t *ph) 2603 { 2604 usb_opaque_t curr_reqp; 2605 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2606 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep; 2607 uhci_trans_wrapper_t *tw_tmp; 2608 uhci_trans_wrapper_t *tw_head = pp->pp_tw_head; 2609 2610 while (tw_head != NULL) { 2611 tw_tmp = tw_head; 2612 tw_head = tw_head->tw_next; 2613 2614 curr_reqp = tw_tmp->tw_curr_xfer_reqp; 2615 if (curr_reqp) { 2616 /* do this for control/bulk/intr */ 2617 if ((tw_tmp->tw_direction == PID_IN) && 2618 (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_INTR)) { 2619 uhci_deallocate_periodic_in_resource(uhcip, 2620 pp, tw_tmp); 2621 } else { 2622 uhci_hcdi_callback(uhcip, pp, 2623 pp->pp_pipe_handle, tw_tmp, USB_CR_FLUSHED); 2624 } 2625 } /* end of curr_reqp */ 2626 2627 if (tw_tmp->tw_claim != UHCI_MODIFY_TD_BITS_CLAIMED) { 2628 continue; 2629 } 2630 2631 while (tw_tmp->tw_hctd_head != NULL) { 2632 uhci_delete_td(uhcip, tw_tmp->tw_hctd_head); 2633 } 2634 2635 uhci_deallocate_tw(uhcip, pp, tw_tmp); 2636 } 2637 } 2638 2639 2640 /* 2641 * uhci_remove_qh: 2642 * Remove the Queue Head from the Host Controller's 2643 * appropriate QH list. 
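 *
 * The QH's dummy TD is returned to the pool, then the QH is
 * unlinked from the control, bulk or interrupt list according
 * to the endpoint's transfer type.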
2644 */ 2645 void 2646 uhci_remove_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2647 { 2648 uhci_td_t *dummy_td; 2649 2650 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2651 2652 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2653 "uhci_remove_qh:"); 2654 2655 dummy_td = pp->pp_qh->td_tailp; 2656 dummy_td->flag = TD_FLAG_FREE; 2657 2658 switch (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep)) { 2659 case USB_EP_ATTR_CONTROL: 2660 uhci_remove_ctrl_qh(uhcip, pp); 2661 break; 2662 case USB_EP_ATTR_BULK: 2663 uhci_remove_bulk_qh(uhcip, pp); 2664 break; 2665 case USB_EP_ATTR_INTR: 2666 uhci_remove_intr_qh(uhcip, pp); 2667 break; 2668 } 2669 } 2670 2671 2672 static void 2673 uhci_remove_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2674 { 2675 queue_head_t *qh = pp->pp_qh; 2676 queue_head_t *next_lattice_qh = 2677 QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2678 2679 qh->prev_qh->link_ptr = qh->link_ptr; 2680 next_lattice_qh->prev_qh = qh->prev_qh; 2681 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2682 2683 } 2684 2685 /* 2686 * uhci_remove_bulk_qh: 2687 * Remove a bulk QH from the Host Controller's QH list. There may be a 2688 * loop for bulk QHs, we must care about this while removing a bulk QH. 2689 */ 2690 static void 2691 uhci_remove_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2692 { 2693 queue_head_t *qh = pp->pp_qh; 2694 queue_head_t *next_lattice_qh; 2695 uint32_t paddr; 2696 2697 paddr = (GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2698 next_lattice_qh = (qh == uhcip->uhci_bulk_xfers_q_tail) ? 2699 0 : QH_VADDR(paddr); 2700 2701 if ((qh == uhcip->uhci_bulk_xfers_q_tail) && 2702 (qh->prev_qh == uhcip->uhci_bulk_xfers_q_head)) { 2703 SetQH32(uhcip, qh->prev_qh->link_ptr, HC_END_OF_LIST); 2704 } else { 2705 qh->prev_qh->link_ptr = qh->link_ptr; 2706 } 2707 2708 if (next_lattice_qh == NULL) { 2709 uhcip->uhci_bulk_xfers_q_tail = qh->prev_qh; 2710 } else { 2711 next_lattice_qh->prev_qh = qh->prev_qh; 2712 } 2713 2714 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2715 2716 } 2717 2718 2719 static void 2720 uhci_remove_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2721 { 2722 queue_head_t *qh = pp->pp_qh; 2723 queue_head_t *next_lattice_qh = 2724 QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2725 2726 qh->prev_qh->link_ptr = qh->link_ptr; 2727 if (next_lattice_qh->prev_qh != NULL) { 2728 next_lattice_qh->prev_qh = qh->prev_qh; 2729 } else { 2730 uhcip->uhci_ctrl_xfers_q_tail = qh->prev_qh; 2731 } 2732 2733 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2734 } 2735 2736 2737 /* 2738 * uhci_allocate_td_from_pool: 2739 * Allocate a Transfer Descriptor (TD) from the TD buffer pool. 2740 */ 2741 static uhci_td_t * 2742 uhci_allocate_td_from_pool(uhci_state_t *uhcip) 2743 { 2744 int index; 2745 uhci_td_t *td; 2746 2747 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2748 2749 /* 2750 * Search for a blank Transfer Descriptor (TD) 2751 * in the TD buffer pool. 
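 *
 * This is a simple first-fit scan: the first entry whose flag
 * is TD_FLAG_FREE is claimed and marked TD_FLAG_DUMMY, so an
 * allocation costs O(uhci_td_pool_size) in the worst case.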
2752 */ 2753 for (index = 0; index < uhci_td_pool_size; index ++) { 2754 if (uhcip->uhci_td_pool_addr[index].flag == TD_FLAG_FREE) { 2755 break; 2756 } 2757 } 2758 2759 if (index == uhci_td_pool_size) { 2760 USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2761 "uhci_allocate_td_from_pool: TD exhausted"); 2762 2763 return (NULL); 2764 } 2765 2766 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2767 "uhci_allocate_td_from_pool: Allocated %d", index); 2768 2769 /* Create a new dummy for the end of the TD list */ 2770 td = &uhcip->uhci_td_pool_addr[index]; 2771 2772 /* Mark the newly allocated TD as a dummy */ 2773 td->flag = TD_FLAG_DUMMY; 2774 td->qh_td_prev = NULL; 2775 2776 return (td); 2777 } 2778 2779 2780 /* 2781 * uhci_insert_bulk_td: 2782 */ 2783 int 2784 uhci_insert_bulk_td( 2785 uhci_state_t *uhcip, 2786 usba_pipe_handle_data_t *ph, 2787 usb_bulk_req_t *req, 2788 usb_flags_t flags) 2789 { 2790 size_t length; 2791 uint_t mps; /* MaxPacketSize */ 2792 uint_t num_bulk_tds, i, j; 2793 uint32_t buf_offs; 2794 uhci_td_t *bulk_td_ptr; 2795 uhci_td_t *current_dummy, *tmp_td; 2796 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2797 uhci_trans_wrapper_t *tw; 2798 uhci_bulk_isoc_xfer_t *bulk_xfer_info; 2799 uhci_bulk_isoc_td_pool_t *td_pool_ptr; 2800 2801 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2802 "uhci_insert_bulk_td: req: 0x%p, flags = 0x%x", (void *)req, flags); 2803 2804 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2805 2806 /* 2807 * Create transfer wrapper 2808 */ 2809 if ((tw = uhci_create_transfer_wrapper(uhcip, pp, req->bulk_len, 2810 flags)) == NULL) { 2811 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2812 "uhci_insert_bulk_td: TW allocation failed"); 2813 2814 return (USB_NO_RESOURCES); 2815 } 2816 2817 tw->tw_bytes_xfered = 0; 2818 tw->tw_bytes_pending = req->bulk_len; 2819 tw->tw_handle_td = uhci_handle_bulk_td; 2820 tw->tw_handle_callback_value = (usb_opaque_t)req->bulk_data; 2821 tw->tw_timeout_cnt = req->bulk_timeout; 2822 tw->tw_data = req->bulk_data; 2823 tw->tw_curr_xfer_reqp = (usb_opaque_t)req; 2824 2825 /* Get the bulk pipe direction */ 2826 tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ? 2827 PID_OUT : PID_IN; 2828 2829 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2830 "uhci_insert_bulk_td: direction: 0x%x", tw->tw_direction); 2831 2832 /* If the DATA OUT, copy the data into transfer buffer. */ 2833 if (tw->tw_direction == PID_OUT) { 2834 if (req->bulk_len) { 2835 ASSERT(req->bulk_data != NULL); 2836 2837 /* Copy the data into the message */ 2838 ddi_rep_put8(tw->tw_accesshandle, 2839 req->bulk_data->b_rptr, 2840 (uint8_t *)tw->tw_buf, 2841 req->bulk_len, DDI_DEV_AUTOINCR); 2842 } 2843 } 2844 2845 /* Get the max packet size. */ 2846 length = mps = pp->pp_pipe_handle->p_ep.wMaxPacketSize; 2847 2848 /* 2849 * Calculate number of TD's to insert in the current frame interval. 2850 * Max number TD's allowed (driver implementation) is 128 2851 * in one frame interval. Once all the TD's are completed 2852 * then the remaining TD's will be inserted into the lattice 2853 * in the uhci_handle_bulk_td(). 
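 *
 * Worked example (illustrative numbers only): a 10000-byte
 * request with wMaxPacketSize = 64 needs ceil(10000 / 64) = 157
 * TDs; that exceeds the 128-TD cap, so 128 TDs are inserted now
 * and the remainder is queued from the completion path.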
2854 */
2855 if ((tw->tw_bytes_pending / mps) >= MAX_NUM_BULK_TDS_PER_XFER) {
2856 num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
2857 } else {
2858 num_bulk_tds = (tw->tw_bytes_pending / mps);
2859 
2860 if (tw->tw_bytes_pending % mps || tw->tw_bytes_pending == 0) {
2861 num_bulk_tds++;
2862 length = (tw->tw_bytes_pending % mps);
2863 }
2864 }
2865 
2866 /*
2867 * Allocate memory for the bulk xfer information structure
2868 */
2869 if ((bulk_xfer_info = kmem_zalloc(
2870 sizeof (uhci_bulk_isoc_xfer_t), KM_NOSLEEP)) == NULL) {
2871 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2872 "uhci_insert_bulk_td: kmem_zalloc failed");
2873 
2874 /* Free the transfer wrapper */
2875 uhci_deallocate_tw(uhcip, pp, tw);
2876 
2877 return (USB_FAILURE);
2878 }
2879 
2880 /* Allocate memory for the bulk TD's */
2881 if (uhci_alloc_bulk_isoc_tds(uhcip, num_bulk_tds, bulk_xfer_info) !=
2882 USB_SUCCESS) {
2883 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2884 "uhci_insert_bulk_td: alloc_bulk_isoc_tds failed");
2885 
2886 kmem_free(bulk_xfer_info, sizeof (uhci_bulk_isoc_xfer_t));
2887 
2888 /* Free the transfer wrapper */
2889 uhci_deallocate_tw(uhcip, pp, tw);
2890 
2891 return (USB_FAILURE);
2892 }
2893 
2894 td_pool_ptr = &bulk_xfer_info->td_pools[0];
2895 bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
2896 bulk_td_ptr[0].qh_td_prev = NULL;
2897 current_dummy = pp->pp_qh->td_tailp;
2898 buf_offs = 0;
2899 pp->pp_qh->bulk_xfer_info = bulk_xfer_info;
2900 
2901 /* Fill up all the bulk TD's */
2902 for (i = 0; i < bulk_xfer_info->num_pools; i++) {
2903 for (j = 0; j < (td_pool_ptr->num_tds - 1); j++) {
2904 uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
2905 &bulk_td_ptr[j+1], BULKTD_PADDR(td_pool_ptr,
2906 &bulk_td_ptr[j+1]), ph, buf_offs, mps, tw);
2907 buf_offs += mps;
2908 }
2909 
2910 /* fill in the last TD */
2911 if (i == (bulk_xfer_info->num_pools - 1)) {
2912 uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
2913 current_dummy, TD_PADDR(current_dummy),
2914 ph, buf_offs, length, tw);
2915 } else {
2916 /* fill in the TD at the tail of a pool */
2917 tmp_td = &bulk_td_ptr[j];
2918 td_pool_ptr = &bulk_xfer_info->td_pools[i + 1];
2919 bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
2920 uhci_fill_in_bulk_isoc_td(uhcip, tmp_td,
2921 &bulk_td_ptr[0], BULKTD_PADDR(td_pool_ptr,
2922 &bulk_td_ptr[0]), ph, buf_offs, mps, tw);
2923 buf_offs += mps;
2924 }
2925 }
2926 
2927 bulk_xfer_info->num_tds = (ushort_t)num_bulk_tds;
2928 
2929 /*
2930 * Point the end of the lattice tree to the start of the bulk xfers
2931 * queue head. This allows the HC to execute the same Queue Head/TD
2932 * in the same frame. Some bulk devices NAK after completing each
2933 * TD; as a result, the performance on such devices is very bad.
2934 * This loop provides a chance to execute NAK'ed bulk TDs again
2935 * in the same frame.
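 *
 * The matching teardown is in uhci_handle_bulk_td() and
 * uhci_handle_bulk_td_errors(): when uhci_pending_bulk_cmds
 * drops back to zero, the tail link_ptr is restored to
 * HC_END_OF_LIST so the schedule no longer loops.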
2936 */ 2937 if (uhcip->uhci_pending_bulk_cmds++ == 0) { 2938 uhcip->uhci_bulk_xfers_q_tail->link_ptr = 2939 uhcip->uhci_bulk_xfers_q_head->link_ptr; 2940 USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 2941 "uhci_insert_bulk_td: count = %d no tds %d", 2942 uhcip->uhci_pending_bulk_cmds, num_bulk_tds); 2943 } 2944 2945 /* Insert on the bulk queue head for the execution by HC */ 2946 SetQH32(uhcip, pp->pp_qh->element_ptr, 2947 bulk_xfer_info->td_pools[0].cookie.dmac_address); 2948 2949 return (USB_SUCCESS); 2950 } 2951 2952 2953 /* 2954 * uhci_fill_in_bulk_isoc_td 2955 * Fills the bulk/isoc TD 2956 * 2957 * offset - different meanings for bulk and isoc TDs: 2958 * starting offset into the TW buffer for a bulk TD 2959 * and the index into the isoc packet list for an isoc TD 2960 */ 2961 void 2962 uhci_fill_in_bulk_isoc_td(uhci_state_t *uhcip, uhci_td_t *current_td, 2963 uhci_td_t *next_td, 2964 uint32_t next_td_paddr, 2965 usba_pipe_handle_data_t *ph, 2966 uint_t offset, 2967 uint_t length, 2968 uhci_trans_wrapper_t *tw) 2969 { 2970 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2971 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep; 2972 uint32_t buf_addr; 2973 2974 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2975 "uhci_fill_in_bulk_isoc_td: tw 0x%p offs 0x%x length 0x%x", 2976 (void *)tw, offset, length); 2977 2978 bzero((char *)current_td, sizeof (uhci_td_t)); 2979 SetTD32(uhcip, current_td->link_ptr, next_td_paddr | HC_DEPTH_FIRST); 2980 2981 switch (UHCI_XFER_TYPE(ept)) { 2982 case USB_EP_ATTR_ISOCH: 2983 if (((usb_isoc_req_t *)tw->tw_curr_xfer_reqp)->isoc_attributes 2984 & USB_ATTRS_SHORT_XFER_OK) { 2985 SetTD_spd(uhcip, current_td, 1); 2986 } 2987 break; 2988 case USB_EP_ATTR_BULK: 2989 if (((usb_bulk_req_t *)tw->tw_curr_xfer_reqp)->bulk_attributes 2990 & USB_ATTRS_SHORT_XFER_OK) { 2991 SetTD_spd(uhcip, current_td, 1); 2992 } 2993 break; 2994 } 2995 2996 mutex_enter(&ph->p_usba_device->usb_mutex); 2997 2998 SetTD_c_err(uhcip, current_td, UHCI_MAX_ERR_COUNT); 2999 SetTD_status(uhcip, current_td, UHCI_TD_ACTIVE); 3000 SetTD_ioc(uhcip, current_td, INTERRUPT_ON_COMPLETION); 3001 SetTD_mlen(uhcip, current_td, 3002 (length == 0) ? ZERO_LENGTH : (length - 1)); 3003 SetTD_dtogg(uhcip, current_td, pp->pp_data_toggle); 3004 SetTD_devaddr(uhcip, current_td, ph->p_usba_device->usb_addr); 3005 SetTD_endpt(uhcip, current_td, ph->p_ep.bEndpointAddress & 3006 END_POINT_ADDRESS_MASK); 3007 SetTD_PID(uhcip, current_td, tw->tw_direction); 3008 3009 /* Get the right buffer address for the current TD */ 3010 switch (UHCI_XFER_TYPE(ept)) { 3011 case USB_EP_ATTR_ISOCH: 3012 buf_addr = tw->tw_isoc_bufs[offset].cookie.dmac_address; 3013 break; 3014 case USB_EP_ATTR_BULK: 3015 buf_addr = uhci_get_tw_paddr_by_offs(uhcip, offset, 3016 length, tw); 3017 break; 3018 } 3019 SetTD32(uhcip, current_td->buffer_address, buf_addr); 3020 3021 /* 3022 * Adjust the data toggle. 3023 * The data toggle bit must always be 0 for isoc transfers. 3024 * And set the "iso" bit in the TD for isoc transfers. 
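 *
 * (Full-speed isochronous transactions have no handshake phase
 * and are never retried, so the toggle cannot alternate; every
 * isoc packet is DATA0.)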
3025 */ 3026 if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) { 3027 pp->pp_data_toggle = 0; 3028 SetTD_iso(uhcip, current_td, 1); 3029 } else { 3030 ADJ_DATA_TOGGLE(pp); 3031 next_td->qh_td_prev = current_td; 3032 pp->pp_qh->td_tailp = next_td; 3033 } 3034 3035 current_td->outst_td_next = NULL; 3036 current_td->outst_td_prev = uhcip->uhci_outst_tds_tail; 3037 if (uhcip->uhci_outst_tds_head == NULL) { 3038 uhcip->uhci_outst_tds_head = current_td; 3039 } else { 3040 uhcip->uhci_outst_tds_tail->outst_td_next = current_td; 3041 } 3042 uhcip->uhci_outst_tds_tail = current_td; 3043 current_td->tw = tw; 3044 3045 if (tw->tw_hctd_head == NULL) { 3046 ASSERT(tw->tw_hctd_tail == NULL); 3047 tw->tw_hctd_head = current_td; 3048 tw->tw_hctd_tail = current_td; 3049 } else { 3050 /* Add the td to the end of the list */ 3051 tw->tw_hctd_tail->tw_td_next = current_td; 3052 tw->tw_hctd_tail = current_td; 3053 } 3054 3055 mutex_exit(&ph->p_usba_device->usb_mutex); 3056 } 3057 3058 3059 /* 3060 * uhci_alloc_bulk_isoc_tds: 3061 * - Allocates the isoc/bulk TD pools. It will allocate one whole 3062 * pool to store all the TDs if the system allows. Only when the 3063 * first allocation fails, it tries to allocate several small 3064 * pools with each pool limited in physical page size. 3065 */ 3066 static int 3067 uhci_alloc_bulk_isoc_tds( 3068 uhci_state_t *uhcip, 3069 uint_t num_tds, 3070 uhci_bulk_isoc_xfer_t *info) 3071 { 3072 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3073 "uhci_alloc_bulk_isoc_tds: num_tds: 0x%x info: 0x%p", 3074 num_tds, (void *)info); 3075 3076 info->num_pools = 1; 3077 /* allocate as a whole pool at the first time */ 3078 if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) != 3079 USB_SUCCESS) { 3080 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3081 "alloc_memory_for_tds failed: num_tds %d num_pools %d", 3082 num_tds, info->num_pools); 3083 3084 /* reduce the td number per pool and alloc again */ 3085 info->num_pools = num_tds / UHCI_MAX_TD_NUM_PER_POOL; 3086 if (num_tds % UHCI_MAX_TD_NUM_PER_POOL) { 3087 info->num_pools++; 3088 } 3089 3090 if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) != 3091 USB_SUCCESS) { 3092 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3093 "alloc_memory_for_tds failed: num_tds %d " 3094 "num_pools %d", num_tds, info->num_pools); 3095 3096 return (USB_NO_RESOURCES); 3097 } 3098 } 3099 3100 return (USB_SUCCESS); 3101 } 3102 3103 3104 /* 3105 * uhci_alloc_memory_for_tds: 3106 * - Allocates memory for the isoc/bulk td pools. 
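 *
 * For each pool the usual DDI DMA triple is performed:
 * ddi_dma_alloc_handle(), ddi_dma_mem_alloc(), then
 * ddi_dma_addr_bind_handle(), and the binding must yield exactly
 * one cookie; on any failure, every previously bound pool is
 * unwound in reverse order and USB_FAILURE is returned.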
3107 */ 3108 static int 3109 uhci_alloc_memory_for_tds( 3110 uhci_state_t *uhcip, 3111 uint_t num_tds, 3112 uhci_bulk_isoc_xfer_t *info) 3113 { 3114 int result, i, j, err; 3115 size_t real_length; 3116 uint_t ccount, num; 3117 ddi_device_acc_attr_t dev_attr; 3118 uhci_bulk_isoc_td_pool_t *td_pool_ptr1, *td_pool_ptr2; 3119 3120 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3121 "uhci_alloc_memory_for_tds: num_tds: 0x%x info: 0x%p " 3122 "num_pools: %u", num_tds, (void *)info, info->num_pools); 3123 3124 /* The host controller will be little endian */ 3125 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 3126 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 3127 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 3128 3129 /* Allocate the TD pool structures */ 3130 if ((info->td_pools = kmem_zalloc( 3131 (sizeof (uhci_bulk_isoc_td_pool_t) * info->num_pools), 3132 KM_SLEEP)) == NULL) { 3133 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3134 "uhci_alloc_memory_for_tds: alloc td_pools failed"); 3135 3136 return (USB_FAILURE); 3137 } 3138 3139 for (i = 0; i < info->num_pools; i++) { 3140 if (info->num_pools == 1) { 3141 num = num_tds; 3142 } else if (i < (info->num_pools - 1)) { 3143 num = UHCI_MAX_TD_NUM_PER_POOL; 3144 } else { 3145 num = (num_tds % UHCI_MAX_TD_NUM_PER_POOL); 3146 } 3147 3148 td_pool_ptr1 = &info->td_pools[i]; 3149 3150 /* Allocate the bulk TD pool DMA handle */ 3151 if (ddi_dma_alloc_handle(uhcip->uhci_dip, 3152 &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0, 3153 &td_pool_ptr1->dma_handle) != DDI_SUCCESS) { 3154 3155 for (j = 0; j < i; j++) { 3156 td_pool_ptr2 = &info->td_pools[j]; 3157 result = ddi_dma_unbind_handle( 3158 td_pool_ptr2->dma_handle); 3159 ASSERT(result == DDI_SUCCESS); 3160 ddi_dma_mem_free(&td_pool_ptr2->mem_handle); 3161 ddi_dma_free_handle(&td_pool_ptr2->dma_handle); 3162 } 3163 3164 kmem_free(info->td_pools, 3165 (sizeof (uhci_bulk_isoc_td_pool_t) * 3166 info->num_pools)); 3167 3168 return (USB_FAILURE); 3169 } 3170 3171 /* Allocate the memory for the bulk TD pool */ 3172 if (ddi_dma_mem_alloc(td_pool_ptr1->dma_handle, 3173 num * sizeof (uhci_td_t), &dev_attr, 3174 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0, 3175 &td_pool_ptr1->pool_addr, &real_length, 3176 &td_pool_ptr1->mem_handle) != DDI_SUCCESS) { 3177 3178 ddi_dma_free_handle(&td_pool_ptr1->dma_handle); 3179 3180 for (j = 0; j < i; j++) { 3181 td_pool_ptr2 = &info->td_pools[j]; 3182 result = ddi_dma_unbind_handle( 3183 td_pool_ptr2->dma_handle); 3184 ASSERT(result == DDI_SUCCESS); 3185 ddi_dma_mem_free(&td_pool_ptr2->mem_handle); 3186 ddi_dma_free_handle(&td_pool_ptr2->dma_handle); 3187 } 3188 3189 kmem_free(info->td_pools, 3190 (sizeof (uhci_bulk_isoc_td_pool_t) * 3191 info->num_pools)); 3192 3193 return (USB_FAILURE); 3194 } 3195 3196 /* Map the bulk TD pool into the I/O address space */ 3197 result = ddi_dma_addr_bind_handle(td_pool_ptr1->dma_handle, 3198 NULL, (caddr_t)td_pool_ptr1->pool_addr, real_length, 3199 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 3200 &td_pool_ptr1->cookie, &ccount); 3201 3202 /* Process the result */ 3203 err = USB_SUCCESS; 3204 3205 if (result != DDI_DMA_MAPPED) { 3206 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3207 "uhci_allocate_memory_for_tds: Result = %d", 3208 result); 3209 uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, 3210 result); 3211 3212 err = USB_FAILURE; 3213 } 3214 3215 if ((result == DDI_DMA_MAPPED) && (ccount != 1)) { 3216 /* The cookie count should be 1 */ 3217 USB_DPRINTF_L2(PRINT_MASK_ATTA, 3218 
uhcip->uhci_log_hdl, 3219 "uhci_allocate_memory_for_tds: " 3220 "More than 1 cookie"); 3221 3222 result = ddi_dma_unbind_handle( 3223 td_pool_ptr1->dma_handle); 3224 ASSERT(result == DDI_SUCCESS); 3225 3226 err = USB_FAILURE; 3227 } 3228 3229 if (err == USB_FAILURE) { 3230 3231 ddi_dma_mem_free(&td_pool_ptr1->mem_handle); 3232 ddi_dma_free_handle(&td_pool_ptr1->dma_handle); 3233 3234 for (j = 0; j < i; j++) { 3235 td_pool_ptr2 = &info->td_pools[j]; 3236 result = ddi_dma_unbind_handle( 3237 td_pool_ptr2->dma_handle); 3238 ASSERT(result == DDI_SUCCESS); 3239 ddi_dma_mem_free(&td_pool_ptr2->mem_handle); 3240 ddi_dma_free_handle(&td_pool_ptr2->dma_handle); 3241 } 3242 3243 kmem_free(info->td_pools, 3244 (sizeof (uhci_bulk_isoc_td_pool_t) * 3245 info->num_pools)); 3246 3247 return (USB_FAILURE); 3248 } 3249 3250 bzero((void *)td_pool_ptr1->pool_addr, 3251 num * sizeof (uhci_td_t)); 3252 td_pool_ptr1->num_tds = (ushort_t)num; 3253 } 3254 3255 return (USB_SUCCESS); 3256 } 3257 3258 3259 /* 3260 * uhci_handle_bulk_td: 3261 * 3262 * Handles the completed bulk transfer descriptors 3263 */ 3264 void 3265 uhci_handle_bulk_td(uhci_state_t *uhcip, uhci_td_t *td) 3266 { 3267 uint_t num_bulk_tds, index, td_count, j; 3268 usb_cr_t error; 3269 uint_t length, bytes_xfered; 3270 ushort_t MaxPacketSize; 3271 uint32_t buf_offs, paddr; 3272 uhci_td_t *bulk_td_ptr, *current_dummy, *td_head; 3273 uhci_td_t *tmp_td; 3274 queue_head_t *qh, *next_qh; 3275 uhci_trans_wrapper_t *tw = td->tw; 3276 uhci_pipe_private_t *pp = tw->tw_pipe_private; 3277 uhci_bulk_isoc_xfer_t *bulk_xfer_info; 3278 uhci_bulk_isoc_td_pool_t *td_pool_ptr; 3279 usba_pipe_handle_data_t *ph; 3280 3281 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3282 "uhci_handle_bulk_td: td = 0x%p tw = 0x%p", (void *)td, (void *)tw); 3283 3284 /* 3285 * Update the tw_bytes_pending, and tw_bytes_xfered 3286 */ 3287 bytes_xfered = ZERO_LENGTH; 3288 3289 /* 3290 * Check whether there are any errors occurred in the xfer. 3291 * If so, update the data_toggle for the queue head and 3292 * return error to the upper layer. 3293 */ 3294 if (GetTD_status(uhcip, td) & TD_STATUS_MASK) { 3295 uhci_handle_bulk_td_errors(uhcip, td); 3296 3297 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3298 "uhci_handle_bulk_td: error; data toggle: 0x%x", 3299 pp->pp_data_toggle); 3300 3301 return; 3302 } 3303 3304 /* 3305 * Update the tw_bytes_pending, and tw_bytes_xfered 3306 */ 3307 bytes_xfered = GetTD_alen(uhcip, td); 3308 if (bytes_xfered != ZERO_LENGTH) { 3309 tw->tw_bytes_pending -= (bytes_xfered + 1); 3310 tw->tw_bytes_xfered += (bytes_xfered + 1); 3311 } 3312 3313 /* 3314 * Get Bulk pipe information and pipe handle 3315 */ 3316 bulk_xfer_info = pp->pp_qh->bulk_xfer_info; 3317 ph = tw->tw_pipe_private->pp_pipe_handle; 3318 3319 /* 3320 * Check whether data underrun occurred. 3321 * If so, complete the transfer 3322 * Update the data toggle bit 3323 */ 3324 if (bytes_xfered != GetTD_mlen(uhcip, td)) { 3325 bulk_xfer_info->num_tds = 1; 3326 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3327 "uhci_handle_bulk_td: Data underrun occured"); 3328 3329 pp->pp_data_toggle = GetTD_dtogg(uhcip, td) == 0 ? 1 : 0; 3330 } 3331 3332 /* 3333 * If the TD's in the current frame are completed, then check 3334 * whether we have any more bytes to xfer. If so, insert TD's. 3335 * If no more bytes needs to be transferred, then do callback to the 3336 * upper layer. 3337 * If the TD's in the current frame are not completed, then 3338 * just delete the TD from the linked lists. 
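 *
 * In outline, the paths below are:
 *	- all TDs of this chunk done, bytes still pending and no
 *	  short packet: refill the existing TD pools with the next
 *	  chunk and re-arm the QH element pointer;
 *	- transfer complete (or short packet): call back to the
 *	  client and free the TD pools and the TW;
 *	- TDs of this chunk not all done yet: just unlink this TD.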
3339 */ 3340 USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3341 "uhci_handle_bulk_td: completed TD data toggle: 0x%x", 3342 GetTD_dtogg(uhcip, td)); 3343 3344 if (--bulk_xfer_info->num_tds == 0) { 3345 uhci_delete_td(uhcip, td); 3346 3347 if ((tw->tw_bytes_pending) && 3348 (GetTD_mlen(uhcip, td) - GetTD_alen(uhcip, td) == 0)) { 3349 3350 MaxPacketSize = pp->pp_pipe_handle->p_ep.wMaxPacketSize; 3351 length = MaxPacketSize; 3352 3353 qh = pp->pp_qh; 3354 paddr = GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK; 3355 if (GetQH32(uhcip, qh->link_ptr) != 3356 GetQH32(uhcip, 3357 uhcip->uhci_bulk_xfers_q_head->link_ptr)) { 3358 next_qh = QH_VADDR(paddr); 3359 SetQH32(uhcip, qh->prev_qh->link_ptr, 3360 paddr|(0x2)); 3361 next_qh->prev_qh = qh->prev_qh; 3362 SetQH32(uhcip, qh->link_ptr, 3363 GetQH32(uhcip, 3364 uhcip->uhci_bulk_xfers_q_head->link_ptr)); 3365 qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail; 3366 SetQH32(uhcip, 3367 uhcip->uhci_bulk_xfers_q_tail->link_ptr, 3368 QH_PADDR(qh) | 0x2); 3369 uhcip->uhci_bulk_xfers_q_tail = qh; 3370 } 3371 3372 if ((tw->tw_bytes_pending / MaxPacketSize) >= 3373 MAX_NUM_BULK_TDS_PER_XFER) { 3374 num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER; 3375 } else { 3376 num_bulk_tds = 3377 (tw->tw_bytes_pending / MaxPacketSize); 3378 if (tw->tw_bytes_pending % MaxPacketSize) { 3379 num_bulk_tds++; 3380 length = (tw->tw_bytes_pending % 3381 MaxPacketSize); 3382 } 3383 } 3384 3385 current_dummy = pp->pp_qh->td_tailp; 3386 td_pool_ptr = &bulk_xfer_info->td_pools[0]; 3387 bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr; 3388 buf_offs = tw->tw_bytes_xfered; 3389 td_count = num_bulk_tds; 3390 index = 0; 3391 3392 /* reuse the TDs to transfer more data */ 3393 while (td_count > 0) { 3394 for (j = 0; 3395 (j < (td_pool_ptr->num_tds - 1)) && 3396 (td_count > 1); j++, td_count--) { 3397 uhci_fill_in_bulk_isoc_td(uhcip, 3398 &bulk_td_ptr[j], &bulk_td_ptr[j+1], 3399 BULKTD_PADDR(td_pool_ptr, 3400 &bulk_td_ptr[j+1]), ph, buf_offs, 3401 MaxPacketSize, tw); 3402 buf_offs += MaxPacketSize; 3403 } 3404 3405 if (td_count == 1) { 3406 uhci_fill_in_bulk_isoc_td(uhcip, 3407 &bulk_td_ptr[j], current_dummy, 3408 TD_PADDR(current_dummy), ph, 3409 buf_offs, length, tw); 3410 3411 break; 3412 } else { 3413 tmp_td = &bulk_td_ptr[j]; 3414 ASSERT(index < 3415 (bulk_xfer_info->num_pools - 1)); 3416 td_pool_ptr = &bulk_xfer_info-> 3417 td_pools[index + 1]; 3418 bulk_td_ptr = (uhci_td_t *) 3419 td_pool_ptr->pool_addr; 3420 uhci_fill_in_bulk_isoc_td(uhcip, 3421 tmp_td, &bulk_td_ptr[0], 3422 BULKTD_PADDR(td_pool_ptr, 3423 &bulk_td_ptr[0]), ph, buf_offs, 3424 MaxPacketSize, tw); 3425 buf_offs += MaxPacketSize; 3426 td_count--; 3427 index++; 3428 } 3429 } 3430 3431 pp->pp_qh->bulk_xfer_info = bulk_xfer_info; 3432 bulk_xfer_info->num_tds = (ushort_t)num_bulk_tds; 3433 SetQH32(uhcip, pp->pp_qh->element_ptr, 3434 bulk_xfer_info->td_pools[0].cookie.dmac_address); 3435 } else { 3436 usba_pipe_handle_data_t *usb_pp = pp->pp_pipe_handle; 3437 3438 pp->pp_qh->bulk_xfer_info = NULL; 3439 3440 if (tw->tw_bytes_pending) { 3441 /* Update the element pointer */ 3442 SetQH32(uhcip, pp->pp_qh->element_ptr, 3443 TD_PADDR(pp->pp_qh->td_tailp)); 3444 3445 /* Remove all the tds */ 3446 td_head = tw->tw_hctd_head; 3447 while (td_head != NULL) { 3448 uhci_delete_td(uhcip, td_head); 3449 td_head = tw->tw_hctd_head; 3450 } 3451 } 3452 3453 if (tw->tw_direction == PID_IN) { 3454 usb_req_attrs_t attrs = ((usb_bulk_req_t *) 3455 tw->tw_curr_xfer_reqp)->bulk_attributes; 3456 3457 error = USB_CR_OK; 3458 3459 /* Data run 
occurred */ 3460 if (tw->tw_bytes_pending && 3461 (!(attrs & USB_ATTRS_SHORT_XFER_OK))) { 3462 error = USB_CR_DATA_UNDERRUN; 3463 } 3464 3465 uhci_sendup_td_message(uhcip, error, tw); 3466 } else { 3467 uhci_do_byte_stats(uhcip, tw->tw_length, 3468 usb_pp->p_ep.bmAttributes, 3469 usb_pp->p_ep.bEndpointAddress); 3470 3471 /* Data underrun occurred */ 3472 if (tw->tw_bytes_pending) { 3473 3474 tw->tw_data->b_rptr += 3475 tw->tw_bytes_xfered; 3476 3477 USB_DPRINTF_L2(PRINT_MASK_ATTA, 3478 uhcip->uhci_log_hdl, 3479 "uhci_handle_bulk_td: " 3480 "data underrun occurred"); 3481 3482 uhci_hcdi_callback(uhcip, pp, 3483 tw->tw_pipe_private->pp_pipe_handle, 3484 tw, USB_CR_DATA_UNDERRUN); 3485 } else { 3486 uhci_hcdi_callback(uhcip, pp, 3487 tw->tw_pipe_private->pp_pipe_handle, 3488 tw, USB_CR_OK); 3489 } 3490 } /* direction */ 3491 3492 /* Deallocate DMA memory */ 3493 uhci_deallocate_tw(uhcip, pp, tw); 3494 for (j = 0; j < bulk_xfer_info->num_pools; j++) { 3495 td_pool_ptr = &bulk_xfer_info->td_pools[j]; 3496 (void) ddi_dma_unbind_handle( 3497 td_pool_ptr->dma_handle); 3498 ddi_dma_mem_free(&td_pool_ptr->mem_handle); 3499 ddi_dma_free_handle(&td_pool_ptr->dma_handle); 3500 } 3501 kmem_free(bulk_xfer_info->td_pools, 3502 (sizeof (uhci_bulk_isoc_td_pool_t) * 3503 bulk_xfer_info->num_pools)); 3504 kmem_free(bulk_xfer_info, 3505 sizeof (uhci_bulk_isoc_xfer_t)); 3506 3507 /* 3508 * When there are no pending bulk commands, point the 3509 * end of the lattice tree to NULL. This will make sure 3510 * that the HC control does not loop anymore and PCI 3511 * bus is not affected. 3512 */ 3513 if (--uhcip->uhci_pending_bulk_cmds == 0) { 3514 uhcip->uhci_bulk_xfers_q_tail->link_ptr = 3515 HC_END_OF_LIST; 3516 USB_DPRINTF_L3(PRINT_MASK_ATTA, 3517 uhcip->uhci_log_hdl, 3518 "uhci_handle_bulk_td: count = %d", 3519 uhcip->uhci_pending_bulk_cmds); 3520 } 3521 } 3522 } else { 3523 uhci_delete_td(uhcip, td); 3524 } 3525 } 3526 3527 3528 void 3529 uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td) 3530 { 3531 usb_cr_t usb_err; 3532 uint32_t paddr_tail, element_ptr, paddr; 3533 uhci_td_t *next_td; 3534 uhci_pipe_private_t *pp; 3535 uhci_trans_wrapper_t *tw = td->tw; 3536 usba_pipe_handle_data_t *ph; 3537 uhci_bulk_isoc_td_pool_t *td_pool_ptr = NULL; 3538 3539 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3540 "uhci_handle_bulk_td_errors: td = %p", (void *)td); 3541 3542 #ifdef DEBUG 3543 uhci_print_td(uhcip, td); 3544 #endif 3545 3546 tw = td->tw; 3547 ph = tw->tw_pipe_private->pp_pipe_handle; 3548 pp = (uhci_pipe_private_t *)ph->p_hcd_private; 3549 3550 /* 3551 * Find the type of error occurred and return the error 3552 * to the upper layer. And adjust the data toggle. 3553 */ 3554 element_ptr = GetQH32(uhcip, pp->pp_qh->element_ptr) & 3555 QH_ELEMENT_PTR_MASK; 3556 paddr_tail = TD_PADDR(pp->pp_qh->td_tailp); 3557 3558 /* 3559 * If a timeout occurs before a transfer has completed, 3560 * the timeout handler sets the CRC/Timeout bit and clears the Active 3561 * bit in the link_ptr for each td in the transfer. 3562 * It then waits (at least) 1 ms so that any tds the controller might 3563 * have been executing will have completed. 3564 * So at this point element_ptr will point to either: 3565 * 1) the next td for the transfer (which has not been executed, 3566 * and has the CRC/Timeout status bit set and Active bit cleared), 3567 * 2) the dummy td for this qh. 3568 * So if the element_ptr does not point to the dummy td, we know 3569 * it points to the next td that would have been executed. 
3570 * That td has the data toggle we want to save. 3571 * All outstanding tds have been marked as CRC/Timeout, 3572 * so it doesn't matter which td we pass to uhci_parse_td_error 3573 * for the error status. 3574 */ 3575 if (element_ptr != paddr_tail) { 3576 paddr = (element_ptr & QH_ELEMENT_PTR_MASK); 3577 uhci_get_bulk_td_by_paddr(uhcip, pp->pp_qh->bulk_xfer_info, 3578 paddr, &td_pool_ptr); 3579 next_td = BULKTD_VADDR(td_pool_ptr, paddr); 3580 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3581 "uhci_handle_bulk_td_errors: next td = %p", 3582 (void *)next_td); 3583 3584 usb_err = uhci_parse_td_error(uhcip, pp, next_td); 3585 } else { 3586 usb_err = uhci_parse_td_error(uhcip, pp, td); 3587 } 3588 3589 /* 3590 * Update the link pointer. 3591 */ 3592 SetQH32(uhcip, pp->pp_qh->element_ptr, TD_PADDR(pp->pp_qh->td_tailp)); 3593 3594 /* 3595 * Send up number of bytes transferred before the error condition. 3596 */ 3597 if ((tw->tw_direction == PID_OUT) && tw->tw_data) { 3598 tw->tw_data->b_rptr += tw->tw_bytes_xfered; 3599 } 3600 3601 uhci_remove_bulk_tds_tws(uhcip, tw->tw_pipe_private, UHCI_IN_ERROR); 3602 3603 /* 3604 * When there are no pending bulk commands, point the end of the 3605 * lattice tree to NULL. This will make sure that the HC control 3606 * does not loop anymore and PCI bus is not affected. 3607 */ 3608 if (--uhcip->uhci_pending_bulk_cmds == 0) { 3609 uhcip->uhci_bulk_xfers_q_tail->link_ptr = HC_END_OF_LIST; 3610 USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3611 "uhci_handle_bulk_td_errors: count = %d", 3612 uhcip->uhci_pending_bulk_cmds); 3613 } 3614 3615 uhci_hcdi_callback(uhcip, pp, ph, tw, usb_err); 3616 uhci_deallocate_tw(uhcip, pp, tw); 3617 } 3618 3619 3620 /* 3621 * uhci_get_bulk_td_by_paddr: 3622 * Obtain the address of the TD pool the physical address falls in. 
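 *	A pool matches when its single DMA cookie covers the address,
 *	i.e. cookie.dmac_address <= paddr < cookie.dmac_address +
 *	cookie.dmac_size.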
3623 * 3624 * td_pool_pp - pointer to the address of the TD pool containing the paddr 3625 */ 3626 /* ARGSUSED */ 3627 static void 3628 uhci_get_bulk_td_by_paddr( 3629 uhci_state_t *uhcip, 3630 uhci_bulk_isoc_xfer_t *info, 3631 uint32_t paddr, 3632 uhci_bulk_isoc_td_pool_t **td_pool_pp) 3633 { 3634 uint_t i = 0; 3635 3636 while (i < info->num_pools) { 3637 *td_pool_pp = &info->td_pools[i]; 3638 if (((*td_pool_pp)->cookie.dmac_address <= paddr) && 3639 (((*td_pool_pp)->cookie.dmac_address + 3640 (*td_pool_pp)->cookie.dmac_size) > paddr)) { 3641 3642 break; 3643 } 3644 i++; 3645 } 3646 3647 ASSERT(i < info->num_pools); 3648 } 3649 3650 3651 void 3652 uhci_remove_bulk_tds_tws( 3653 uhci_state_t *uhcip, 3654 uhci_pipe_private_t *pp, 3655 int what) 3656 { 3657 uint_t rval, i; 3658 uhci_td_t *head; 3659 uhci_td_t *head_next; 3660 usb_opaque_t curr_reqp; 3661 uhci_bulk_isoc_xfer_t *info; 3662 uhci_bulk_isoc_td_pool_t *td_pool_ptr; 3663 3664 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 3665 3666 if ((info = pp->pp_qh->bulk_xfer_info) == NULL) { 3667 3668 return; 3669 } 3670 3671 head = uhcip->uhci_outst_tds_head; 3672 3673 while (head) { 3674 uhci_trans_wrapper_t *tw_tmp = head->tw; 3675 head_next = head->outst_td_next; 3676 3677 if (pp->pp_qh == tw_tmp->tw_pipe_private->pp_qh) { 3678 curr_reqp = tw_tmp->tw_curr_xfer_reqp; 3679 if (curr_reqp && 3680 ((what == UHCI_IN_CLOSE) || 3681 (what == UHCI_IN_RESET))) { 3682 uhci_hcdi_callback(uhcip, pp, 3683 pp->pp_pipe_handle, 3684 tw_tmp, USB_CR_FLUSHED); 3685 } /* end of curr_reqp */ 3686 3687 uhci_delete_td(uhcip, head); 3688 3689 if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) { 3690 ASSERT(info->num_tds > 0); 3691 if (--info->num_tds == 0) { 3692 uhci_deallocate_tw(uhcip, pp, tw_tmp); 3693 3694 /* 3695 * This will make sure that the HC 3696 * does not loop anymore when there 3697 * are no pending bulk commands. 3698 */ 3699 if (--uhcip->uhci_pending_bulk_cmds 3700 == 0) { 3701 uhcip->uhci_bulk_xfers_q_tail-> 3702 link_ptr = HC_END_OF_LIST; 3703 USB_DPRINTF_L3(PRINT_MASK_ATTA, 3704 uhcip->uhci_log_hdl, 3705 "uhci_remove_bulk_tds_tws:" 3706 " count = %d", 3707 uhcip-> 3708 uhci_pending_bulk_cmds); 3709 } 3710 } 3711 } 3712 } 3713 3714 head = head_next; 3715 } 3716 3717 if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) { 3718 ASSERT(info->num_tds == 0); 3719 } 3720 3721 for (i = 0; i < info->num_pools; i++) { 3722 td_pool_ptr = &info->td_pools[i]; 3723 rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle); 3724 ASSERT(rval == DDI_SUCCESS); 3725 ddi_dma_mem_free(&td_pool_ptr->mem_handle); 3726 ddi_dma_free_handle(&td_pool_ptr->dma_handle); 3727 } 3728 kmem_free(info->td_pools, (sizeof (uhci_bulk_isoc_td_pool_t) * 3729 info->num_pools)); 3730 kmem_free(info, sizeof (uhci_bulk_isoc_xfer_t)); 3731 pp->pp_qh->bulk_xfer_info = NULL; 3732 } 3733 3734 3735 /* 3736 * uhci_save_data_toggle () 3737 * Save the data toggle in the usba_device structure 3738 */ 3739 void 3740 uhci_save_data_toggle(uhci_pipe_private_t *pp) 3741 { 3742 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle; 3743 3744 /* Save the data toggle in the usb devices structure. */ 3745 mutex_enter(&ph->p_mutex); 3746 usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress, 3747 pp->pp_data_toggle); 3748 mutex_exit(&ph->p_mutex); 3749 } 3750 3751 /* 3752 * uhci_create_isoc_transfer_wrapper: 3753 * Create a Transaction Wrapper (TW) for isoc transfer. 3754 * This involves the allocating of DMA resources. 
 */
static uhci_trans_wrapper_t *
uhci_create_isoc_transfer_wrapper(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	usb_isoc_req_t		*req,
	size_t			length,
	usb_flags_t		usb_flags)
{
	int			result;
	size_t			real_length, strtlen, xfer_size;
	uhci_trans_wrapper_t	*tw;
	ddi_device_acc_attr_t	dev_attr;
	ddi_dma_attr_t		dma_attr;
	int			kmem_flag;
	int			(*dmamem_wait)(caddr_t);
	uint_t			i, j, ccount;
	usb_isoc_req_t		*tmp_req = req;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep) != USB_EP_ATTR_ISOCH) {

		return (NULL);
	}

	if ((req == NULL) && (UHCI_XFER_DIR(&pp->pp_pipe_handle->p_ep) ==
	    USB_EP_DIR_IN)) {
		tmp_req = (usb_isoc_req_t *)pp->pp_client_periodic_in_reqp;
	}

	if (tmp_req == NULL) {

		return (NULL);
	}

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_isoc_transfer_wrapper: length = 0x%lx flags = 0x%x",
	    length, usb_flags);

	/* The SLEEP flag should not be used in interrupt context */
	if (servicing_interrupt()) {
		kmem_flag = KM_NOSLEEP;
		dmamem_wait = DDI_DMA_DONTWAIT;
	} else {
		kmem_flag = KM_SLEEP;
		dmamem_wait = DDI_DMA_SLEEP;
	}

	/* Allocate space for the transfer wrapper */
	if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
	    NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_isoc_transfer_wrapper: kmem_zalloc failed");

		return (NULL);
	}

	/* Allocate space for the isoc buffer handles */
	strtlen = sizeof (uhci_isoc_buf_t) * tmp_req->isoc_pkts_count;
	if ((tw->tw_isoc_bufs = kmem_zalloc(strtlen, kmem_flag)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_isoc_transfer_wrapper: kmem_zalloc "
		    "isoc buffer failed");
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
	dma_attr.dma_attr_sgllen = 1;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Store the transfer length */
	tw->tw_length = length;

	for (i = 0; i < tmp_req->isoc_pkts_count; i++) {
		tw->tw_isoc_bufs[i].index = (ushort_t)i;

		/* Allocate the DMA handle */
		if ((result = ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr,
		    dmamem_wait, 0, &tw->tw_isoc_bufs[i].dma_handle)) !=
		    DDI_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_create_isoc_transfer_wrapper: "
			    "Alloc handle %d failed", i);

			for (j = 0; j < i; j++) {
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[j].dma_handle);
				ASSERT(result == DDI_SUCCESS);
				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
				    mem_handle);
				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
				    dma_handle);
			}
			kmem_free(tw->tw_isoc_bufs, strtlen);
			kmem_free(tw, sizeof (uhci_trans_wrapper_t));

			return (NULL);
		}

		/* Allocate the memory */
		xfer_size = tmp_req->isoc_pkt_descr[i].isoc_pkt_length;
		if ((result = ddi_dma_mem_alloc(tw->tw_isoc_bufs[i].dma_handle,
		    xfer_size, &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait,
		    NULL, (caddr_t *)&tw->tw_isoc_bufs[i].buf_addr,
		    &real_length, &tw->tw_isoc_bufs[i].mem_handle)) !=
		    DDI_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_create_isoc_transfer_wrapper: "
			    "dma_mem_alloc %d fail", i);
			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);

			for (j = 0; j < i; j++) {
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[j].dma_handle);
				ASSERT(result == DDI_SUCCESS);
				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
				    mem_handle);
				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
				    dma_handle);
			}
			kmem_free(tw->tw_isoc_bufs, strtlen);
			kmem_free(tw, sizeof (uhci_trans_wrapper_t));

			return (NULL);
		}

		ASSERT(real_length >= xfer_size);

		/* Bind the handle */
		result = ddi_dma_addr_bind_handle(
		    tw->tw_isoc_bufs[i].dma_handle, NULL,
		    (caddr_t)tw->tw_isoc_bufs[i].buf_addr, real_length,
		    DDI_DMA_RDWR|DDI_DMA_CONSISTENT, dmamem_wait, NULL,
		    &tw->tw_isoc_bufs[i].cookie, &ccount);

		if ((result == DDI_DMA_MAPPED) && (ccount == 1)) {
			tw->tw_isoc_bufs[i].length = xfer_size;

			continue;
		} else {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_create_isoc_transfer_wrapper: "
			    "Bind handle %d failed", i);
			if (result == DDI_DMA_MAPPED) {
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[i].dma_handle);
				ASSERT(result == DDI_SUCCESS);
			}
			ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);

			for (j = 0; j < i; j++) {
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[j].dma_handle);
				ASSERT(result == DDI_SUCCESS);
				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
				    mem_handle);
				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
				    dma_handle);
			}
			kmem_free(tw->tw_isoc_bufs, strtlen);
			kmem_free(tw, sizeof (uhci_trans_wrapper_t));

			return (NULL);
		}
	}

	tw->tw_ncookies = tmp_req->isoc_pkts_count;
	tw->tw_isoc_strtlen = strtlen;

	/*
	 * Only allow one wrapper to be added at a time. Insert the
	 * new transaction wrapper into the list for this pipe.
	 */
	if (pp->pp_tw_head == NULL) {
		pp->pp_tw_head = tw;
		pp->pp_tw_tail = tw;
	} else {
		pp->pp_tw_tail->tw_next = tw;
		pp->pp_tw_tail = tw;
		ASSERT(tw->tw_next == NULL);
	}

	/* Store a back pointer to the pipe private structure */
	tw->tw_pipe_private = pp;

	/* Store the transfer type - synchronous or asynchronous */
	tw->tw_flags = usb_flags;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_isoc_transfer_wrapper: tw = 0x%p, ncookies = %u",
	    (void *)tw, tw->tw_ncookies);

	return (tw);
}


/*
 * uhci_insert_isoc_td:
 *	- Create the transfer wrapper
 *	- Allocate memory for the isoc TDs
 *	- Fill in all the TDs and submit them to the HC
 *	- Update all the linked lists
 */
int
uhci_insert_isoc_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_isoc_req_t		*isoc_req,
	size_t			length,
	usb_flags_t		flags)
{
	int			rval = USB_SUCCESS;
	int			error;
	uint_t			ddic;
	uint32_t		i, j, index;
	uint32_t		bytes_to_xfer;
	uint32_t		expired_frames = 0;
	usb_frame_number_t	start_frame, end_frame, current_frame;
	uhci_td_t		*td_ptr;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
	uhci_bulk_isoc_td_pool_t	*td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: ph = 0x%p isoc req = %p length = %lu",
	    (void *)ph, (void *)isoc_req, length);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Allocate a transfer wrapper */
	if ((tw = uhci_create_isoc_transfer_wrapper(uhcip, pp, isoc_req,
	    length, flags)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/* Save the current isochronous request pointer */
	tw->tw_curr_xfer_reqp = (usb_opaque_t)isoc_req;

	/*
	 * Initialize the transfer wrapper. These values are useful
	 * for sending back the reply.
	 */
	tw->tw_handle_td = uhci_handle_isoc_td;
	tw->tw_handle_callback_value = NULL;
	tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
	    PID_OUT : PID_IN;

	/*
	 * If the transfer is an isoc OUT, copy the data from the request
	 * into the transfer wrapper.
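	 * Each packet's payload goes into its own DMA buffer, in packet
	 * order, since the buffers were allocated per packet in
	 * uhci_create_isoc_transfer_wrapper() above.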
	 */
	if ((tw->tw_direction == PID_OUT) && length) {
		uchar_t *p;

		ASSERT(isoc_req->isoc_data != NULL);
		p = isoc_req->isoc_data->b_rptr;

		/* Copy the data into the message */
		for (i = 0; i < isoc_req->isoc_pkts_count; i++) {
			ddi_rep_put8(tw->tw_isoc_bufs[i].mem_handle,
			    p, (uint8_t *)tw->tw_isoc_bufs[i].buf_addr,
			    isoc_req->isoc_pkt_descr[i].isoc_pkt_length,
			    DDI_DEV_AUTOINCR);
			p += isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		}
	}

	if (tw->tw_direction == PID_IN) {
		if ((rval = uhci_allocate_periodic_in_resource(uhcip, pp, tw,
		    flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
			    "uhci_insert_isoc_td: isoc_req_t alloc failed");
			uhci_deallocate_tw(uhcip, pp, tw);

			return (rval);
		}

		isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;
	}

	tw->tw_isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;

	/* Get the pointer to the isoc_xfer_info structure */
	isoc_xfer_info = (uhci_bulk_isoc_xfer_t *)&tw->tw_xfer_info;
	isoc_xfer_info->num_tds = isoc_req->isoc_pkts_count;

	/*
	 * Allocate memory for the isoc TDs
	 */
	if ((rval = uhci_alloc_bulk_isoc_tds(uhcip, isoc_req->isoc_pkts_count,
	    isoc_xfer_info)) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: TD memory allocation failure");

		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}
		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	/*
	 * Get the isoc TD pool address, the buffer address and
	 * the max packet size that the device supports.
	 */
	td_pool_ptr = &isoc_xfer_info->td_pools[0];
	td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
	index = 0;

	/*
	 * Fill in the isoc TDs
	 */
	USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: isoc pkts %d", isoc_req->isoc_pkts_count);

	for (i = 0; i < isoc_xfer_info->num_pools; i++) {
		for (j = 0; j < td_pool_ptr->num_tds; j++) {
			bytes_to_xfer =
			    isoc_req->isoc_pkt_descr[index].isoc_pkt_length;

			uhci_fill_in_bulk_isoc_td(uhcip, &td_ptr[j],
			    (uhci_td_t *)NULL, HC_END_OF_LIST, ph, index,
			    bytes_to_xfer, tw);
			td_ptr[j].isoc_pkt_index = (ushort_t)index;
			index++;
		}

		if (i < (isoc_xfer_info->num_pools - 1)) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i + 1];
			td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
		}
	}

	/*
	 * Get the starting frame number.
	 * The client driver sets the USB_ATTRS_ISOC_XFER_ASAP flag to let
	 * the HCD take care of the starting frame number.
	 *
	 * The following code is time critical, so execute it atomically.
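	 *
	 * For illustration (the FRNUM_OFFSET value is assumed here, say 5):
	 * a request for start frame 990 with 30 packets (end frame 1020)
	 * that is processed at current frame 1000 has frames 990..1004
	 * already expired; the first 15 packets are then completed below
	 * as USB_CR_NOT_ACCESSED and the transfer begins at frame 1005.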
	 */
	ddic = ddi_enter_critical();
	current_frame = uhci_get_sw_frame_number(uhcip);

	if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_START_FRAME) {
		start_frame = isoc_req->isoc_frame_no;
		end_frame = start_frame + isoc_req->isoc_pkts_count;

		/* Check the available frames */
		if ((end_frame - current_frame) < UHCI_MAX_ISOC_FRAMES) {
			if (current_frame > start_frame) {
				if ((current_frame + FRNUM_OFFSET) <
				    end_frame) {
					expired_frames = current_frame +
					    FRNUM_OFFSET - start_frame;
					start_frame = current_frame +
					    FRNUM_OFFSET;
				} else {
					rval = USB_INVALID_START_FRAME;
				}
			}
		} else {
			rval = USB_INVALID_START_FRAME;
		}

	} else if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_XFER_ASAP) {
		start_frame = pp->pp_frame_num;

		if (start_frame == INVALID_FRNUM) {
			start_frame = current_frame + FRNUM_OFFSET;
		} else if (current_frame > start_frame) {
			start_frame = current_frame + FRNUM_OFFSET;
		}

		end_frame = start_frame + isoc_req->isoc_pkts_count;
		isoc_req->isoc_frame_no = start_frame;

	}

	if (rval != USB_SUCCESS) {

		/* Exit the critical section */
		ddi_exit_critical(ddic);

		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: Invalid starting frame number");

		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}

		while (tw->tw_hctd_head) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		for (i = 0; i < isoc_xfer_info->num_pools; i++) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i];
			error = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
			ASSERT(error == DDI_SUCCESS);
			ddi_dma_mem_free(&td_pool_ptr->mem_handle);
			ddi_dma_free_handle(&td_pool_ptr->dma_handle);
		}
		kmem_free(isoc_xfer_info->td_pools,
		    (sizeof (uhci_bulk_isoc_td_pool_t) *
		    isoc_xfer_info->num_pools));

		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	for (i = 0; i < expired_frames; i++) {
		isoc_req->isoc_pkt_descr[i].isoc_pkt_status =
		    USB_CR_NOT_ACCESSED;
		isoc_req->isoc_pkt_descr[i].isoc_pkt_actual_length =
		    isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		uhci_delete_td(uhcip, td_ptr);
		--isoc_xfer_info->num_tds;
	}

	/*
	 * Add the TDs to the HC list
	 */
	start_frame = (start_frame & 0x3ff);
	for (; i < isoc_req->isoc_pkts_count; i++) {
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		if (uhcip->uhci_isoc_q_tailp[start_frame]) {
			td_ptr->isoc_prev =
			    uhcip->uhci_isoc_q_tailp[start_frame];
			td_ptr->isoc_next = NULL;
			td_ptr->link_ptr =
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr;
			uhcip->uhci_isoc_q_tailp[start_frame]->isoc_next =
			    td_ptr;
			SetTD32(uhcip,
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr,
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
		} else {
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
			td_ptr->isoc_next = NULL;
			td_ptr->isoc_prev = NULL;
			SetTD32(uhcip, td_ptr->link_ptr,
			    GetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame]));
			SetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame],
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
		}
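
		/*
		 * Record the frame this TD was queued on and advance to
		 * the next frame, wrapping at NUM_FRAME_LST_ENTRIES.
		 */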
		td_ptr->starting_frame = (uint_t)start_frame;

		if (++start_frame == NUM_FRAME_LST_ENTRIES)
			start_frame = 0;
	}

	ddi_exit_critical(ddic);
	pp->pp_frame_num = end_frame;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: current frame number 0x%llx, pipe frame num"
	    " 0x%llx", (unsigned long long)current_frame,
	    (unsigned long long)(pp->pp_frame_num));

	return (rval);
}


/*
 * uhci_get_isoc_td_by_index:
 *	Obtain the addresses of the TD pool and the TD at the index.
 *
 *	tdpp - pointer to the address of the TD at the isoc packet index
 *	td_pool_pp - pointer to the address of the TD pool containing
 *	    the specified TD
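 *
 *	For illustration (pool sizes are hypothetical): with two pools
 *	holding 100 TDs each, index 130 skips pool 0 and resolves to
 *	TD 30 of pool 1.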
 */
/* ARGSUSED */
static void
uhci_get_isoc_td_by_index(
	uhci_state_t		*uhcip,
	uhci_bulk_isoc_xfer_t	*info,
	uint_t			index,
	uhci_td_t		**tdpp,
	uhci_bulk_isoc_td_pool_t **td_pool_pp)
{
	uint_t		i = 0, j = 0;
	uhci_td_t	*td_ptr;

	while (j < info->num_pools) {
		if ((i + info->td_pools[j].num_tds) <= index) {
			i += info->td_pools[j].num_tds;
			j++;
		} else {
			i = index - i;

			break;
		}
	}

	ASSERT(j < info->num_pools);
	*td_pool_pp = &info->td_pools[j];
	td_ptr = (uhci_td_t *)((*td_pool_pp)->pool_addr);
	*tdpp = &td_ptr[i];
}


/*
 * uhci_handle_isoc_td:
 *	Handle a completed isoc TD.
 */
void
uhci_handle_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint_t			rval, i;
	uint32_t		pkt_index = td->isoc_pkt_index;
	usb_cr_t		cr;
	uhci_trans_wrapper_t	*tw = td->tw;
	usb_isoc_req_t		*isoc_req = (usb_isoc_req_t *)tw->tw_isoc_req;
	uhci_pipe_private_t	*pp = tw->tw_pipe_private;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info = &tw->tw_xfer_info;
	usba_pipe_handle_data_t	*usb_pp;
	uhci_bulk_isoc_td_pool_t	*td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_handle_isoc_td: td = 0x%p, pp = 0x%p, tw = 0x%p, req = 0x%p, "
	    "index = %x", (void *)td, (void *)pp, (void *)tw, (void *)isoc_req,
	    pkt_index);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	usb_pp = pp->pp_pipe_handle;

	/*
	 * Check whether any errors occurred. If so, update the error
	 * count and report it upward, but never return a non-zero
	 * completion reason.
	 */
	cr = USB_CR_OK;
	if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_handle_isoc_td: Error Occurred: TD Status = %x",
		    GetTD_status(uhcip, td));
		if (isoc_req != NULL) {
			isoc_req->isoc_error_count++;
		}
	}

	if (isoc_req != NULL) {
		isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_status = cr;
		isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_actual_length =
		    (GetTD_alen(uhcip, td) == ZERO_LENGTH) ? 0 :
		    GetTD_alen(uhcip, td) + 1;
	}

	uhci_delete_isoc_td(uhcip, td);

	if (--isoc_xfer_info->num_tds != 0) {
		USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_handle_isoc_td: Number of TDs %d",
		    isoc_xfer_info->num_tds);

		return;
	}

	tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;
	if (tw->tw_direction == PID_IN) {
		uhci_sendup_td_message(uhcip, cr, tw);

		if ((uhci_handle_isoc_receive(uhcip, pp, tw)) != USB_SUCCESS) {
			USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
			    "uhci_handle_isoc_td: Drop message");
		}

	} else {
		/*
		 * Update kstats only for OUT;
		 * uhci_sendup_td_message() does it for IN.
		 */
		uhci_do_byte_stats(uhcip, tw->tw_length,
		    usb_pp->p_ep.bmAttributes, usb_pp->p_ep.bEndpointAddress);

		uhci_hcdi_callback(uhcip, pp, usb_pp, tw, USB_CR_OK);
	}

	for (i = 0; i < isoc_xfer_info->num_pools; i++) {
		td_pool_ptr = &isoc_xfer_info->td_pools[i];
		rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
		ASSERT(rval == DDI_SUCCESS);
		ddi_dma_mem_free(&td_pool_ptr->mem_handle);
		ddi_dma_free_handle(&td_pool_ptr->dma_handle);
	}
	kmem_free(isoc_xfer_info->td_pools,
	    (sizeof (uhci_bulk_isoc_td_pool_t) *
	    isoc_xfer_info->num_pools));
	uhci_deallocate_tw(uhcip, pp, tw);
}


/*
 * uhci_handle_isoc_receive:
 *	- Sends the isoc data to the client
 *	- Inserts another isoc receive request
 */
static int
uhci_handle_isoc_receive(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw)
{
	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_handle_isoc_receive: tw = 0x%p", (void *)tw);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * TBD: check that the pipe state is polling before inserting a
	 * new request. Check when TD de-allocation is done (so we can
	 * reuse the same TD).
	 */
	if (uhci_start_isoc_receive_polling(uhcip,
	    pp->pp_pipe_handle, (usb_isoc_req_t *)tw->tw_curr_xfer_reqp,
	    0) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_handle_isoc_receive: receive polling failed");

		return (USB_FAILURE);
	}

	return (USB_SUCCESS);
}


/*
 * uhci_delete_isoc_td:
 *	- Delete from the outstanding command queue
 *	- Delete from the tw queue
 *	- Delete from the isoc queue
 *	- Delete from the HOST CONTROLLER list
 */
static void
uhci_delete_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint32_t	starting_frame = td->starting_frame;

	if ((td->isoc_next == NULL) && (td->isoc_prev == NULL)) {
		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
		    GetTD32(uhcip, td->link_ptr));
		uhcip->uhci_isoc_q_tailp[starting_frame] = 0;
	} else if (td->isoc_next == NULL) {
		td->isoc_prev->link_ptr = td->link_ptr;
		td->isoc_prev->isoc_next = NULL;
		uhcip->uhci_isoc_q_tailp[starting_frame] = td->isoc_prev;
	} else if (td->isoc_prev == NULL) {
		td->isoc_next->isoc_prev = NULL;
		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
		    GetTD32(uhcip, td->link_ptr));
	} else {
		td->isoc_prev->isoc_next = td->isoc_next;
		td->isoc_next->isoc_prev = td->isoc_prev;
		td->isoc_prev->link_ptr = td->link_ptr;
	}

	uhci_delete_td(uhcip, td);
}


/*
 * uhci_start_isoc_receive_polling:
 *	- Allocates the usb_isoc_request
 *	- Updates the isoc request
 *	- Inserts the isoc TDs into the HC processing list
 */
int
uhci_start_isoc_receive_polling(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_isoc_req_t		*isoc_req,
	usb_flags_t		usb_flags)
{
	int			ii, error;
	size_t			max_isoc_xfer_size, length, isoc_pkts_length;
	ushort_t		isoc_pkt_count;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	usb_isoc_pkt_descr_t	*isoc_pkt_descr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_start_isoc_receive_polling: usb_flags = %x", usb_flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	max_isoc_xfer_size = ph->p_ep.wMaxPacketSize * UHCI_MAX_ISOC_PKTS;

	if (isoc_req) {
		isoc_pkt_descr = isoc_req->isoc_pkt_descr;
		isoc_pkt_count = isoc_req->isoc_pkts_count;
		isoc_pkts_length = isoc_req->isoc_pkts_length;
	} else {
		isoc_pkt_descr = ((usb_isoc_req_t *)
		    pp->pp_client_periodic_in_reqp)->isoc_pkt_descr;
		isoc_pkt_count = ((usb_isoc_req_t *)
		    pp->pp_client_periodic_in_reqp)->isoc_pkts_count;
		isoc_pkts_length = ((usb_isoc_req_t *)
		    pp->pp_client_periodic_in_reqp)->isoc_pkts_length;
	}

	for (ii = 0, length = 0; ii < isoc_pkt_count; ii++) {
		length += isoc_pkt_descr->isoc_pkt_length;
		isoc_pkt_descr++;
	}

	if ((isoc_pkts_length) && (isoc_pkts_length != length)) {

		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_start_isoc_receive_polling: isoc_pkts_length 0x%lx "
		    "is not equal to the sum of all pkt lengths 0x%lx in "
		    "an isoc request", isoc_pkts_length, length);

		return (USB_FAILURE);
	}

	/* Check the size of the isochronous request */
	if (length > max_isoc_xfer_size) {
		USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_start_isoc_receive_polling: "
		    "Max isoc request size = %lx, Given isoc req size = %lx",
		    max_isoc_xfer_size, length);

		return (USB_FAILURE);
	}

	/* Add the TDs to the Host Controller's isoc list */
	error = uhci_insert_isoc_td(uhcip, ph, isoc_req, length, usb_flags);

	return (error);
}


/*
 * uhci_remove_isoc_tds_tws
 *	This routine scans the pipe, removes all the TDs and transfer
 *	wrappers, and deallocates the memory associated with those TDs
 *	and TWs.
 */
void
uhci_remove_isoc_tds_tws(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uint_t			rval, i;
	uhci_td_t		*tmp_td, *td_head;
	usb_isoc_req_t		*isoc_req;
	uhci_trans_wrapper_t	*tmp_tw, *tw_head;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
	uhci_bulk_isoc_td_pool_t	*td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_remove_isoc_tds_tws: pp = %p", (void *)pp);

	tw_head = pp->pp_tw_head;
	while (tw_head) {
		tmp_tw = tw_head;
		tw_head = tw_head->tw_next;
		td_head = tmp_tw->tw_hctd_head;
		if (tmp_tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp,
			    tmp_tw);
		} else if (tmp_tw->tw_direction == PID_OUT) {
			uhci_hcdi_callback(uhcip, pp, pp->pp_pipe_handle,
			    tmp_tw, USB_CR_FLUSHED);
		}

		while (td_head) {
			tmp_td = td_head;
			td_head = td_head->tw_td_next;
			uhci_delete_isoc_td(uhcip, tmp_td);
		}

		isoc_req = (usb_isoc_req_t *)tmp_tw->tw_isoc_req;
		if (isoc_req) {
			usb_free_isoc_req(isoc_req);
		}

		ASSERT(tmp_tw->tw_hctd_head == NULL);

		if (tmp_tw->tw_xfer_info.td_pools) {
			isoc_xfer_info =
			    (uhci_bulk_isoc_xfer_t *)&tmp_tw->tw_xfer_info;
			for (i = 0; i < isoc_xfer_info->num_pools; i++) {
				td_pool_ptr = &isoc_xfer_info->td_pools[i];
				rval = ddi_dma_unbind_handle(
				    td_pool_ptr->dma_handle);
				ASSERT(rval == DDI_SUCCESS);
				ddi_dma_mem_free(&td_pool_ptr->mem_handle);
				ddi_dma_free_handle(&td_pool_ptr->dma_handle);
			}
			kmem_free(isoc_xfer_info->td_pools,
			    (sizeof (uhci_bulk_isoc_td_pool_t) *
			    isoc_xfer_info->num_pools));
		}

		uhci_deallocate_tw(uhcip, pp, tmp_tw);
	}
}


/*
 * uhci_isoc_update_sw_frame_number()
 *	To avoid code duplication, call uhci_get_sw_frame_number().
 */
void
uhci_isoc_update_sw_frame_number(uhci_state_t *uhcip)
{
	(void) uhci_get_sw_frame_number(uhcip);
}


/*
 * uhci_get_sw_frame_number:
 *	Hold the uhci_int_mutex before calling this routine.
 */
uint64_t
uhci_get_sw_frame_number(uhci_state_t *uhcip)
{
	uint64_t sw_frnum, hw_frnum, current_frnum;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	sw_frnum = uhcip->uhci_sw_frnum;
	hw_frnum = Get_OpReg16(FRNUM);

	/*
	 * Check bit 10 in the software counter and the hardware frame
	 * counter. If both are the same, then don't increment the
	 * software frame counter (bit 10 of the hw frame counter toggles
	 * every 1024 frames). The lower 11 bits of the software counter
	 * contain the hardware frame counter value. The MSB (bit 10) of
	 * the software counter is incremented for every 1024 frames,
	 * either here or in the get frame number routine.
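	 *
	 * For illustration: if sw_frnum is 2047 (bit 10 set) and the hw
	 * counter has wrapped around to 1 (bit 10 clear), the counters
	 * disagree, so sw_frnum is advanced to the next 1024-frame epoch
	 * and merged with hw_frnum to yield 2049.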
	 */
	if ((sw_frnum & UHCI_BIT_10_MASK) == (hw_frnum & UHCI_BIT_10_MASK)) {
		/* The MSB of the hw counter did not toggle */
		current_frnum = ((sw_frnum & (SW_FRNUM_MASK)) | hw_frnum);
	} else {
		/*
		 * The hw counter wrapped around, and the interrupt handler
		 * did not get a chance to update the sw frame counter.
		 * So, update the sw frame counter and return the correct
		 * frame number.
		 */
		sw_frnum >>= UHCI_SIZE_OF_HW_FRNUM - 1;
		current_frnum =
		    ((++sw_frnum << (UHCI_SIZE_OF_HW_FRNUM - 1)) | hw_frnum);
	}
	uhcip->uhci_sw_frnum = current_frnum;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_get_sw_frame_number: sw=%lld hd=%lld",
	    (unsigned long long)(uhcip->uhci_sw_frnum),
	    (unsigned long long)hw_frnum);

	return (current_frnum);
}


/*
 * uhci_cmd_timeout_hdlr:
 *	This routine gets called every second. It checks for timed-out
 *	control/bulk commands and times out any command that exceeds the
 *	timeout period specified by the pipe policy.
 */
void
uhci_cmd_timeout_hdlr(void *arg)
{
	uint_t			flag = B_FALSE;
	uhci_td_t		*head, *tmp_td;
	uhci_state_t		*uhcip = (uhci_state_t *)arg;
	uhci_pipe_private_t	*pp;

	/*
	 * Check whether any of the control xfers are timed out.
	 * If so, complete those commands with time out as the reason.
	 */
	mutex_enter(&uhcip->uhci_int_mutex);
	head = uhcip->uhci_outst_tds_head;

	while (head) {
		/*
		 * If the timeout is zero, then don't time out the command.
		 */
		if (head->tw->tw_timeout_cnt == 0) {
			head = head->outst_td_next;
			continue;
		}

		if (!(head->tw->tw_flags & TW_TIMEOUT_FLAG)) {
			head->tw->tw_flags |= TW_TIMEOUT_FLAG;
			--head->tw->tw_timeout_cnt;
		}

		/* Only do it for bulk and control TDs */
		if ((head->tw->tw_timeout_cnt == 0) &&
		    (head->tw->tw_handle_td != uhci_handle_isoc_td)) {

			USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "Command timed out: td = %p", (void *)head);

			head->tw->tw_claim = UHCI_TIMEOUT_HDLR_CLAIMED;

			/*
			 * Check finally whether the command completed
			 */
			if (GetTD_status(uhcip, head) & UHCI_TD_ACTIVE) {
				SetTD32(uhcip, head->link_ptr,
				    GetTD32(uhcip, head->link_ptr) |
				    HC_END_OF_LIST);
				pp = head->tw->tw_pipe_private;
				SetQH32(uhcip, pp->pp_qh->element_ptr,
				    GetQH32(uhcip, pp->pp_qh->element_ptr) |
				    HC_END_OF_LIST);
			}

			flag = B_TRUE;
		}

		head = head->outst_td_next;
	}

	if (flag) {
		(void) uhci_wait_for_sof(uhcip);
	}

	head = uhcip->uhci_outst_tds_head;
	while (head) {
		if (head->tw->tw_flags & TW_TIMEOUT_FLAG) {
			head->tw->tw_flags &= ~TW_TIMEOUT_FLAG;
		}
		if (head->tw->tw_claim == UHCI_TIMEOUT_HDLR_CLAIMED) {
			head->tw->tw_claim = UHCI_NOT_CLAIMED;
			tmp_td = head->tw->tw_hctd_head;
			while (tmp_td) {
				SetTD_status(uhcip, tmp_td,
				    UHCI_TD_CRC_TIMEOUT);
				tmp_td = tmp_td->tw_td_next;
			}
		}
		head = head->outst_td_next;
	}

	/*
	 * Process the TD which was completed before shifting from normal
	 * mode to polled mode
	 */
	if (uhcip->uhci_polled_flag == UHCI_POLLED_FLAG_TRUE) {
		uhci_process_submitted_td_queue(uhcip);
		uhcip->uhci_polled_flag = UHCI_POLLED_FLAG_FALSE;
	} else if (flag) {
		/* Process the completed/timed out commands */
		uhci_process_submitted_td_queue(uhcip);
	}

	/* Re-register the control/bulk/intr commands' timeout handler */
	if (uhcip->uhci_cmd_timeout_id) {
		uhcip->uhci_cmd_timeout_id = timeout(uhci_cmd_timeout_hdlr,
		    (void *)uhcip, UHCI_ONE_SECOND);
	}

	mutex_exit(&uhcip->uhci_int_mutex);
}


/*
 * uhci_wait_for_sof:
 *	Wait for the start of the next frame (implying any changes made in the
 *	lattice have now taken effect).
 *	To be sure this is the case, we wait for the completion of the current
 *	frame (which might have already been pending), then another complete
 *	frame to ensure everything has taken effect.
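 *
 *	(The first SOF may arrive almost immediately if the current frame
 *	was already nearly complete when the wait started; hence the wait
 *	for a second full frame.)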
 */
int
uhci_wait_for_sof(uhci_state_t *uhcip)
{
	int			n, error;
	ushort_t		cmd_reg;
	usb_frame_number_t	before_frame_number, after_frame_number;
	clock_t			time, rval;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_wait_for_sof: uhcip = %p", (void *)uhcip);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	error = uhci_state_is_operational(uhcip);

	if (error != USB_SUCCESS) {

		return (error);
	}

	before_frame_number = uhci_get_sw_frame_number(uhcip);
	for (n = 0; n < MAX_SOF_WAIT_COUNT; n++) {
		SetTD_ioc(uhcip, uhcip->uhci_sof_td, 1);
		uhcip->uhci_cv_signal = B_TRUE;

		time = ddi_get_lbolt() + UHCI_ONE_SECOND;
		rval = cv_timedwait(&uhcip->uhci_cv_SOF,
		    &uhcip->uhci_int_mutex, time);

		after_frame_number = uhci_get_sw_frame_number(uhcip);
		if ((rval == -1) &&
		    (after_frame_number <= before_frame_number)) {
			cmd_reg = Get_OpReg16(USBCMD);
			Set_OpReg16(USBCMD, (cmd_reg | USBCMD_REG_HC_RUN));
			Set_OpReg16(USBINTR, ENABLE_ALL_INTRS);
			after_frame_number = uhci_get_sw_frame_number(uhcip);
		}
		before_frame_number = after_frame_number;
	}

	SetTD_ioc(uhcip, uhcip->uhci_sof_td, 0);

	return (uhcip->uhci_cv_signal ? USB_FAILURE : USB_SUCCESS);
}


/*
 * uhci_allocate_periodic_in_resource:
 *	Allocate an interrupt/isochronous request structure for the
 *	interrupt/isochronous IN transfer.
 */
int
uhci_allocate_periodic_in_resource(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw,
	usb_flags_t		flags)
{
	size_t			length = 0;
	usb_opaque_t		client_periodic_in_reqp;
	usb_intr_req_t		*cur_intr_req;
	usb_isoc_req_t		*curr_isoc_reqp;
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_allocate_periodic_in_resource:\n\t"
	    "ph = 0x%p, pp = 0x%p, tw = 0x%p, flags = 0x%x",
	    (void *)ph, (void *)pp, (void *)tw, flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Check the current periodic in request pointer */
	if (tw->tw_curr_xfer_reqp) {
		USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
		    "uhci_allocate_periodic_in_resource: Interrupt "
		    "request structure already exists: "
		    "allocation failed");

		return (USB_SUCCESS);
	}

	/* Get the client periodic in request pointer */
	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;

	/*
	 * If it is a periodic IN request and the periodic request is NULL,
	 * allocate a corresponding usb periodic IN request for the
	 * current periodic polling request and copy the information
	 * from the saved periodic request structure.
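	 *
	 * (For isoc IN, as the code below shows, the dup'ed request becomes
	 * the new pp_client_periodic_in_reqp while the original is handed
	 * to the TW; for intr IN, the dup itself is attached to the TW.)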
	 */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_INTR) {
		/* Get the interrupt transfer length */
		length = ((usb_intr_req_t *)client_periodic_in_reqp)->
		    intr_len;

		cur_intr_req = usba_hcdi_dup_intr_req(ph->p_dip,
		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
		if (cur_intr_req == NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Interrupt "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/* Check and save the timeout value */
		tw->tw_timeout_cnt = (cur_intr_req->intr_attributes &
		    USB_ATTRS_ONE_XFER) ? cur_intr_req->intr_timeout : 0;
		tw->tw_curr_xfer_reqp = (usb_opaque_t)cur_intr_req;
		tw->tw_length = cur_intr_req->intr_len;
	} else {
		ASSERT(client_periodic_in_reqp != NULL);

		if ((curr_isoc_reqp = usba_hcdi_dup_isoc_req(ph->p_dip,
		    (usb_isoc_req_t *)client_periodic_in_reqp, flags)) ==
		    NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Isochronous "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/*
		 * Save the client's isochronous request pointer and the
		 * length of the isochronous transfer in the transfer
		 * wrapper. The dup'ed request is saved in
		 * pp_client_periodic_in_reqp.
		 */
		tw->tw_curr_xfer_reqp =
		    (usb_opaque_t)pp->pp_client_periodic_in_reqp;
		pp->pp_client_periodic_in_reqp = (usb_opaque_t)curr_isoc_reqp;
	}

	mutex_enter(&ph->p_mutex);
	ph->p_req_count++;
	mutex_exit(&ph->p_mutex);

	return (USB_SUCCESS);
}


/*
 * uhci_deallocate_periodic_in_resource:
 *	Deallocate the interrupt/isochronous request structure for the
 *	interrupt/isochronous IN transfer.
 */
void
uhci_deallocate_periodic_in_resource(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw)
{
	usb_opaque_t		curr_xfer_reqp;
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_deallocate_periodic_in_resource: "
	    "pp = 0x%p tw = 0x%p", (void *)pp, (void *)tw);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	curr_xfer_reqp = tw->tw_curr_xfer_reqp;
	if (curr_xfer_reqp) {
		/*
		 * Reset the periodic in request and usb isoch
		 * packet request pointers to NULL.
		 */
		tw->tw_curr_xfer_reqp = NULL;
		tw->tw_isoc_req = NULL;

		mutex_enter(&ph->p_mutex);
		ph->p_req_count--;
		mutex_exit(&ph->p_mutex);

		/*
		 * Free the pre-allocated interrupt or isochronous requests.
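		 * (These are the requests dup'ed in
		 * uhci_allocate_periodic_in_resource() above.)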
		 */
		switch (UHCI_XFER_TYPE(&ph->p_ep)) {
		case USB_EP_ATTR_INTR:
			usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
			break;
		case USB_EP_ATTR_ISOCH:
			usb_free_isoc_req((usb_isoc_req_t *)curr_xfer_reqp);
			break;
		}
	}
}


/*
 * uhci_hcdi_callback()
 *	Convenience wrapper around usba_hcdi_cb().
 */
void
uhci_hcdi_callback(uhci_state_t *uhcip, uhci_pipe_private_t *pp,
    usba_pipe_handle_data_t *ph, uhci_trans_wrapper_t *tw, usb_cr_t cr)
{
	usb_opaque_t	curr_xfer_reqp;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
	    (void *)ph, (void *)tw, cr);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (tw && tw->tw_curr_xfer_reqp) {
		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
		tw->tw_curr_xfer_reqp = NULL;
		tw->tw_isoc_req = NULL;
	} else {
		ASSERT(pp->pp_client_periodic_in_reqp != NULL);

		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
		pp->pp_client_periodic_in_reqp = NULL;
	}

	ASSERT(curr_xfer_reqp != NULL);

	mutex_exit(&uhcip->uhci_int_mutex);
	usba_hcdi_cb(ph, curr_xfer_reqp, cr);
	mutex_enter(&uhcip->uhci_int_mutex);
}


/*
 * uhci_state_is_operational:
 *
 *	Check the host controller state and return the proper value.
 */
int
uhci_state_is_operational(uhci_state_t *uhcip)
{
	int	val;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	switch (uhcip->uhci_hc_soft_state) {
	case UHCI_CTLR_INIT_STATE:
	case UHCI_CTLR_SUSPEND_STATE:
		val = USB_FAILURE;
		break;
	case UHCI_CTLR_OPERATIONAL_STATE:
		val = USB_SUCCESS;
		break;
	case UHCI_CTLR_ERROR_STATE:
		val = USB_HC_HARDWARE_ERROR;
		break;
	default:
		val = USB_FAILURE;
		break;
	}

	return (val);
}


#ifdef DEBUG
static void
uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint_t	*ptr = (uint_t *)td;

#ifndef lint
	_NOTE(NO_COMPETING_THREADS_NOW);
#endif
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tDWORD 1 0x%x\t DWORD 2 0x%x", ptr[0], ptr[1]);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tDWORD 3 0x%x\t DWORD 4 0x%x", ptr[2], ptr[3]);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tBytes xfered = %d", td->tw->tw_bytes_xfered);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tBytes Pending = %d", td->tw->tw_bytes_pending);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "Queue Head Details:");
	uhci_print_qh(uhcip, td->tw->tw_pipe_private->pp_qh);

#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif
}


static void
uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh)
{
	uint_t	*ptr = (uint_t *)qh;

	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tLink Ptr = %x Element Ptr = %x", ptr[0], ptr[1]);
}
#endif