/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Universal Host Controller Driver (UHCI)
 *
 * The UHCI driver is a driver which interfaces to the Universal
 * Serial Bus Driver (USBA) and the Host Controller (HC). The interface to
 * the Host Controller is defined by the UHCI specification.
 * This file contains misc functions.
 */
#include <sys/usb/hcd/uhci/uhcid.h>
#include <sys/usb/hcd/uhci/uhciutil.h>
#include <sys/usb/hcd/uhci/uhcipolled.h>

#include <sys/disp.h>

/* Globals */
extern uint_t	uhci_td_pool_size;	/* Num TDs */
extern uint_t	uhci_qh_pool_size;	/* Num QHs */
extern ushort_t	uhci_tree_bottom_nodes[];
extern void	*uhci_statep;

/* function prototypes */
static void	uhci_build_interrupt_lattice(uhci_state_t *uhcip);
static int	uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip);

static uint_t	uhci_lattice_height(uint_t bandwidth);
static uint_t	uhci_lattice_parent(uint_t node);
static uint_t	uhci_leftmost_leaf(uint_t node, uint_t height);
static uint_t	uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
		    usb_port_status_t port_status);

static int	uhci_bandwidth_adjust(uhci_state_t *uhcip,
		    usb_ep_descr_t *endpoint, usb_port_status_t port_status);

static uhci_td_t *uhci_allocate_td_from_pool(uhci_state_t *uhcip);
static void	uhci_fill_in_td(uhci_state_t *uhcip,
		    uhci_td_t *td, uhci_td_t *current_dummy,
		    uint32_t buffer_offset, size_t length,
		    uhci_pipe_private_t *pp, uchar_t PID,
		    usb_req_attrs_t attrs, uhci_trans_wrapper_t *tw);
static uint32_t	uhci_get_tw_paddr_by_offs(uhci_state_t *uhcip,
		    uint32_t buffer_offset, size_t length,
		    uhci_trans_wrapper_t *tw);
static uhci_trans_wrapper_t *uhci_create_transfer_wrapper(
		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
		    size_t length, usb_flags_t usb_flags);
static uhci_trans_wrapper_t *uhci_create_isoc_transfer_wrapper(
		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
		    usb_isoc_req_t *req, size_t length,
		    usb_flags_t usb_flags);

static int	uhci_create_setup_pkt(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
static void	uhci_insert_ctrl_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_ctrl_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_insert_intr_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_intr_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_remove_bulk_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_insert_bulk_qh(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp);
static void	uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td);
static int	uhci_alloc_memory_for_tds(uhci_state_t *uhcip, uint_t num_tds,
		    uhci_bulk_isoc_xfer_t *info);
static int	uhci_alloc_bulk_isoc_tds(uhci_state_t *uhcip, uint_t num_tds,
		    uhci_bulk_isoc_xfer_t *info);
static void	uhci_get_isoc_td_by_index(uhci_state_t *uhcip,
		    uhci_bulk_isoc_xfer_t *info, uint_t index,
		    uhci_td_t **tdpp, uhci_bulk_isoc_td_pool_t **td_pool_pp);
static void	uhci_get_bulk_td_by_paddr(uhci_state_t *uhcip,
		    uhci_bulk_isoc_xfer_t *info, uint32_t paddr,
		    uhci_bulk_isoc_td_pool_t **td_pool_pp);

static int	uhci_handle_isoc_receive(uhci_state_t *uhcip,
		    uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
static void	uhci_delete_isoc_td(uhci_state_t *uhcip,
		    uhci_td_t *td);
#ifdef DEBUG
static void	uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td);
static void	uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh);
#endif


/*
 * uhci_build_interrupt_lattice:
 *
 * Construct the interrupt lattice tree using static Queue Head pointers.
 * This interrupt lattice tree will have a total of 63 queue heads and the
 * Host Controller (HC) processes queue heads every frame.
 */
static void
uhci_build_interrupt_lattice(uhci_state_t *uhcip)
{
	int			half_list = NUM_INTR_QH_LISTS / 2;
	uint16_t		i, j, k;
	uhci_td_t		*sof_td, *isoc_td;
	uintptr_t		addr;
	queue_head_t		*list_array = uhcip->uhci_qh_pool_addr;
	queue_head_t		*tmp_qh;
	frame_lst_table_t	*frame_lst_tablep =
	    uhcip->uhci_frame_lst_tablep;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_build_interrupt_lattice:");

	/*
	 * Reserve the first 63 queue head structures in the pool as static
	 * queue heads; these are required for constructing the interrupt
	 * lattice tree.
	 */
	for (i = 0; i < NUM_INTR_QH_LISTS; i++) {
		SetQH32(uhcip, list_array[i].link_ptr, HC_END_OF_LIST);
		SetQH32(uhcip, list_array[i].element_ptr, HC_END_OF_LIST);
		list_array[i].qh_flag = QUEUE_HEAD_FLAG_STATIC;
		list_array[i].node = i;
	}

	/* Build the interrupt lattice tree */
	for (i = 0; i < half_list - 1; i++) {
		/*
		 * The next pointer in the host controller queue head
		 * descriptor must contain an iommu address. Calculate
		 * the offset into the cpu address and add this to the
		 * starting iommu address.
		 */
		addr = QH_PADDR(&list_array[i]) | HC_QUEUE_HEAD;

		SetQH32(uhcip, list_array[2*i + 1].link_ptr, addr);
		SetQH32(uhcip, list_array[2*i + 2].link_ptr, addr);
	}

	/*
	 * Initialize the interrupt list in the Frame list Table
	 * so that it points to the bottom of the tree.
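	 *
	 * Each of the pow_2(TREE_HEIGHT) bottom-level queue heads is
	 * entered into pow_2(VIRTUAL_TREE_HEIGHT) slots of the frame list;
	 * uhci_tree_bottom_nodes[] supplies the frame numbers used for
	 * each leaf, which lets a leaf's frames be spread across the
	 * frame list rather than sit back-to-back.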
	 */
	for (i = 0, j = 0; i < pow_2(TREE_HEIGHT); i++) {
		addr = QH_PADDR(&list_array[half_list + i - 1]);
		for (k = 0; k < pow_2(VIRTUAL_TREE_HEIGHT); k++) {
			SetFL32(uhcip,
			    frame_lst_tablep[uhci_tree_bottom_nodes[j++]],
			    addr | HC_QUEUE_HEAD);
		}
	}

	/*
	 * Create the control and bulk Queue Heads.
	 */
	uhcip->uhci_ctrl_xfers_q_head = uhci_alloc_queue_head(uhcip);
	tmp_qh = uhcip->uhci_ctrl_xfers_q_tail = uhcip->uhci_ctrl_xfers_q_head;

	SetQH32(uhcip, list_array[0].link_ptr,
	    (QH_PADDR(tmp_qh) | HC_QUEUE_HEAD));

	uhcip->uhci_bulk_xfers_q_head = uhci_alloc_queue_head(uhcip);
	uhcip->uhci_bulk_xfers_q_tail = uhcip->uhci_bulk_xfers_q_head;
	SetQH32(uhcip, tmp_qh->link_ptr,
	    (QH_PADDR(uhcip->uhci_bulk_xfers_q_head)|HC_QUEUE_HEAD));

	SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_head->link_ptr, HC_END_OF_LIST);

	/*
	 * Add a dummy TD to the static queue head 0. This is used
	 * to generate an interrupt at the end of the frame.
	 */
	sof_td = uhci_allocate_td_from_pool(uhcip);

	SetQH32(uhcip, list_array[0].element_ptr,
	    TD_PADDR(sof_td) | HC_TD_HEAD);
	SetTD32(uhcip, sof_td->link_ptr, HC_END_OF_LIST);
	uhcip->uhci_sof_td = sof_td;

	/*
	 * Add a dummy td that is used to generate an interrupt for
	 * every 1024 frames.
	 */
	isoc_td = uhci_allocate_td_from_pool(uhcip);
	SetTD32(uhcip, isoc_td->link_ptr, HC_END_OF_LIST);
	uhcip->uhci_isoc_td = isoc_td;

	uhcip->uhci_isoc_qh = uhci_alloc_queue_head(uhcip);
	SetQH32(uhcip, uhcip->uhci_isoc_qh->link_ptr,
	    GetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM]));
	SetQH32(uhcip, uhcip->uhci_isoc_qh->element_ptr, TD_PADDR(isoc_td));
	SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM],
	    QH_PADDR(uhcip->uhci_isoc_qh) | HC_QUEUE_HEAD);
}


/*
 * uhci_allocate_pools:
 *	Allocate the system memory for the Queue Head (QH) and the
 *	Transfer Descriptor (TD) pools. Both QH and TD structures
 *	must be aligned to a 16 byte boundary.
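 *
 *	(The UHCI link pointers reserve their low four bits for control
 *	flags such as HC_QUEUE_HEAD and HC_END_OF_LIST, so the hardware
 *	only ever sees addresses that are multiples of 16; hence the
 *	alignment requirement.)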
 */
int
uhci_allocate_pools(uhci_state_t *uhcip)
{
	dev_info_t		*dip = uhcip->uhci_dip;
	size_t			real_length;
	int			i, result;
	uint_t			ccount;
	ddi_device_acc_attr_t	dev_attr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_allocate_pools:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Allocate the TD pool DMA handle */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
	    &uhcip->uhci_td_pool_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Allocate the memory for the TD pool */
	if (ddi_dma_mem_alloc(uhcip->uhci_td_pool_dma_handle,
	    uhci_td_pool_size * sizeof (uhci_td_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    (caddr_t *)&uhcip->uhci_td_pool_addr, &real_length,
	    &uhcip->uhci_td_pool_mem_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Map the TD pool into the I/O address space */
	result = ddi_dma_addr_bind_handle(uhcip->uhci_td_pool_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_td_pool_addr, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &uhcip->uhci_td_pool_cookie, &ccount);

	bzero((void *)uhcip->uhci_td_pool_addr,
	    uhci_td_pool_size * sizeof (uhci_td_t));

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_allocate_pools: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_allocate_pools: Result = %d", result);

		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_TD_POOL_BOUND;

	/* Initialize the TD pool */
	for (i = 0; i < uhci_td_pool_size; i++) {
		uhcip->uhci_td_pool_addr[i].flag = TD_FLAG_FREE;
	}

	/* Allocate the QH pool DMA handle */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
	    0, &uhcip->uhci_qh_pool_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Allocate the memory for the QH pool */
	if (ddi_dma_mem_alloc(uhcip->uhci_qh_pool_dma_handle,
	    uhci_qh_pool_size * sizeof (queue_head_t),
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    (caddr_t *)&uhcip->uhci_qh_pool_addr, &real_length,
	    &uhcip->uhci_qh_pool_mem_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	result = ddi_dma_addr_bind_handle(uhcip->uhci_qh_pool_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_qh_pool_addr, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &uhcip->uhci_qh_pool_cookie, &ccount);

	/* Process the result */
	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_allocate_pools: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_QH_POOL_BOUND;

	bzero((void *)uhcip->uhci_qh_pool_addr,
	    uhci_qh_pool_size * sizeof (queue_head_t));

	/* Initialize the QH pool */
	for (i = 0; i < uhci_qh_pool_size; i++) {
		uhcip->uhci_qh_pool_addr[i].qh_flag = QUEUE_HEAD_FLAG_FREE;
	}

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_allocate_pools: Completed");

	return (USB_SUCCESS);
}


/*
 * uhci_free_pools:
 *	Cleanup on attach failure or detach.
 */
void
uhci_free_pools(uhci_state_t *uhcip)
{
	int			i, flag, rval;
	uhci_td_t		*td;
	uhci_trans_wrapper_t	*tw;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_free_pools:");

	if (uhcip->uhci_td_pool_addr && uhcip->uhci_td_pool_mem_handle) {
		for (i = 0; i < uhci_td_pool_size; i++) {
			td = &uhcip->uhci_td_pool_addr[i];

			flag = uhcip->uhci_td_pool_addr[i].flag;
			if ((flag != TD_FLAG_FREE) &&
			    (flag != TD_FLAG_DUMMY) && (td->tw != NULL)) {
				tw = td->tw;
				uhci_free_tw(uhcip, tw);
			}
		}

		if (uhcip->uhci_dma_addr_bind_flag & UHCI_TD_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_td_pool_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}

		ddi_dma_mem_free(&uhcip->uhci_td_pool_mem_handle);
	}

	/* Free the TD pool */
	if (uhcip->uhci_td_pool_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_td_pool_dma_handle);
	}

	if (uhcip->uhci_qh_pool_addr && uhcip->uhci_qh_pool_mem_handle) {
		if (uhcip->uhci_dma_addr_bind_flag & UHCI_QH_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_qh_pool_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&uhcip->uhci_qh_pool_mem_handle);
	}

	/* Free the QH pool */
	if (uhcip->uhci_qh_pool_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_qh_pool_dma_handle);
	}

	/* Free the Frame list Table area */
	if (uhcip->uhci_frame_lst_tablep && uhcip->uhci_flt_mem_handle) {
		if (uhcip->uhci_dma_addr_bind_flag & UHCI_FLA_POOL_BOUND) {
			rval = ddi_dma_unbind_handle(
			    uhcip->uhci_flt_dma_handle);
			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&uhcip->uhci_flt_mem_handle);
	}

	if (uhcip->uhci_flt_dma_handle) {
		ddi_dma_free_handle(&uhcip->uhci_flt_dma_handle);
	}
}


/*
 * uhci_decode_ddi_dma_addr_bind_handle_result:
 *	Process the return values of ddi_dma_addr_bind_handle().
 */
void
uhci_decode_ddi_dma_addr_bind_handle_result(uhci_state_t *uhcip, int result)
{
	char *msg;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_decode_ddi_dma_addr_bind_handle_result:");

	switch (result) {
	case DDI_DMA_PARTIAL_MAP:
		msg = "Partial transfers not allowed";
		break;
	case DDI_DMA_INUSE:
		msg = "Handle is in use";
		break;
	case DDI_DMA_NORESOURCES:
		msg = "No resources";
		break;
	case DDI_DMA_NOMAPPING:
		msg = "No mapping";
		break;
	case DDI_DMA_TOOBIG:
		msg = "Object is too big";
		break;
	default:
		msg = "Unknown dma error";
	}

	USB_DPRINTF_L4(PRINT_MASK_ALL, uhcip->uhci_log_hdl, "%s", msg);
}


/*
 * uhci_init_ctlr:
 *	Initialize the Host Controller (HC).
 */
int
uhci_init_ctlr(uhci_state_t *uhcip)
{
	dev_info_t	*dip = uhcip->uhci_dip;
	uint_t		cmd_reg;
	uint_t		frame_base_addr;

	mutex_enter(&uhcip->uhci_int_mutex);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_init_ctlr:");

	/*
	 * When USB legacy mode is enabled, the BIOS manages the USB keyboard
	 * attached to the UHCI controller.
	 * It has been observed that
	 * sometimes the BIOS does not clear the interrupts in the legacy
	 * mode register in the PCI configuration space. So, disable the
	 * SMI interrupts and route the interrupts to PIRQD here.
	 */
	pci_config_put16(uhcip->uhci_config_handle,
	    LEGACYMODE_REG_OFFSET, LEGACYMODE_REG_INIT_VALUE);

	/*
	 * Disable all the interrupts.
	 */
	Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

	cmd_reg = Get_OpReg16(USBCMD);
	cmd_reg &= (~USBCMD_REG_HC_RUN);

	/* Stop the controller */
	Set_OpReg16(USBCMD, cmd_reg);

	/* Reset the host controller */
	Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

	/* Wait 10ms for reset to complete */
	mutex_exit(&uhcip->uhci_int_mutex);
	delay(drv_usectohz(UHCI_RESET_DELAY));
	mutex_enter(&uhcip->uhci_int_mutex);

	Set_OpReg16(USBCMD, 0);

	/* Set the frame number to zero */
	Set_OpReg16(FRNUM, 0);

	if (uhcip->uhci_hc_soft_state == UHCI_CTLR_INIT_STATE) {
		/* Initialize the Frame list base address area */
		if (uhci_init_frame_lst_table(dip, uhcip) != USB_SUCCESS) {
			mutex_exit(&uhcip->uhci_int_mutex);

			return (USB_FAILURE);
		}
	}

	/* Save the contents of the Frame Interval Register */
	uhcip->uhci_frame_interval = Get_OpReg8(SOFMOD);

	frame_base_addr = uhcip->uhci_flt_cookie.dmac_address;

	/* Set the Frame list base address */
	Set_OpReg32(FRBASEADD, frame_base_addr);

	/*
	 * Begin sending SOFs.
	 * Set the Host Controller Functional State to Operational.
	 */
	cmd_reg = Get_OpReg16(USBCMD);
	cmd_reg |= (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
	    USBCMD_REG_CONFIG_FLAG);

	Set_OpReg16(USBCMD, cmd_reg);

	/*
	 * Verify the Command and interrupt enable registers,
	 * a sanity check whether actually initialized or not.
	 */
	cmd_reg = Get_OpReg16(USBCMD);

	if (!(cmd_reg & (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
	    USBCMD_REG_CONFIG_FLAG))) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_init_ctlr: Controller initialization failed");
		mutex_exit(&uhcip->uhci_int_mutex);

		return (USB_FAILURE);
	}

	/*
	 * Set the ioc bit of the isoc intr td. This enables
	 * the generation of an interrupt for every 1024 frames.
	 */
	SetTD_ioc(uhcip, uhcip->uhci_isoc_td, 1);

	/* Set host controller soft state to operational */
	uhcip->uhci_hc_soft_state = UHCI_CTLR_OPERATIONAL_STATE;
	mutex_exit(&uhcip->uhci_int_mutex);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_init_ctlr: Completed");

	return (USB_SUCCESS);
}


/*
 * uhci_uninit_ctlr:
 *	Uninitialize the Host Controller (HC).
 */
void
uhci_uninit_ctlr(uhci_state_t *uhcip)
{
	if (uhcip->uhci_regs_handle) {
		/* Disable all the interrupts. */
		Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

		/* Complete the current transaction and then halt. */
		Set_OpReg16(USBCMD, 0);

		/* Wait for sometime */
		mutex_exit(&uhcip->uhci_int_mutex);
		delay(drv_usectohz(UHCI_TIMEWAIT));
		mutex_enter(&uhcip->uhci_int_mutex);
	}
}


/*
 * uhci_map_regs:
 *	The Host Controller (HC) contains a set of on-chip operational
 *	registers which should be mapped into a non-cacheable
 *	portion of the system addressable space.
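 *
 *	(On PCI, each entry of the "reg" property is five integers per
 *	the PCI binding, which is why the lookup below walks the property
 *	in strides of five looking for an entry whose type field matches
 *	UHCI_PROP_MASK.)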
 */
int
uhci_map_regs(uhci_state_t *uhcip)
{
	dev_info_t		*dip = uhcip->uhci_dip;
	int			index;
	uint32_t		regs_prop_len;
	int32_t			*regs_list;
	uint16_t		command_reg;
	ddi_device_acc_attr_t	attr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_map_regs:");

	/* The host controller will be little endian */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, uhcip->uhci_dip,
	    DDI_PROP_DONTPASS, "reg", &regs_list, &regs_prop_len) !=
	    DDI_PROP_SUCCESS) {

		return (USB_FAILURE);
	}

	for (index = 0; index * 5 < regs_prop_len; index++) {
		if (regs_list[index * 5] & UHCI_PROP_MASK) {
			break;
		}
	}

	/*
	 * Deallocate the memory allocated by ddi_prop_lookup_int_array().
	 */
	ddi_prop_free(regs_list);

	if (index * 5 >= regs_prop_len) {

		return (USB_FAILURE);
	}

	/* Map in operational registers */
	if (ddi_regs_map_setup(dip, index, (caddr_t *)&uhcip->uhci_regsp,
	    0, sizeof (hc_regs_t), &attr, &uhcip->uhci_regs_handle) !=
	    DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "ddi_regs_map_setup: failed");

		return (USB_FAILURE);
	}

	if (pci_config_setup(dip, &uhcip->uhci_config_handle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_map_regs: Config error");

		return (USB_FAILURE);
	}

	/* Make sure Memory Access Enable and Master Enable are set */
	command_reg = pci_config_get16(uhcip->uhci_config_handle,
	    PCI_CONF_COMM);
	if (!(command_reg & (PCI_COMM_MAE | PCI_COMM_ME))) {
		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_map_regs: No MAE/ME");
	}

	command_reg |= PCI_COMM_MAE | PCI_COMM_ME;
	pci_config_put16(uhcip->uhci_config_handle, PCI_CONF_COMM, command_reg);

	/*
	 * Check whether the I/O base address is configured and enabled.
	 */
	if (!(command_reg & PCI_COMM_IO)) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "I/O Base address access disabled");

		return (USB_FAILURE);
	}
	/*
	 * Get the IO base address of the controller.
	 */
	uhcip->uhci_iobase = (pci_config_get16(uhcip->uhci_config_handle,
	    PCI_CONF_IOBASE) & PCI_CONF_IOBASE_MASK);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_map_regs: Completed");

	return (USB_SUCCESS);
}


void
uhci_unmap_regs(uhci_state_t *uhcip)
{
	/* Unmap the UHCI registers */
	if (uhcip->uhci_regs_handle) {
		/* Reset the host controller */
		Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

		ddi_regs_map_free(&uhcip->uhci_regs_handle);
	}

	if (uhcip->uhci_config_handle) {
		pci_config_teardown(&uhcip->uhci_config_handle);
	}
}


/*
 * uhci_set_dma_attributes:
 *	Set the limits in the DMA attributes structure. Most of the values
 *	used in the DMA limit structures are the default values as specified
 *	by the Writing PCI Device Drivers document.
 */
void
uhci_set_dma_attributes(uhci_state_t *uhcip)
{
	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_set_dma_attributes:");

	/* Initialize the DMA attributes */
	uhcip->uhci_dma_attr.dma_attr_version = DMA_ATTR_V0;
	uhcip->uhci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
	uhcip->uhci_dma_attr.dma_attr_addr_hi = 0xfffffff0ull;

	/* 32 bit addressing */
	uhcip->uhci_dma_attr.dma_attr_count_max = 0xffffffull;

	/*
	 * Setting dma_attr_align to 512 sometimes fails the
	 * binding handle; it is not clear why. Setting it to 16
	 * is right for our case (16 byte alignment required per
	 * the UHCI spec for TD descriptors).
	 */

	/* 16 byte alignment */
	uhcip->uhci_dma_attr.dma_attr_align = 0x10;

	/*
	 * Since the PCI specification is byte alignment, the
	 * burstsize field should be set to 1 for PCI devices.
	 */
	uhcip->uhci_dma_attr.dma_attr_burstsizes = 0x1;

	uhcip->uhci_dma_attr.dma_attr_minxfer = 0x1;
	uhcip->uhci_dma_attr.dma_attr_maxxfer = 0xffffffull;
	uhcip->uhci_dma_attr.dma_attr_seg = 0xffffffffull;
	uhcip->uhci_dma_attr.dma_attr_sgllen = 1;
	uhcip->uhci_dma_attr.dma_attr_granular = 1;
	uhcip->uhci_dma_attr.dma_attr_flags = 0;
}


uint_t
pow_2(uint_t x)
{
	return ((x == 0) ? 1 : (1 << x));
}


uint_t
log_2(uint_t x)
{
	int ret_val = 0;

	while (x != 1) {
		ret_val++;
		x = x >> 1;
	}

	return (ret_val);
}


/*
 * uhci_obtain_state:
 */
uhci_state_t *
uhci_obtain_state(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	uhci_state_t *state = ddi_get_soft_state(uhci_statep, instance);

	ASSERT(state != NULL);

	return (state);
}


/*
 * uhci_alloc_hcdi_ops:
 *	The HCDI interfaces or entry points are the software interfaces used by
 *	the Universal Serial Bus Driver (USBA) to access the services of the
 *	Host Controller Driver (HCD). During HCD initialization, inform USBA
 *	about all available HCDI interfaces or entry points.
 */
usba_hcdi_ops_t *
uhci_alloc_hcdi_ops(uhci_state_t *uhcip)
{
	usba_hcdi_ops_t	*hcdi_ops;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_alloc_hcdi_ops:");

	hcdi_ops = usba_alloc_hcdi_ops();

	hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION_1;

	hcdi_ops->usba_hcdi_pipe_open = uhci_hcdi_pipe_open;
	hcdi_ops->usba_hcdi_pipe_close = uhci_hcdi_pipe_close;
	hcdi_ops->usba_hcdi_pipe_reset = uhci_hcdi_pipe_reset;

	hcdi_ops->usba_hcdi_pipe_ctrl_xfer = uhci_hcdi_pipe_ctrl_xfer;
	hcdi_ops->usba_hcdi_pipe_bulk_xfer = uhci_hcdi_pipe_bulk_xfer;
	hcdi_ops->usba_hcdi_pipe_intr_xfer = uhci_hcdi_pipe_intr_xfer;
	hcdi_ops->usba_hcdi_pipe_isoc_xfer = uhci_hcdi_pipe_isoc_xfer;

	hcdi_ops->usba_hcdi_bulk_transfer_size = uhci_hcdi_bulk_transfer_size;
	hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
	    uhci_hcdi_pipe_stop_intr_polling;
	hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
	    uhci_hcdi_pipe_stop_isoc_polling;

	hcdi_ops->usba_hcdi_get_current_frame_number =
	    uhci_hcdi_get_current_frame_number;
	hcdi_ops->usba_hcdi_get_max_isoc_pkts = uhci_hcdi_get_max_isoc_pkts;

	hcdi_ops->usba_hcdi_console_input_init = uhci_hcdi_polled_input_init;
	hcdi_ops->usba_hcdi_console_input_enter = uhci_hcdi_polled_input_enter;
	hcdi_ops->usba_hcdi_console_read = uhci_hcdi_polled_read;
	hcdi_ops->usba_hcdi_console_input_exit = uhci_hcdi_polled_input_exit;
	hcdi_ops->usba_hcdi_console_input_fini = uhci_hcdi_polled_input_fini;

	hcdi_ops->usba_hcdi_console_output_init = uhci_hcdi_polled_output_init;
	hcdi_ops->usba_hcdi_console_output_enter =
	    uhci_hcdi_polled_output_enter;
	hcdi_ops->usba_hcdi_console_write = uhci_hcdi_polled_write;
	hcdi_ops->usba_hcdi_console_output_exit = uhci_hcdi_polled_output_exit;
	hcdi_ops->usba_hcdi_console_output_fini = uhci_hcdi_polled_output_fini;

	return (hcdi_ops);
}


/*
 * uhci_init_frame_lst_table:
 *	Allocate the system memory and initialize the Host Controller
 *	Frame list table area. The start of the Frame list Table
 *	area must be 4096 byte aligned.
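 *
 *	(The table consists of 1024 four-byte frame pointers, i.e. exactly
 *	4096 bytes; per the UHCI spec the FRBASEADD register only holds the
 *	upper 20 bits of the base address, so only a 4 KB-aligned base can
 *	be programmed into the controller.)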
 */
static int
uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip)
{
	int			result;
	uint_t			ccount;
	size_t			real_length;
	ddi_device_acc_attr_t	dev_attr;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_init_frame_lst_table:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* 4K alignment required */
	uhcip->uhci_dma_attr.dma_attr_align = 0x1000;

	/* Create space for the HCCA block */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
	    0, &uhcip->uhci_flt_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Reset to the default 16 bytes */
	uhcip->uhci_dma_attr.dma_attr_align = 0x10;

	if (ddi_dma_mem_alloc(uhcip->uhci_flt_dma_handle,
	    SIZE_OF_FRAME_LST_TABLE, &dev_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, 0, (caddr_t *)&uhcip->uhci_frame_lst_tablep,
	    &real_length, &uhcip->uhci_flt_mem_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Map the whole Frame list base area into the I/O address space */
	result = ddi_dma_addr_bind_handle(uhcip->uhci_flt_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_frame_lst_tablep, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &uhcip->uhci_flt_cookie, &ccount);

	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_init_frame_list_table: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	uhcip->uhci_dma_addr_bind_flag |= UHCI_FLA_POOL_BOUND;

	bzero((void *)uhcip->uhci_frame_lst_tablep, real_length);

	/* Initialize the interrupt lists */
	uhci_build_interrupt_lattice(uhcip);

	return (USB_SUCCESS);
}


/*
 * uhci_alloc_queue_head:
 *	Allocate a queue head.
 */
queue_head_t *
uhci_alloc_queue_head(uhci_state_t *uhcip)
{
	int		index;
	uhci_td_t	*dummy_td;
	queue_head_t	*queue_head;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Allocate a dummy td first. */
	if ((dummy_td = uhci_allocate_td_from_pool(uhcip)) == NULL) {

		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_queue_head: allocate td from pool failed");

		return (NULL);
	}

	/*
	 * The first 63 queue heads in the Queue Head (QH)
	 * buffer pool are reserved for building the interrupt lattice
	 * tree. Search for a blank Queue head in the QH buffer pool.
	 */
	for (index = NUM_STATIC_NODES; index < uhci_qh_pool_size; index++) {
		if (uhcip->uhci_qh_pool_addr[index].qh_flag ==
		    QUEUE_HEAD_FLAG_FREE) {
			break;
		}
	}

	USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head: Allocated %d", index);

	if (index == uhci_qh_pool_size) {
		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_queue_head: All QH exhausted");

		/* Free the dummy td allocated for this qh. */
		dummy_td->flag = TD_FLAG_FREE;

		return (NULL);
	}

	queue_head = &uhcip->uhci_qh_pool_addr[index];
	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head: Allocated address 0x%p",
	    (void *)queue_head);

	bzero((void *)queue_head, sizeof (queue_head_t));
	SetQH32(uhcip, queue_head->link_ptr, HC_END_OF_LIST);
	SetQH32(uhcip, queue_head->element_ptr, HC_END_OF_LIST);
	queue_head->prev_qh = NULL;
	queue_head->qh_flag = QUEUE_HEAD_FLAG_BUSY;

	bzero((char *)dummy_td, sizeof (uhci_td_t));
	queue_head->td_tailp = dummy_td;
	SetQH32(uhcip, queue_head->element_ptr, TD_PADDR(dummy_td));

	return (queue_head);
}


/*
 * uhci_allocate_bandwidth:
 *	Figure out whether or not this interval may be supported. Return
 *	the index into the lattice if it can be supported. Return
 *	allocation failure if it can not be supported.
 */
int
uhci_allocate_bandwidth(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*pipe_handle,
	uint_t			*node)
{
	int		bandwidth;	/* Requested bandwidth */
	uint_t		min, min_index;
	uint_t		i;
	uint_t		height;		/* Bandwidth's height in the tree */
	uint_t		leftmost;
	uint_t		length;
	uint32_t	paddr;
	queue_head_t	*tmp_qh;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint.
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);

	length = uhci_compute_total_bandwidth(endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If the length in bytes plus the allocated bandwidth exceeds
	 * the maximum, return bandwidth allocation failure.
	 */
	if ((length + uhcip->uhci_bandwidth_intr_min +
	    uhcip->uhci_bandwidth_isoch_sum) > (MAX_PERIODIC_BANDWIDTH)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_allocate_bandwidth: "
		    "Reached maximum bandwidth value and cannot allocate "
		    "bandwidth for a given Interrupt/Isoch endpoint");

		return (USB_NO_BANDWIDTH);
	}

	/*
	 * Isochronous endpoints are not scheduled through the lattice at
	 * this point; just account for their bandwidth and return.
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum += length;

		return (USB_SUCCESS);
	}

	/*
	 * This is an interrupt endpoint.
	 * Adjust the bandwidth to be a power of 2.
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this bandwidth can't be supported,
	 * return allocation failure.
	 */
	if (bandwidth == USB_FAILURE) {

		return (USB_FAILURE);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The new bandwidth is %d", bandwidth);

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The leaf with minimal bandwidth %d, "
	    "The smallest bandwidth %d", min_index, min);

	/*
	 * Find the index into the lattice given the
	 * leaf with the smallest allocated bandwidth.
	 */
	height = uhci_lattice_height(bandwidth);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The height is %d", height);

	*node = uhci_tree_bottom_nodes[min_index];

	/* Check if there are isoc TDs scheduled for this frame */
	if (uhcip->uhci_isoc_q_tailp[*node]) {
		paddr = (uhcip->uhci_isoc_q_tailp[*node]->link_ptr &
		    FRAME_LST_PTR_MASK);
	} else {
		paddr = (uhcip->uhci_frame_lst_tablep[*node] &
		    FRAME_LST_PTR_MASK);
	}

	tmp_qh = QH_VADDR(paddr);
	*node = tmp_qh->node;
	for (i = 0; i < height; i++) {
		*node = uhci_lattice_parent(*node);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The real node is %d", *node);

	/*
	 * Find the leftmost leaf in the subtree specified by the node.
	 */
	leftmost = uhci_leftmost_leaf(*node, height);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "Leftmost %d", leftmost);

	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i++) {

		if ((length + uhcip->uhci_bandwidth_isoch_sum +
		    uhcip->uhci_bandwidth[i]) > MAX_PERIODIC_BANDWIDTH) {

			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_allocate_bandwidth: "
			    "Reached maximum bandwidth value and cannot "
			    "allocate bandwidth for Interrupt endpoint");

			return (USB_NO_BANDWIDTH);
		}
	}

	/*
	 * All the leaves for this node must be updated with the bandwidth.
	 */
	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i++) {
		uhcip->uhci_bandwidth[i] += length;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;

	return (USB_SUCCESS);
}


/*
 * uhci_deallocate_bandwidth:
 *	Deallocate bandwidth for the given node in the lattice
 *	and the length of transfer.
 */
void
uhci_deallocate_bandwidth(uhci_state_t *uhcip,
    usba_pipe_handle_data_t *pipe_handle)
{
	uint_t		bandwidth;
	uint_t		height;
	uint_t		leftmost;
	uint_t		i;
	uint_t		min;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;
	uint_t		node, length;
	uhci_pipe_private_t *pp =
	    (uhci_pipe_private_t *)pipe_handle->p_hcd_private;

	/* This routine is protected by the uhci_int_mutex */
	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Obtain the length */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	length = uhci_compute_total_bandwidth(endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this is an isochronous endpoint, just delete the endpoint's
	 * bandwidth from the total allocated isochronous bandwidth.
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum -= length;

		return;
	}

	/* Obtain the node */
	node = pp->pp_node;

	/* Adjust the bandwidth to be a power of 2 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/* Find the height in the tree */
	height = uhci_lattice_height(bandwidth);

	/*
	 * Find the leftmost leaf in the subtree specified by the node.
	 */
	leftmost = uhci_leftmost_leaf(node, height);

	/* Delete the bandwidth from the appropriate lists */
	for (i = leftmost; i < leftmost + (NUM_FRAME_LST_ENTRIES/bandwidth);
	    i++) {
		uhcip->uhci_bandwidth[i] -= length;
	}

	min = uhcip->uhci_bandwidth[0];

	/* Recompute the minimum */
	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;
}


/*
 * uhci_compute_total_bandwidth:
 *
 *	Given a periodic endpoint (interrupt or isochronous) determine the
 *	total bandwidth for one transaction. The UHCI host controller
 *	traverses the endpoint descriptor lists on a first-come-first-serve
 *	basis. When the HC services an endpoint, only a single transaction
 *	attempt is made. The HC moves to the next Endpoint Descriptor after
 *	the first transaction attempt rather than finishing the entire
 *	Transfer Descriptor. Therefore, when a Transfer Descriptor is
 *	inserted into the lattice, we will only count the number of bytes
 *	for one transaction.
 *
 *	The following are the formulas used for calculating bandwidth in
 *	terms of bytes, for a single USB full-speed and low-speed transaction
 *	respectively. The protocol overheads will be different for each type
 *	of USB transfer and all these formulas and protocol overheads are
 *	derived from section 5.9.3 of the USB Specification and with the help
 *	of the Bandwidth Analysis white paper posted on the USB developer
 *	forum.
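 *
 *	As a worked example of the full-speed formula below: a hypothetical
 *	endpoint with a wMaxPacketSize of 8 bytes becomes (8 * 7)/6 = 9
 *	bytes after the bit-stuffing adjustment; the per-transfer protocol
 *	overhead and the host controller delay are then added to that.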
 *
 *	Full-Speed:
 *		Protocol overhead + ((MaxPacketSize * 7)/6) + Host_Delay
 *
 *	Low-Speed:
 *		Protocol overhead + Hub LS overhead +
 *		(Low-Speed clock * ((MaxPacketSize * 7)/6)) + Host_Delay
 */
static uint_t
uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
    usb_port_status_t port_status)
{
	uint_t		bandwidth;
	ushort_t	MaxPacketSize = endpoint->wMaxPacketSize;

	/* Add the Host Controller specific delay to the required bandwidth */
	bandwidth = HOST_CONTROLLER_DELAY;

	/* Add the bit-stuffing overhead */
	MaxPacketSize = (ushort_t)((MaxPacketSize * 7) / 6);

	if (port_status == USBA_LOW_SPEED_DEV) {
		/* Low Speed interrupt transaction */
		bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
		    HUB_LOW_SPEED_PROTO_OVERHEAD +
		    (LOW_SPEED_CLOCK * MaxPacketSize));
	} else {
		/* Full Speed transaction */
		bandwidth += MaxPacketSize;

		if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_INTR) {
			/* Full Speed interrupt transaction */
			bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
		} else {
			/* Isochronous input transaction */
			if (UHCI_XFER_DIR(endpoint) == USB_EP_DIR_IN) {
				bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
			} else {
				/* Isochronous output transaction */
				bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
			}
		}
	}

	return (bandwidth);
}


/*
 * uhci_bandwidth_adjust:
 */
static int
uhci_bandwidth_adjust(
	uhci_state_t		*uhcip,
	usb_ep_descr_t		*endpoint,
	usb_port_status_t	port_status)
{
	int	i = 0;
	uint_t	interval;

	/*
	 * Get the polling interval from the endpoint descriptor.
	 */
	interval = endpoint->bInterval;

	/*
	 * The bInterval value in the endpoint descriptor can range
	 * from 1 to 255ms. The interrupt lattice has 32 leaf nodes,
	 * and the host controller cycles through these nodes every
	 * 32ms. The longest polling interval that the controller
	 * supports is 32ms.
	 */

	/*
	 * Return an error if the polling interval is less than 1ms
	 * or greater than 255ms.
	 */
	if ((interval < MIN_POLL_INTERVAL) || (interval > MAX_POLL_INTERVAL)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_bandwidth_adjust: Endpoint's poll interval must be "
		    "between %d and %d ms", MIN_POLL_INTERVAL,
		    MAX_POLL_INTERVAL);

		return (USB_FAILURE);
	}

	/*
	 * According to the USB Specification, a full-speed endpoint can
	 * specify a desired polling interval of 1ms to 255ms, while
	 * low-speed endpoints are limited to specifying only 10ms to
	 * 255ms. But some old keyboards and mice use a polling interval
	 * of 8ms. For compatibility purposes, we are using a polling
	 * interval between 8ms and 255ms for low-speed endpoints.
	 */
	if ((port_status == USBA_LOW_SPEED_DEV) &&
	    (interval < MIN_LOW_SPEED_POLL_INTERVAL)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_bandwidth_adjust: Low speed endpoint's poll interval "
		    "must be >= %d ms, adjusted",
		    MIN_LOW_SPEED_POLL_INTERVAL);

		interval = MIN_LOW_SPEED_POLL_INTERVAL;
	}

	/*
	 * If the polling interval is greater than 32ms,
	 * adjust it to 32ms.
	 */
	if (interval > 32) {
		interval = 32;
	}

	/*
	 * Find the nearest power of 2 that is less than or
	 * equal to the interval.
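	 * For example, an interval of 10 ms makes the loop below stop at
	 * i = 4 (since pow_2(4) = 16 > 10), so pow_2(3) = 8 ms is returned.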
	 */
	while ((pow_2(i)) <= interval) {
		i++;
	}

	return (pow_2((i - 1)));
}


/*
 * uhci_lattice_height:
 *	Given the requested bandwidth, find the height in the tree at
 *	which the nodes for this bandwidth fall. The height is measured
 *	as the number of nodes from the leaf to the level specified by
 *	bandwidth. The root of the tree is at height TREE_HEIGHT.
 */
static uint_t
uhci_lattice_height(uint_t bandwidth)
{
	return (TREE_HEIGHT - (log_2(bandwidth)));
}


static uint_t
uhci_lattice_parent(uint_t node)
{
	return (((node % 2) == 0) ? ((node/2) - 1) : (node/2));
}


/*
 * uhci_leftmost_leaf:
 *	Find the leftmost leaf in the subtree specified by the node.
 *	Height refers to the number of nodes from the bottom of the tree
 *	to the node, including the node.
 */
static uint_t
uhci_leftmost_leaf(uint_t node, uint_t height)
{
	node = pow_2(height + VIRTUAL_TREE_HEIGHT) * (node+1) -
	    NUM_FRAME_LST_ENTRIES;

	return (node);
}


/*
 * uhci_insert_qh:
 *	Add the Queue Head (QH) into the Host Controller's (HC)
 *	appropriate queue head list.
 */
void
uhci_insert_qh(uhci_state_t *uhcip, usba_pipe_handle_data_t *ph)
{
	uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	switch (UHCI_XFER_TYPE(&ph->p_ep)) {
	case USB_EP_ATTR_CONTROL:
		uhci_insert_ctrl_qh(uhcip, pp);
		break;
	case USB_EP_ATTR_BULK:
		uhci_insert_bulk_qh(uhcip, pp);
		break;
	case USB_EP_ATTR_INTR:
		uhci_insert_intr_qh(uhcip, pp);
		break;
	case USB_EP_ATTR_ISOCH:
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_insert_qh: Illegal request");
		break;
	}
}


/*
 * uhci_insert_ctrl_qh:
 *	Insert a control QH into the Host Controller's (HC) control QH list.
 */
static void
uhci_insert_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t *qh = pp->pp_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_ctrl_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (uhcip->uhci_ctrl_xfers_q_head == uhcip->uhci_ctrl_xfers_q_tail) {
		uhcip->uhci_ctrl_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
	}

	SetQH32(uhcip, qh->link_ptr,
	    GetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr));
	qh->prev_qh = uhcip->uhci_ctrl_xfers_q_tail;
	SetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr,
	    QH_PADDR(qh) | HC_QUEUE_HEAD);
	uhcip->uhci_ctrl_xfers_q_tail = qh;
}


/*
 * uhci_insert_bulk_qh:
 *	Insert a bulk QH into the Host Controller's (HC) bulk QH list.
 */
static void
uhci_insert_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t *qh = pp->pp_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_bulk_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (uhcip->uhci_bulk_xfers_q_head == uhcip->uhci_bulk_xfers_q_tail) {
		uhcip->uhci_bulk_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
	} else if (uhcip->uhci_bulk_xfers_q_head->link_ptr ==
	    uhcip->uhci_bulk_xfers_q_tail->link_ptr) {

		/* If there is already a loop, we should keep the loop. */
		qh->link_ptr = uhcip->uhci_bulk_xfers_q_tail->link_ptr;
	}

	qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
	SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_tail->link_ptr,
	    QH_PADDR(qh) | HC_QUEUE_HEAD);
	uhcip->uhci_bulk_xfers_q_tail = qh;
}


/*
 * uhci_insert_intr_qh:
 *	Insert a periodic Queue Head, i.e. an interrupt queue head, into the
 *	Host Controller's (HC) interrupt lattice tree.
 */
static void
uhci_insert_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uint_t		node = pp->pp_node;	/* The appropriate node was */
						/* found during the opening */
						/* of the pipe. */
	queue_head_t	*qh = pp->pp_qh;
	queue_head_t	*next_lattice_qh, *lattice_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Find the lattice queue head */
	lattice_qh = &uhcip->uhci_qh_pool_addr[node];
	next_lattice_qh =
	    QH_VADDR(GetQH32(uhcip, lattice_qh->link_ptr) & QH_LINK_PTR_MASK);

	next_lattice_qh->prev_qh = qh;
	qh->link_ptr = lattice_qh->link_ptr;
	qh->prev_qh = lattice_qh;
	SetQH32(uhcip, lattice_qh->link_ptr, QH_PADDR(qh) | HC_QUEUE_HEAD);
	pp->pp_data_toggle = 0;
}


/*
 * uhci_insert_intr_td:
 *	Create a TD and a data buffer for an interrupt endpoint.
 */
int
uhci_insert_intr_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_intr_req_t		*req,
	usb_flags_t		flags)
{
	int			error, pipe_dir;
	uint_t			length, mps;
	uint32_t		buf_offs;
	uhci_td_t		*tmp_td;
	usb_intr_req_t		*intr_reqp;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_td: req: 0x%p", (void *)req);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Get the interrupt pipe direction */
	pipe_dir = UHCI_XFER_DIR(&ph->p_ep);

	/* Get the current interrupt request pointer */
	if (req) {
		length = req->intr_len;
	} else {
		ASSERT(pipe_dir == USB_EP_DIR_IN);
		length = (pp->pp_client_periodic_in_reqp) ?
		    (((usb_intr_req_t *)pp->
		    pp_client_periodic_in_reqp)->intr_len) :
		    ph->p_ep.wMaxPacketSize;
	}

	/* Check the size of the interrupt request */
	if (length > UHCI_MAX_TD_XFER_SIZE) {

		/* the length shouldn't exceed 8K */
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: Intr request size 0x%x is "
		    "more than 0x%x", length, UHCI_MAX_TD_XFER_SIZE);

		return (USB_INVALID_REQUEST);
	}

	USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_td: length: 0x%x", length);

	/* Allocate a transaction wrapper */
	if ((tw = uhci_create_transfer_wrapper(uhcip, pp, length, flags)) ==
	    NULL) {

		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/*
	 * Initialize the callback and any callback
	 * data for when the td completes.
	 */
	tw->tw_handle_td = uhci_handle_intr_td;
	tw->tw_handle_callback_value = NULL;
	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
	    PID_OUT : PID_IN;
	tw->tw_curr_xfer_reqp = (usb_opaque_t)req;

	/*
	 * If it is an Interrupt IN request and the interrupt request is NULL,
	 * allocate the usb interrupt request structure for the current
	 * interrupt polling request.
	 */
	if (tw->tw_direction == PID_IN) {
		if ((error = uhci_allocate_periodic_in_resource(uhcip,
		    pp, tw, flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_insert_intr_td: Interrupt request structure "
			    "allocation failed");

			/* free the transfer wrapper */
			uhci_deallocate_tw(uhcip, pp, tw);

			return (error);
		}
	}

	intr_reqp = (usb_intr_req_t *)tw->tw_curr_xfer_reqp;
	ASSERT(tw->tw_curr_xfer_reqp != NULL);

	tw->tw_timeout_cnt = (intr_reqp->intr_attributes & USB_ATTRS_ONE_XFER) ?
	    intr_reqp->intr_timeout : 0;

	/* DATA IN */
	if (tw->tw_direction == PID_IN) {
		/* Insert the td onto the queue head */
		error = uhci_insert_hc_td(uhcip, 0,
		    length, pp, tw, PID_IN, intr_reqp->intr_attributes);

		if (error != USB_SUCCESS) {

			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
			/* free the transfer wrapper */
			uhci_deallocate_tw(uhcip, pp, tw);

			return (USB_NO_RESOURCES);
		}
		tw->tw_bytes_xfered = 0;

		return (USB_SUCCESS);
	}

	if (req->intr_len) {
		/* DATA OUT */
		ASSERT(req->intr_data != NULL);

		/* Copy the data into the message */
		ddi_rep_put8(tw->tw_accesshandle, req->intr_data->b_rptr,
		    (uint8_t *)tw->tw_buf, req->intr_len, DDI_DEV_AUTOINCR);
	}

	/* Set the tw->tw_claim flag, so that nobody else works on this tw. */
	tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;

	mps = ph->p_ep.wMaxPacketSize;
	buf_offs = 0;

	/* Insert tds onto the queue head */
	while (length > 0) {

		error = uhci_insert_hc_td(uhcip, buf_offs,
		    (length > mps) ? mps : length,
		    pp, tw, PID_OUT,
		    intr_reqp->intr_attributes);

		if (error != USB_SUCCESS) {
			/* no resource. */
			break;
		}

		if (length <= mps) {
			/* inserted all data. */
			length = 0;

		} else {

			buf_offs += mps;
			length -= mps;
		}
	}

	if (error != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: allocate td failed, free resource");

		/* remove all the tds */
		while (tw->tw_hctd_head != NULL) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		tw->tw_claim = UHCI_NOT_CLAIMED;
		uhci_deallocate_tw(uhcip, pp, tw);

		return (error);
	}

	/* allow the HC to xfer the tds of this tw */
	tmp_td = tw->tw_hctd_head;
	while (tmp_td != NULL) {

		SetTD_status(uhcip, tmp_td, UHCI_TD_ACTIVE);
		tmp_td = tmp_td->tw_td_next;
	}

	tw->tw_bytes_xfered = 0;
	tw->tw_claim = UHCI_NOT_CLAIMED;

	return (error);
}


/*
 * uhci_create_transfer_wrapper:
 *	Create a Transaction Wrapper (TW) for non-isoc transfer types.
 *	This involves the allocation of DMA resources.
 *
 *	For non-isoc transfers, one DMA handle and one DMA buffer are
 *	allocated per transfer. The DMA buffer may contain multiple
 *	DMA cookies and the cookies should meet certain alignment
 *	requirements to be able to fit in the multiple TDs. The alignment
 *	needs to ensure:
 *	1. the size of a cookie is larger than the max TD length (0x500)
 *	2. the size of a cookie is a multiple of wMaxPacketSize of the
 *	ctrl/bulk pipes
 *
 *	wMaxPacketSize for ctrl and bulk pipes may be 8, 16, 32 or 64 bytes.
 *	So the alignment should be a multiple of 64. wMaxPacketSize for intr
 *	pipes is a little different since it only specifies the max to be
 *	64 bytes, but as long as an intr transfer is limited to the max TD
 *	length, any alignment can work if the cookie size is larger than
 *	the max TD length.
 *
 *	Considering the above conditions, 2K alignment is used. 4K alignment
 *	should also be fine.
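 *	(2048 is a multiple of 64 and is larger than the 0x500-byte maximum
 *	TD length, so both of the conditions above are satisfied.)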
 */
static uhci_trans_wrapper_t *
uhci_create_transfer_wrapper(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	size_t			length,
	usb_flags_t		usb_flags)
{
	size_t			real_length;
	uhci_trans_wrapper_t	*tw;
	ddi_device_acc_attr_t	dev_attr;
	ddi_dma_attr_t		dma_attr;
	int			kmem_flag;
	int			(*dmamem_wait)(caddr_t);
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
	    length, usb_flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* isochronous pipes should not call into this function */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {

		return (NULL);
	}

	/* SLEEP flag should not be used in interrupt context */
	if (servicing_interrupt()) {
		kmem_flag = KM_NOSLEEP;
		dmamem_wait = DDI_DMA_DONTWAIT;
	} else {
		kmem_flag = KM_SLEEP;
		dmamem_wait = DDI_DMA_SLEEP;
	}

	/* Allocate space for the transfer wrapper */
	if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
	    NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: kmem_alloc failed");

		return (NULL);
	}

	/* a zero-length packet doesn't need to allocate dma memory */
	if (length == 0) {

		goto dmadone;
	}

	/* allow sg lists for transfer wrapper dma memory */
	bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
	dma_attr.dma_attr_sgllen = UHCI_DMA_ATTR_SGLLEN;
	dma_attr.dma_attr_align = UHCI_DMA_ATTR_ALIGN;

	/* Store the transfer length */
	tw->tw_length = length;

	/* Allocate the DMA handle */
	if (ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr, dmamem_wait,
	    0, &tw->tw_dmahandle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: Alloc handle failed");
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Allocate the memory */
	if (ddi_dma_mem_alloc(tw->tw_dmahandle, tw->tw_length, &dev_attr,
	    DDI_DMA_CONSISTENT, dmamem_wait, NULL, (caddr_t *)&tw->tw_buf,
	    &real_length, &tw->tw_accesshandle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: dma_mem_alloc fail");
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	ASSERT(real_length >= length);

	/* Bind the handle */
	if (ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies) !=
	    DDI_DMA_MAPPED) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: Bind handle failed");
		ddi_dma_mem_free(&tw->tw_accesshandle);
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	tw->tw_cookie_idx = 0;
	tw->tw_dma_offs = 0;

dmadone:
	/*
	 * Only allow one wrapper to be added at a time.
Insert the
1840 * new transaction wrapper into the list for this pipe.
1841 */
1842 if (pp->pp_tw_head == NULL) {
1843 pp->pp_tw_head = tw;
1844 pp->pp_tw_tail = tw;
1845 } else {
1846 pp->pp_tw_tail->tw_next = tw;
1847 pp->pp_tw_tail = tw;
1848 ASSERT(tw->tw_next == NULL);
1849 }
1850
1851 /* Store a back pointer to the pipe private structure */
1852 tw->tw_pipe_private = pp;
1853
1854 /* Store the transfer type - synchronous or asynchronous */
1855 tw->tw_flags = usb_flags;
1856
1857 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1858 "uhci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
1859 (void *)tw, tw->tw_ncookies);
1860
1861 return (tw);
1862 }
1863
1864
1865 /*
1866 * uhci_insert_hc_td:
1867 * Insert a Transfer Descriptor (TD) on a QH.
1868 */
1869 int
1870 uhci_insert_hc_td(
1871 uhci_state_t *uhcip,
1872 uint32_t buffer_offset,
1873 size_t hcgtd_length,
1874 uhci_pipe_private_t *pp,
1875 uhci_trans_wrapper_t *tw,
1876 uchar_t PID,
1877 usb_req_attrs_t attrs)
1878 {
1879 uhci_td_t *td, *current_dummy;
1880 queue_head_t *qh = pp->pp_qh;
1881
1882 ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1883
1884 if ((td = uhci_allocate_td_from_pool(uhcip)) == NULL) {
1885
1886 return (USB_NO_RESOURCES);
1887 }
1888
1889 current_dummy = qh->td_tailp;
1890
1891 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
1892 "uhci_insert_hc_td: td %p, attrs = 0x%x", (void *)td, attrs);
1893
1894 /*
1895 * Fill in the current dummy td and
1896 * add the new dummy to the end.
1897 */
1898 uhci_fill_in_td(uhcip, td, current_dummy, buffer_offset,
1899 hcgtd_length, pp, PID, attrs, tw);
1900
1901 /*
1902 * Allow HC hardware to xfer the td, except for interrupt out tds.
1903 */
1904 if ((tw->tw_handle_td != uhci_handle_intr_td) || (PID != PID_OUT)) {
1905
1906 SetTD_status(uhcip, current_dummy, UHCI_TD_ACTIVE);
1907 }
1908
1909 /* Insert this td onto the tw */
1910
1911 if (tw->tw_hctd_head == NULL) {
1912 ASSERT(tw->tw_hctd_tail == NULL);
1913 tw->tw_hctd_head = current_dummy;
1914 tw->tw_hctd_tail = current_dummy;
1915 } else {
1916 /* Add the td to the end of the list */
1917 tw->tw_hctd_tail->tw_td_next = current_dummy;
1918 tw->tw_hctd_tail = current_dummy;
1919 }
1920
1921 /*
1922 * Insert the TD onto the QH. When this occurs,
1923 * the Host Controller will see the newly filled in TD.
1924 */
1925 current_dummy->outst_td_next = NULL;
1926 current_dummy->outst_td_prev = uhcip->uhci_outst_tds_tail;
1927 if (uhcip->uhci_outst_tds_head == NULL) {
1928 uhcip->uhci_outst_tds_head = current_dummy;
1929 } else {
1930 uhcip->uhci_outst_tds_tail->outst_td_next = current_dummy;
1931 }
1932 uhcip->uhci_outst_tds_tail = current_dummy;
1933 current_dummy->tw = tw;
1934
1935 return (USB_SUCCESS);
1936 }
1937
1938
1939 /*
1940 * uhci_fill_in_td:
1941 * Fill in the fields of a Transfer Descriptor (TD).
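 * A sketch of the dummy-TD scheme implemented below: the QH's tail TD
 * is always an inactive dummy. This routine fills the current dummy
 * with the real transfer and links the freshly allocated 'td' behind
 * it as the new dummy, so the HC never fetches a half-constructed TD.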
1942 */ 1943 static void 1944 uhci_fill_in_td( 1945 uhci_state_t *uhcip, 1946 uhci_td_t *td, 1947 uhci_td_t *current_dummy, 1948 uint32_t buffer_offset, 1949 size_t length, 1950 uhci_pipe_private_t *pp, 1951 uchar_t PID, 1952 usb_req_attrs_t attrs, 1953 uhci_trans_wrapper_t *tw) 1954 { 1955 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle; 1956 uint32_t buf_addr; 1957 1958 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 1959 "uhci_fill_in_td: td 0x%p buf_offs 0x%x len 0x%lx " 1960 "attrs 0x%x", (void *)td, buffer_offset, length, attrs); 1961 1962 /* 1963 * If this is an isochronous TD, just return 1964 */ 1965 if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) { 1966 1967 return; 1968 } 1969 1970 /* The maximum transfer length of UHCI cannot exceed 0x500 bytes */ 1971 ASSERT(length <= UHCI_MAX_TD_XFER_SIZE); 1972 1973 bzero((char *)td, sizeof (uhci_td_t)); /* Clear the TD */ 1974 SetTD32(uhcip, current_dummy->link_ptr, TD_PADDR(td)); 1975 1976 if (attrs & USB_ATTRS_SHORT_XFER_OK) { 1977 SetTD_spd(uhcip, current_dummy, 1); 1978 } 1979 1980 mutex_enter(&ph->p_usba_device->usb_mutex); 1981 if (ph->p_usba_device->usb_port_status == USBA_LOW_SPEED_DEV) { 1982 SetTD_ls(uhcip, current_dummy, LOW_SPEED_DEVICE); 1983 } 1984 1985 SetTD_c_err(uhcip, current_dummy, UHCI_MAX_ERR_COUNT); 1986 SetTD_mlen(uhcip, current_dummy, 1987 (length == 0) ? ZERO_LENGTH : (length - 1)); 1988 SetTD_dtogg(uhcip, current_dummy, pp->pp_data_toggle); 1989 1990 /* Adjust the data toggle bit */ 1991 ADJ_DATA_TOGGLE(pp); 1992 1993 SetTD_devaddr(uhcip, current_dummy, ph->p_usba_device->usb_addr); 1994 SetTD_endpt(uhcip, current_dummy, 1995 ph->p_ep.bEndpointAddress & END_POINT_ADDRESS_MASK); 1996 SetTD_PID(uhcip, current_dummy, PID); 1997 SetTD_ioc(uhcip, current_dummy, INTERRUPT_ON_COMPLETION); 1998 1999 buf_addr = uhci_get_tw_paddr_by_offs(uhcip, buffer_offset, length, tw); 2000 SetTD32(uhcip, current_dummy->buffer_address, buf_addr); 2001 2002 td->qh_td_prev = current_dummy; 2003 current_dummy->qh_td_prev = NULL; 2004 pp->pp_qh->td_tailp = td; 2005 mutex_exit(&ph->p_usba_device->usb_mutex); 2006 } 2007 2008 /* 2009 * uhci_get_tw_paddr_by_offs: 2010 * Walk through the DMA cookies of a TW buffer to retrieve 2011 * the device address used for a TD. 2012 * 2013 * buffer_offset - the starting offset into the TW buffer, where the 2014 * TD should transfer from. When a TW has more than 2015 * one TD, the TDs must be filled in increasing order. 2016 */ 2017 static uint32_t 2018 uhci_get_tw_paddr_by_offs( 2019 uhci_state_t *uhcip, 2020 uint32_t buffer_offset, 2021 size_t length, 2022 uhci_trans_wrapper_t *tw) 2023 { 2024 uint32_t buf_addr; 2025 int rem_len; 2026 2027 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2028 "uhci_get_tw_paddr_by_offs: buf_offs 0x%x len 0x%lx", 2029 buffer_offset, length); 2030 2031 /* 2032 * TDs must be filled in increasing DMA offset order. 2033 * tw_dma_offs is initialized to be 0 at TW creation and 2034 * is only increased in this function. 2035 */ 2036 ASSERT(length == 0 || buffer_offset >= tw->tw_dma_offs); 2037 2038 if (length == 0) { 2039 buf_addr = 0; 2040 2041 return (buf_addr); 2042 } 2043 2044 /* 2045 * Advance to the next DMA cookie until finding the cookie 2046 * that buffer_offset falls in. 2047 * It is very likely this loop will never repeat more than 2048 * once. It is here just to accommodate the case buffer_offset 2049 * is increased by multiple cookies during two consecutive 2050 * calls into this function. 
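 * Hypothetical walk (the cookie sizes are examples only): with three
 * 0x800-byte cookies, a call at buffer_offset 0x100 stays in cookie 0,
 * while a later call at 0x1100 advances tw_dma_offs from 0 to 0x800 to
 * 0x1000 and returns an address in cookie 2 without ever mapping a TD
 * onto cookie 1.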
In that case, the interim DMA
2051 * buffer is allowed to be skipped.
2052 */
2053 while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2054 buffer_offset) {
2055 /*
2056 * tw_dma_offs always points to the starting offset
2057 * of a cookie
2058 */
2059 tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2060 ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2061 tw->tw_cookie_idx++;
2062 ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2063 }
2064
2065 /*
2066 * Count the remaining buffer length to be filled in
2067 * the TDs for the current DMA cookie
2068 */
2069 rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2070 buffer_offset;
2071
2072 /* Calculate the beginning address of the buffer */
2073 ASSERT(length <= rem_len);
2074 buf_addr = (buffer_offset - tw->tw_dma_offs) +
2075 tw->tw_cookie.dmac_address;
2076
2077 USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2078 "uhci_get_tw_paddr_by_offs: dmac_addr 0x%x dmac_size "
2079 "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2080 tw->tw_cookie_idx);
2081
2082 return (buf_addr);
2083 }
2084
2085
2086 /*
2087 * uhci_modify_td_active_bits:
2088 * Sets the active bit in all the TDs of a QH to INACTIVE so that
2089 * the HC stops processing the TDs related to the QH.
2090 */
2091 void
2092 uhci_modify_td_active_bits(
2093 uhci_state_t *uhcip,
2094 uhci_pipe_private_t *pp)
2095 {
2096 uhci_td_t *td_head;
2097 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep;
2098 uhci_trans_wrapper_t *tw_head = pp->pp_tw_head;
2099
2100 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2101 "uhci_modify_td_active_bits: tw head %p", (void *)tw_head);
2102
2103 while (tw_head != NULL) {
2104 tw_head->tw_claim = UHCI_MODIFY_TD_BITS_CLAIMED;
2105 td_head = tw_head->tw_hctd_head;
2106
2107 while (td_head) {
2108 if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) {
2109 SetTD_status(uhcip, td_head,
2110 GetTD_status(uhcip, td_head) & TD_INACTIVE);
2111 } else {
2112 SetTD32(uhcip, td_head->link_ptr,
2113 GetTD32(uhcip, td_head->link_ptr) |
2114 HC_END_OF_LIST);
2115 }
2116
2117 td_head = td_head->tw_td_next;
2118 }
2119 tw_head = tw_head->tw_next;
2120 }
2121 }
2122
2123
2124 /*
2125 * uhci_insert_ctrl_td:
2126 * Create a TD and a data buffer for a control Queue Head.
2127 */
2128 int
2129 uhci_insert_ctrl_td(
2130 uhci_state_t *uhcip,
2131 usba_pipe_handle_data_t *ph,
2132 usb_ctrl_req_t *ctrl_reqp,
2133 usb_flags_t flags)
2134 {
2135 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
2136 uhci_trans_wrapper_t *tw;
2137 size_t ctrl_buf_size;
2138
2139 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2140 "uhci_insert_ctrl_td: timeout: 0x%x", ctrl_reqp->ctrl_timeout);
2141
2142 ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2143
2144 /*
2145 * If we have a control data phase, make the data buffer start
2146 * on the next 64-byte boundary so as to ensure the DMA cookie
2147 * can fit in the multiple TDs. The buffer in the range of
2148 * [SETUP_SIZE, UHCI_CTRL_EPT_MAX_SIZE) is just for padding
2149 * and not to be transferred.
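 * Resulting buffer layout (assuming SETUP_SIZE is 8 and
 * UHCI_CTRL_EPT_MAX_SIZE is 64, as the names suggest): bytes [0, 8)
 * hold the setup packet, bytes [8, 64) are padding, and the data
 * phase, when present, starts at offset 64.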
2150 */
2151 if (ctrl_reqp->ctrl_wLength) {
2152 ctrl_buf_size = UHCI_CTRL_EPT_MAX_SIZE +
2153 ctrl_reqp->ctrl_wLength;
2154 } else {
2155 ctrl_buf_size = SETUP_SIZE;
2156 }
2157
2158 /* Allocate a transaction wrapper */
2159 if ((tw = uhci_create_transfer_wrapper(uhcip, pp,
2160 ctrl_buf_size, flags)) == NULL) {
2161 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2162 "uhci_insert_ctrl_td: TW allocation failed");
2163
2164 return (USB_NO_RESOURCES);
2165 }
2166
2167 pp->pp_data_toggle = 0;
2168
2169 tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
2170 tw->tw_bytes_xfered = 0;
2171 tw->tw_bytes_pending = ctrl_reqp->ctrl_wLength;
2172 tw->tw_timeout_cnt = max(UHCI_CTRL_TIMEOUT, ctrl_reqp->ctrl_timeout);
2173
2174 /*
2175 * Initialize the callback and any callback
2176 * data for when the td completes.
2177 */
2178 tw->tw_handle_td = uhci_handle_ctrl_td;
2179 tw->tw_handle_callback_value = NULL;
2180
2181 if ((uhci_create_setup_pkt(uhcip, pp, tw)) != USB_SUCCESS) {
2182 tw->tw_ctrl_state = 0;
2183
2184 /* free the transfer wrapper */
2185 uhci_deallocate_tw(uhcip, pp, tw);
2186
2187 return (USB_NO_RESOURCES);
2188 }
2189
2190 tw->tw_ctrl_state = SETUP;
2191
2192 return (USB_SUCCESS);
2193 }
2194
2195
2196 /*
2197 * uhci_create_setup_pkt:
2198 * Create a setup packet to initiate a control transfer.
2199 *
2200 * The OHCI driver has seen cases where devices fail if there is
2201 * more than one control transfer to the device within a frame.
2202 * So the UHCI driver ensures that only one TD will be put on the
2203 * control pipe to one device (to be consistent with the OHCI driver).
2204 */
2205 static int
2206 uhci_create_setup_pkt(
2207 uhci_state_t *uhcip,
2208 uhci_pipe_private_t *pp,
2209 uhci_trans_wrapper_t *tw)
2210 {
2211 int sdata;
2212 usb_ctrl_req_t *req = (usb_ctrl_req_t *)tw->tw_curr_xfer_reqp;
2213
2214 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2215 "uhci_create_setup_pkt: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%p",
2216 req->ctrl_bmRequestType, req->ctrl_bRequest, req->ctrl_wValue,
2217 req->ctrl_wIndex, req->ctrl_wLength, (void *)req->ctrl_data);
2218
2219 ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2220 ASSERT(tw != NULL);
2221
2222 /* Create the first four bytes of the setup packet */
2223 sdata = (req->ctrl_bmRequestType | (req->ctrl_bRequest << 8) |
2224 (req->ctrl_wValue << 16));
2225 ddi_put32(tw->tw_accesshandle, (uint_t *)tw->tw_buf, sdata);
2226
2227 /* Create the second four bytes */
2228 sdata = (uint32_t)(req->ctrl_wIndex | (req->ctrl_wLength << 16));
2229 ddi_put32(tw->tw_accesshandle,
2230 (uint_t *)(tw->tw_buf + sizeof (uint_t)), sdata);
2231
2232 /*
2233 * The TD's are placed on the QH one at a time.
2234 * Once this TD is placed on the done list, the
2235 * data or status phase TD will be enqueued.
2236 */
2237 if ((uhci_insert_hc_td(uhcip, 0, SETUP_SIZE,
2238 pp, tw, PID_SETUP, req->ctrl_attributes)) != USB_SUCCESS) {
2239
2240 return (USB_NO_RESOURCES);
2241 }
2242
2243 USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2244 "Create_setup: pp = 0x%p, attrs = 0x%x", (void *)pp,
2245 req->ctrl_attributes);
2246
2247 /*
2248 * If this control transfer has a data phase, record the
2249 * direction. If the data phase is an OUT transaction,
2250 * copy the data into the buffer of the transfer wrapper.
2251 */
2252 if (req->ctrl_wLength != 0) {
2253 /* There is a data stage.
Find the direction */ 2254 if (req->ctrl_bmRequestType & USB_DEV_REQ_DEV_TO_HOST) { 2255 tw->tw_direction = PID_IN; 2256 } else { 2257 tw->tw_direction = PID_OUT; 2258 2259 /* Copy the data into the buffer */ 2260 ddi_rep_put8(tw->tw_accesshandle, 2261 req->ctrl_data->b_rptr, 2262 (uint8_t *)(tw->tw_buf + UHCI_CTRL_EPT_MAX_SIZE), 2263 req->ctrl_wLength, 2264 DDI_DEV_AUTOINCR); 2265 } 2266 } 2267 2268 return (USB_SUCCESS); 2269 } 2270 2271 2272 /* 2273 * uhci_create_stats: 2274 * Allocate and initialize the uhci kstat structures 2275 */ 2276 void 2277 uhci_create_stats(uhci_state_t *uhcip) 2278 { 2279 int i; 2280 char kstatname[KSTAT_STRLEN]; 2281 char *usbtypes[USB_N_COUNT_KSTATS] = 2282 {"ctrl", "isoch", "bulk", "intr"}; 2283 uint_t instance = uhcip->uhci_instance; 2284 const char *dname = ddi_driver_name(uhcip->uhci_dip); 2285 uhci_intrs_stats_t *isp; 2286 2287 if (UHCI_INTRS_STATS(uhcip) == NULL) { 2288 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs", 2289 dname, instance); 2290 UHCI_INTRS_STATS(uhcip) = kstat_create("usba", instance, 2291 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED, 2292 sizeof (uhci_intrs_stats_t) / sizeof (kstat_named_t), 2293 KSTAT_FLAG_PERSISTENT); 2294 2295 if (UHCI_INTRS_STATS(uhcip) != NULL) { 2296 isp = UHCI_INTRS_STATS_DATA(uhcip); 2297 kstat_named_init(&isp->uhci_intrs_hc_halted, 2298 "HC Halted", KSTAT_DATA_UINT64); 2299 kstat_named_init(&isp->uhci_intrs_hc_process_err, 2300 "HC Process Errors", KSTAT_DATA_UINT64); 2301 kstat_named_init(&isp->uhci_intrs_host_sys_err, 2302 "Host Sys Errors", KSTAT_DATA_UINT64); 2303 kstat_named_init(&isp->uhci_intrs_resume_detected, 2304 "Resume Detected", KSTAT_DATA_UINT64); 2305 kstat_named_init(&isp->uhci_intrs_usb_err_intr, 2306 "USB Error", KSTAT_DATA_UINT64); 2307 kstat_named_init(&isp->uhci_intrs_usb_intr, 2308 "USB Interrupts", KSTAT_DATA_UINT64); 2309 kstat_named_init(&isp->uhci_intrs_total, 2310 "Total Interrupts", KSTAT_DATA_UINT64); 2311 kstat_named_init(&isp->uhci_intrs_not_claimed, 2312 "Not Claimed", KSTAT_DATA_UINT64); 2313 2314 UHCI_INTRS_STATS(uhcip)->ks_private = uhcip; 2315 UHCI_INTRS_STATS(uhcip)->ks_update = nulldev; 2316 kstat_install(UHCI_INTRS_STATS(uhcip)); 2317 } 2318 } 2319 2320 if (UHCI_TOTAL_STATS(uhcip) == NULL) { 2321 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total", 2322 dname, instance); 2323 UHCI_TOTAL_STATS(uhcip) = kstat_create("usba", instance, 2324 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1, 2325 KSTAT_FLAG_PERSISTENT); 2326 2327 if (UHCI_TOTAL_STATS(uhcip) != NULL) { 2328 kstat_install(UHCI_TOTAL_STATS(uhcip)); 2329 } 2330 } 2331 2332 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 2333 if (uhcip->uhci_count_stats[i] == NULL) { 2334 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s", 2335 dname, instance, usbtypes[i]); 2336 uhcip->uhci_count_stats[i] = kstat_create("usba", 2337 instance, kstatname, "usb_byte_count", 2338 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 2339 2340 if (uhcip->uhci_count_stats[i] != NULL) { 2341 kstat_install(uhcip->uhci_count_stats[i]); 2342 } 2343 } 2344 } 2345 } 2346 2347 2348 /* 2349 * uhci_destroy_stats: 2350 * Clean up uhci kstat structures 2351 */ 2352 void 2353 uhci_destroy_stats(uhci_state_t *uhcip) 2354 { 2355 int i; 2356 2357 if (UHCI_INTRS_STATS(uhcip)) { 2358 kstat_delete(UHCI_INTRS_STATS(uhcip)); 2359 UHCI_INTRS_STATS(uhcip) = NULL; 2360 } 2361 2362 if (UHCI_TOTAL_STATS(uhcip)) { 2363 kstat_delete(UHCI_TOTAL_STATS(uhcip)); 2364 UHCI_TOTAL_STATS(uhcip) = NULL; 2365 } 2366 2367 for (i = 0; i < USB_N_COUNT_KSTATS; i++) { 2368 if 
(uhcip->uhci_count_stats[i]) { 2369 kstat_delete(uhcip->uhci_count_stats[i]); 2370 uhcip->uhci_count_stats[i] = NULL; 2371 } 2372 } 2373 } 2374 2375 2376 void 2377 uhci_do_intrs_stats(uhci_state_t *uhcip, int val) 2378 { 2379 if (UHCI_INTRS_STATS(uhcip) == NULL) { 2380 2381 return; 2382 } 2383 2384 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_total.value.ui64++; 2385 switch (val) { 2386 case USBSTS_REG_HC_HALTED: 2387 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_hc_halted.value.ui64++; 2388 break; 2389 case USBSTS_REG_HC_PROCESS_ERR: 2390 UHCI_INTRS_STATS_DATA(uhcip)-> 2391 uhci_intrs_hc_process_err.value.ui64++; 2392 break; 2393 case USBSTS_REG_HOST_SYS_ERR: 2394 UHCI_INTRS_STATS_DATA(uhcip)-> 2395 uhci_intrs_host_sys_err.value.ui64++; 2396 break; 2397 case USBSTS_REG_RESUME_DETECT: 2398 UHCI_INTRS_STATS_DATA(uhcip)-> 2399 uhci_intrs_resume_detected.value.ui64++; 2400 break; 2401 case USBSTS_REG_USB_ERR_INTR: 2402 UHCI_INTRS_STATS_DATA(uhcip)-> 2403 uhci_intrs_usb_err_intr.value.ui64++; 2404 break; 2405 case USBSTS_REG_USB_INTR: 2406 UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_usb_intr.value.ui64++; 2407 break; 2408 default: 2409 UHCI_INTRS_STATS_DATA(uhcip)-> 2410 uhci_intrs_not_claimed.value.ui64++; 2411 break; 2412 } 2413 } 2414 2415 2416 void 2417 uhci_do_byte_stats(uhci_state_t *uhcip, size_t len, uint8_t attr, uint8_t addr) 2418 { 2419 uint8_t type = attr & USB_EP_ATTR_MASK; 2420 uint8_t dir = addr & USB_EP_DIR_MASK; 2421 2422 switch (dir) { 2423 case USB_EP_DIR_IN: 2424 UHCI_TOTAL_STATS_DATA(uhcip)->reads++; 2425 UHCI_TOTAL_STATS_DATA(uhcip)->nread += len; 2426 switch (type) { 2427 case USB_EP_ATTR_CONTROL: 2428 UHCI_CTRL_STATS(uhcip)->reads++; 2429 UHCI_CTRL_STATS(uhcip)->nread += len; 2430 break; 2431 case USB_EP_ATTR_BULK: 2432 UHCI_BULK_STATS(uhcip)->reads++; 2433 UHCI_BULK_STATS(uhcip)->nread += len; 2434 break; 2435 case USB_EP_ATTR_INTR: 2436 UHCI_INTR_STATS(uhcip)->reads++; 2437 UHCI_INTR_STATS(uhcip)->nread += len; 2438 break; 2439 case USB_EP_ATTR_ISOCH: 2440 UHCI_ISOC_STATS(uhcip)->reads++; 2441 UHCI_ISOC_STATS(uhcip)->nread += len; 2442 break; 2443 } 2444 break; 2445 case USB_EP_DIR_OUT: 2446 UHCI_TOTAL_STATS_DATA(uhcip)->writes++; 2447 UHCI_TOTAL_STATS_DATA(uhcip)->nwritten += len; 2448 switch (type) { 2449 case USB_EP_ATTR_CONTROL: 2450 UHCI_CTRL_STATS(uhcip)->writes++; 2451 UHCI_CTRL_STATS(uhcip)->nwritten += len; 2452 break; 2453 case USB_EP_ATTR_BULK: 2454 UHCI_BULK_STATS(uhcip)->writes++; 2455 UHCI_BULK_STATS(uhcip)->nwritten += len; 2456 break; 2457 case USB_EP_ATTR_INTR: 2458 UHCI_INTR_STATS(uhcip)->writes++; 2459 UHCI_INTR_STATS(uhcip)->nwritten += len; 2460 break; 2461 case USB_EP_ATTR_ISOCH: 2462 UHCI_ISOC_STATS(uhcip)->writes++; 2463 UHCI_ISOC_STATS(uhcip)->nwritten += len; 2464 break; 2465 } 2466 break; 2467 } 2468 } 2469 2470 2471 /* 2472 * uhci_free_tw: 2473 * Free the Transfer Wrapper (TW). 
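 * Two resource shapes are possible here: the per-packet isoc DMA
 * buffer array built by uhci_create_isoc_transfer_wrapper()
 * (tw_isoc_strtlen > 0), or the single bound DMA handle built by
 * uhci_create_transfer_wrapper().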
2474 */
2475 void
2476 uhci_free_tw(uhci_state_t *uhcip, uhci_trans_wrapper_t *tw)
2477 {
2478 int rval, i;
2479
2480 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, "uhci_free_tw:");
2481
2482 ASSERT(tw != NULL);
2483
2484 if (tw->tw_isoc_strtlen > 0) {
2485 ASSERT(tw->tw_isoc_bufs != NULL);
2486 for (i = 0; i < tw->tw_ncookies; i++) {
2487 rval = ddi_dma_unbind_handle(
2488 tw->tw_isoc_bufs[i].dma_handle);
2489 ASSERT(rval == DDI_SUCCESS);
2490 ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
2491 ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
2492 }
2493 kmem_free(tw->tw_isoc_bufs, tw->tw_isoc_strtlen);
2494 } else if (tw->tw_dmahandle != NULL) {
2495 rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
2496 ASSERT(rval == DDI_SUCCESS);
2497
2498 ddi_dma_mem_free(&tw->tw_accesshandle);
2499 ddi_dma_free_handle(&tw->tw_dmahandle);
2500 }
2501
2502 kmem_free(tw, sizeof (uhci_trans_wrapper_t));
2503 }
2504
2505
2506 /*
2507 * uhci_deallocate_tw:
2508 * Deallocate a Transaction Wrapper (TW); this involves
2509 * the freeing of DMA resources.
2510 */
2511 void
2512 uhci_deallocate_tw(uhci_state_t *uhcip,
2513 uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw)
2514 {
2515 uhci_trans_wrapper_t *head;
2516
2517 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2518 "uhci_deallocate_tw:");
2519
2520 /*
2521 * If the transfer wrapper has no Host Controller (HC)
2522 * Transfer Descriptors (TD) associated with it, then
2523 * remove the transfer wrapper. The transfers are done
2524 * in FIFO order, so this should be the first transfer
2525 * wrapper on the list.
2526 */
2527 if (tw->tw_hctd_head != NULL) {
2528 ASSERT(tw->tw_hctd_tail != NULL);
2529
2530 return;
2531 }
2532
2533 ASSERT(tw->tw_hctd_tail == NULL);
2534 ASSERT(pp->pp_tw_head != NULL);
2535
2536 /*
2537 * Remove the tw from the pipe's TW list; if the list becomes
 * empty, clear pp_tw_tail as well.
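 * Unlink sketch (mirrors the code below): when tw is the head,
 * advance pp_tw_head; otherwise walk to tw's predecessor and bridge
 * its tw_next past tw. In either case, repoint pp_tw_tail if tw was
 * the last wrapper.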
2538 */ 2539 head = pp->pp_tw_head; 2540 2541 if (head == tw) { 2542 pp->pp_tw_head = head->tw_next; 2543 if (pp->pp_tw_head == NULL) { 2544 pp->pp_tw_tail = NULL; 2545 } 2546 } else { 2547 while (head->tw_next != tw) 2548 head = head->tw_next; 2549 head->tw_next = tw->tw_next; 2550 if (tw->tw_next == NULL) { 2551 pp->pp_tw_tail = head; 2552 } 2553 } 2554 uhci_free_tw(uhcip, tw); 2555 } 2556 2557 2558 void 2559 uhci_delete_td(uhci_state_t *uhcip, uhci_td_t *td) 2560 { 2561 uhci_td_t *tmp_td; 2562 uhci_trans_wrapper_t *tw = td->tw; 2563 2564 if ((td->outst_td_next == NULL) && (td->outst_td_prev == NULL)) { 2565 uhcip->uhci_outst_tds_head = NULL; 2566 uhcip->uhci_outst_tds_tail = NULL; 2567 } else if (td->outst_td_next == NULL) { 2568 td->outst_td_prev->outst_td_next = NULL; 2569 uhcip->uhci_outst_tds_tail = td->outst_td_prev; 2570 } else if (td->outst_td_prev == NULL) { 2571 td->outst_td_next->outst_td_prev = NULL; 2572 uhcip->uhci_outst_tds_head = td->outst_td_next; 2573 } else { 2574 td->outst_td_prev->outst_td_next = td->outst_td_next; 2575 td->outst_td_next->outst_td_prev = td->outst_td_prev; 2576 } 2577 2578 tmp_td = tw->tw_hctd_head; 2579 2580 if (tmp_td != td) { 2581 while (tmp_td->tw_td_next != td) { 2582 tmp_td = tmp_td->tw_td_next; 2583 } 2584 ASSERT(tmp_td); 2585 tmp_td->tw_td_next = td->tw_td_next; 2586 if (td->tw_td_next == NULL) { 2587 tw->tw_hctd_tail = tmp_td; 2588 } 2589 } else { 2590 tw->tw_hctd_head = tw->tw_hctd_head->tw_td_next; 2591 if (tw->tw_hctd_head == NULL) { 2592 tw->tw_hctd_tail = NULL; 2593 } 2594 } 2595 2596 td->flag = TD_FLAG_FREE; 2597 } 2598 2599 2600 void 2601 uhci_remove_tds_tws( 2602 uhci_state_t *uhcip, 2603 usba_pipe_handle_data_t *ph) 2604 { 2605 usb_opaque_t curr_reqp; 2606 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2607 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep; 2608 uhci_trans_wrapper_t *tw_tmp; 2609 uhci_trans_wrapper_t *tw_head = pp->pp_tw_head; 2610 2611 while (tw_head != NULL) { 2612 tw_tmp = tw_head; 2613 tw_head = tw_head->tw_next; 2614 2615 curr_reqp = tw_tmp->tw_curr_xfer_reqp; 2616 if (curr_reqp) { 2617 /* do this for control/bulk/intr */ 2618 if ((tw_tmp->tw_direction == PID_IN) && 2619 (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_INTR)) { 2620 uhci_deallocate_periodic_in_resource(uhcip, 2621 pp, tw_tmp); 2622 } else { 2623 uhci_hcdi_callback(uhcip, pp, 2624 pp->pp_pipe_handle, tw_tmp, USB_CR_FLUSHED); 2625 } 2626 } /* end of curr_reqp */ 2627 2628 if (tw_tmp->tw_claim != UHCI_MODIFY_TD_BITS_CLAIMED) { 2629 continue; 2630 } 2631 2632 while (tw_tmp->tw_hctd_head != NULL) { 2633 uhci_delete_td(uhcip, tw_tmp->tw_hctd_head); 2634 } 2635 2636 uhci_deallocate_tw(uhcip, pp, tw_tmp); 2637 } 2638 } 2639 2640 2641 /* 2642 * uhci_remove_qh: 2643 * Remove the Queue Head from the Host Controller's 2644 * appropriate QH list. 
2645 */ 2646 void 2647 uhci_remove_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2648 { 2649 uhci_td_t *dummy_td; 2650 2651 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2652 2653 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2654 "uhci_remove_qh:"); 2655 2656 dummy_td = pp->pp_qh->td_tailp; 2657 dummy_td->flag = TD_FLAG_FREE; 2658 2659 switch (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep)) { 2660 case USB_EP_ATTR_CONTROL: 2661 uhci_remove_ctrl_qh(uhcip, pp); 2662 break; 2663 case USB_EP_ATTR_BULK: 2664 uhci_remove_bulk_qh(uhcip, pp); 2665 break; 2666 case USB_EP_ATTR_INTR: 2667 uhci_remove_intr_qh(uhcip, pp); 2668 break; 2669 } 2670 } 2671 2672 2673 static void 2674 uhci_remove_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2675 { 2676 queue_head_t *qh = pp->pp_qh; 2677 queue_head_t *next_lattice_qh = 2678 QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2679 2680 qh->prev_qh->link_ptr = qh->link_ptr; 2681 next_lattice_qh->prev_qh = qh->prev_qh; 2682 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2683 2684 } 2685 2686 /* 2687 * uhci_remove_bulk_qh: 2688 * Remove a bulk QH from the Host Controller's QH list. There may be a 2689 * loop for bulk QHs, we must care about this while removing a bulk QH. 2690 */ 2691 static void 2692 uhci_remove_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2693 { 2694 queue_head_t *qh = pp->pp_qh; 2695 queue_head_t *next_lattice_qh; 2696 uint32_t paddr; 2697 2698 paddr = (GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2699 next_lattice_qh = (qh == uhcip->uhci_bulk_xfers_q_tail) ? 2700 0 : QH_VADDR(paddr); 2701 2702 if ((qh == uhcip->uhci_bulk_xfers_q_tail) && 2703 (qh->prev_qh == uhcip->uhci_bulk_xfers_q_head)) { 2704 SetQH32(uhcip, qh->prev_qh->link_ptr, HC_END_OF_LIST); 2705 } else { 2706 qh->prev_qh->link_ptr = qh->link_ptr; 2707 } 2708 2709 if (next_lattice_qh == NULL) { 2710 uhcip->uhci_bulk_xfers_q_tail = qh->prev_qh; 2711 } else { 2712 next_lattice_qh->prev_qh = qh->prev_qh; 2713 } 2714 2715 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2716 2717 } 2718 2719 2720 static void 2721 uhci_remove_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 2722 { 2723 queue_head_t *qh = pp->pp_qh; 2724 queue_head_t *next_lattice_qh = 2725 QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK); 2726 2727 qh->prev_qh->link_ptr = qh->link_ptr; 2728 if (next_lattice_qh->prev_qh != NULL) { 2729 next_lattice_qh->prev_qh = qh->prev_qh; 2730 } else { 2731 uhcip->uhci_ctrl_xfers_q_tail = qh->prev_qh; 2732 } 2733 2734 qh->qh_flag = QUEUE_HEAD_FLAG_FREE; 2735 } 2736 2737 2738 /* 2739 * uhci_allocate_td_from_pool: 2740 * Allocate a Transfer Descriptor (TD) from the TD buffer pool. 2741 */ 2742 static uhci_td_t * 2743 uhci_allocate_td_from_pool(uhci_state_t *uhcip) 2744 { 2745 int index; 2746 uhci_td_t *td; 2747 2748 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2749 2750 /* 2751 * Search for a blank Transfer Descriptor (TD) 2752 * in the TD buffer pool. 
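 * This is a simple linear scan over the uhci_td_pool_size entries;
 * no free list is maintained, which is presumably acceptable because
 * the pool is small and the scan runs under uhci_int_mutex.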
2753 */ 2754 for (index = 0; index < uhci_td_pool_size; index ++) { 2755 if (uhcip->uhci_td_pool_addr[index].flag == TD_FLAG_FREE) { 2756 break; 2757 } 2758 } 2759 2760 if (index == uhci_td_pool_size) { 2761 USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2762 "uhci_allocate_td_from_pool: TD exhausted"); 2763 2764 return (NULL); 2765 } 2766 2767 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2768 "uhci_allocate_td_from_pool: Allocated %d", index); 2769 2770 /* Create a new dummy for the end of the TD list */ 2771 td = &uhcip->uhci_td_pool_addr[index]; 2772 2773 /* Mark the newly allocated TD as a dummy */ 2774 td->flag = TD_FLAG_DUMMY; 2775 td->qh_td_prev = NULL; 2776 2777 return (td); 2778 } 2779 2780 2781 /* 2782 * uhci_insert_bulk_td: 2783 */ 2784 int 2785 uhci_insert_bulk_td( 2786 uhci_state_t *uhcip, 2787 usba_pipe_handle_data_t *ph, 2788 usb_bulk_req_t *req, 2789 usb_flags_t flags) 2790 { 2791 size_t length; 2792 uint_t mps; /* MaxPacketSize */ 2793 uint_t num_bulk_tds, i, j; 2794 uint32_t buf_offs; 2795 uhci_td_t *bulk_td_ptr; 2796 uhci_td_t *current_dummy, *tmp_td; 2797 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2798 uhci_trans_wrapper_t *tw; 2799 uhci_bulk_isoc_xfer_t *bulk_xfer_info; 2800 uhci_bulk_isoc_td_pool_t *td_pool_ptr; 2801 2802 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2803 "uhci_insert_bulk_td: req: 0x%p, flags = 0x%x", (void *)req, flags); 2804 2805 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2806 2807 /* 2808 * Create transfer wrapper 2809 */ 2810 if ((tw = uhci_create_transfer_wrapper(uhcip, pp, req->bulk_len, 2811 flags)) == NULL) { 2812 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2813 "uhci_insert_bulk_td: TW allocation failed"); 2814 2815 return (USB_NO_RESOURCES); 2816 } 2817 2818 tw->tw_bytes_xfered = 0; 2819 tw->tw_bytes_pending = req->bulk_len; 2820 tw->tw_handle_td = uhci_handle_bulk_td; 2821 tw->tw_handle_callback_value = (usb_opaque_t)req->bulk_data; 2822 tw->tw_timeout_cnt = req->bulk_timeout; 2823 tw->tw_data = req->bulk_data; 2824 tw->tw_curr_xfer_reqp = (usb_opaque_t)req; 2825 2826 /* Get the bulk pipe direction */ 2827 tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ? 2828 PID_OUT : PID_IN; 2829 2830 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2831 "uhci_insert_bulk_td: direction: 0x%x", tw->tw_direction); 2832 2833 /* If the DATA OUT, copy the data into transfer buffer. */ 2834 if (tw->tw_direction == PID_OUT) { 2835 if (req->bulk_len) { 2836 ASSERT(req->bulk_data != NULL); 2837 2838 /* Copy the data into the message */ 2839 ddi_rep_put8(tw->tw_accesshandle, 2840 req->bulk_data->b_rptr, 2841 (uint8_t *)tw->tw_buf, 2842 req->bulk_len, DDI_DEV_AUTOINCR); 2843 } 2844 } 2845 2846 /* Get the max packet size. */ 2847 length = mps = pp->pp_pipe_handle->p_ep.wMaxPacketSize; 2848 2849 /* 2850 * Calculate number of TD's to insert in the current frame interval. 2851 * Max number TD's allowed (driver implementation) is 128 2852 * in one frame interval. Once all the TD's are completed 2853 * then the remaining TD's will be inserted into the lattice 2854 * in the uhci_handle_bulk_td(). 
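 * Worked example with hypothetical numbers: a 61440-byte transfer on
 * a 64-byte pipe needs 960 TDs, so only MAX_NUM_BULK_TDS_PER_XFER
 * (128) TDs covering the first 8192 bytes are posted here; the rest
 * are posted from uhci_handle_bulk_td() as completions arrive.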
2855 */
2856 if ((tw->tw_bytes_pending / mps) >= MAX_NUM_BULK_TDS_PER_XFER) {
2857 num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
2858 } else {
2859 num_bulk_tds = (tw->tw_bytes_pending / mps);
2860
2861 if (tw->tw_bytes_pending % mps || tw->tw_bytes_pending == 0) {
2862 num_bulk_tds++;
2863 length = (tw->tw_bytes_pending % mps);
2864 }
2865 }
2866
2867 /*
2868 * Allocate memory for the bulk xfer information structure
2869 */
2870 if ((bulk_xfer_info = kmem_zalloc(
2871 sizeof (uhci_bulk_isoc_xfer_t), KM_NOSLEEP)) == NULL) {
2872 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2873 "uhci_insert_bulk_td: kmem_zalloc failed");
2874
2875 /* Free the transfer wrapper */
2876 uhci_deallocate_tw(uhcip, pp, tw);
2877
2878 return (USB_FAILURE);
2879 }
2880
2881 /* Allocate memory for the bulk TD's */
2882 if (uhci_alloc_bulk_isoc_tds(uhcip, num_bulk_tds, bulk_xfer_info) !=
2883 USB_SUCCESS) {
2884 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2885 "uhci_insert_bulk_td: alloc_bulk_isoc_tds failed");
2886
2887 kmem_free(bulk_xfer_info, sizeof (uhci_bulk_isoc_xfer_t));
2888
2889 /* Free the transfer wrapper */
2890 uhci_deallocate_tw(uhcip, pp, tw);
2891
2892 return (USB_FAILURE);
2893 }
2894
2895 td_pool_ptr = &bulk_xfer_info->td_pools[0];
2896 bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
2897 bulk_td_ptr[0].qh_td_prev = NULL;
2898 current_dummy = pp->pp_qh->td_tailp;
2899 buf_offs = 0;
2900 pp->pp_qh->bulk_xfer_info = bulk_xfer_info;
2901
2902 /* Fill up all the bulk TD's */
2903 for (i = 0; i < bulk_xfer_info->num_pools; i++) {
2904 for (j = 0; j < (td_pool_ptr->num_tds - 1); j++) {
2905 uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
2906 &bulk_td_ptr[j+1], BULKTD_PADDR(td_pool_ptr,
2907 &bulk_td_ptr[j+1]), ph, buf_offs, mps, tw);
2908 buf_offs += mps;
2909 }
2910
2911 /* fill in the last TD */
2912 if (i == (bulk_xfer_info->num_pools - 1)) {
2913 uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
2914 current_dummy, TD_PADDR(current_dummy),
2915 ph, buf_offs, length, tw);
2916 } else {
2917 /* fill in the TD at the tail of a pool */
2918 tmp_td = &bulk_td_ptr[j];
2919 td_pool_ptr = &bulk_xfer_info->td_pools[i + 1];
2920 bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
2921 uhci_fill_in_bulk_isoc_td(uhcip, tmp_td,
2922 &bulk_td_ptr[0], BULKTD_PADDR(td_pool_ptr,
2923 &bulk_td_ptr[0]), ph, buf_offs, mps, tw);
2924 buf_offs += mps;
2925 }
2926 }
2927
2928 bulk_xfer_info->num_tds = num_bulk_tds;
2929
2930 /*
2931 * Point the end of the lattice tree to the start of the bulk xfers
2932 * queue head. This allows the HC to execute the same Queue Head/TD
2933 * in the same frame. There are some bulk devices which NAK after
2934 * completing each TD; as a result, the performance on such devices
2935 * is very bad. This loop will provide a chance to execute NAK'ed
2936 * bulk TDs again in the same frame.
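 * The loop is created just below by pointing the lattice tail at the
 * first bulk QH when uhci_pending_bulk_cmds goes from 0 to 1; the
 * matching code in uhci_handle_bulk_td() and
 * uhci_handle_bulk_td_errors() restores HC_END_OF_LIST once the
 * count drops back to 0, so the HC does not spin on an empty queue.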
2937 */ 2938 if (uhcip->uhci_pending_bulk_cmds++ == 0) { 2939 uhcip->uhci_bulk_xfers_q_tail->link_ptr = 2940 uhcip->uhci_bulk_xfers_q_head->link_ptr; 2941 USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 2942 "uhci_insert_bulk_td: count = %d no tds %d", 2943 uhcip->uhci_pending_bulk_cmds, num_bulk_tds); 2944 } 2945 2946 /* Insert on the bulk queue head for the execution by HC */ 2947 SetQH32(uhcip, pp->pp_qh->element_ptr, 2948 bulk_xfer_info->td_pools[0].cookie.dmac_address); 2949 2950 return (USB_SUCCESS); 2951 } 2952 2953 2954 /* 2955 * uhci_fill_in_bulk_isoc_td 2956 * Fills the bulk/isoc TD 2957 * 2958 * offset - different meanings for bulk and isoc TDs: 2959 * starting offset into the TW buffer for a bulk TD 2960 * and the index into the isoc packet list for an isoc TD 2961 */ 2962 void 2963 uhci_fill_in_bulk_isoc_td(uhci_state_t *uhcip, uhci_td_t *current_td, 2964 uhci_td_t *next_td, 2965 uint32_t next_td_paddr, 2966 usba_pipe_handle_data_t *ph, 2967 uint_t offset, 2968 uint_t length, 2969 uhci_trans_wrapper_t *tw) 2970 { 2971 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2972 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep; 2973 uint32_t buf_addr; 2974 2975 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2976 "uhci_fill_in_bulk_isoc_td: tw 0x%p offs 0x%x length 0x%x", 2977 (void *)tw, offset, length); 2978 2979 bzero((char *)current_td, sizeof (uhci_td_t)); 2980 SetTD32(uhcip, current_td->link_ptr, next_td_paddr | HC_DEPTH_FIRST); 2981 2982 switch (UHCI_XFER_TYPE(ept)) { 2983 case USB_EP_ATTR_ISOCH: 2984 if (((usb_isoc_req_t *)tw->tw_curr_xfer_reqp)->isoc_attributes 2985 & USB_ATTRS_SHORT_XFER_OK) { 2986 SetTD_spd(uhcip, current_td, 1); 2987 } 2988 break; 2989 case USB_EP_ATTR_BULK: 2990 if (((usb_bulk_req_t *)tw->tw_curr_xfer_reqp)->bulk_attributes 2991 & USB_ATTRS_SHORT_XFER_OK) { 2992 SetTD_spd(uhcip, current_td, 1); 2993 } 2994 break; 2995 } 2996 2997 mutex_enter(&ph->p_usba_device->usb_mutex); 2998 2999 SetTD_c_err(uhcip, current_td, UHCI_MAX_ERR_COUNT); 3000 SetTD_status(uhcip, current_td, UHCI_TD_ACTIVE); 3001 SetTD_ioc(uhcip, current_td, INTERRUPT_ON_COMPLETION); 3002 SetTD_mlen(uhcip, current_td, 3003 (length == 0) ? ZERO_LENGTH : (length - 1)); 3004 SetTD_dtogg(uhcip, current_td, pp->pp_data_toggle); 3005 SetTD_devaddr(uhcip, current_td, ph->p_usba_device->usb_addr); 3006 SetTD_endpt(uhcip, current_td, ph->p_ep.bEndpointAddress & 3007 END_POINT_ADDRESS_MASK); 3008 SetTD_PID(uhcip, current_td, tw->tw_direction); 3009 3010 /* Get the right buffer address for the current TD */ 3011 switch (UHCI_XFER_TYPE(ept)) { 3012 case USB_EP_ATTR_ISOCH: 3013 buf_addr = tw->tw_isoc_bufs[offset].cookie.dmac_address; 3014 break; 3015 case USB_EP_ATTR_BULK: 3016 buf_addr = uhci_get_tw_paddr_by_offs(uhcip, offset, 3017 length, tw); 3018 break; 3019 } 3020 SetTD32(uhcip, current_td->buffer_address, buf_addr); 3021 3022 /* 3023 * Adjust the data toggle. 3024 * The data toggle bit must always be 0 for isoc transfers. 3025 * And set the "iso" bit in the TD for isoc transfers. 
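 * Toggle bookkeeping sketch: for ctrl/bulk TDs, ADJ_DATA_TOGGLE(pp)
 * (as its name suggests) flips pp_data_toggle between DATA0 and
 * DATA1 for the next TD; isoc TDs always use DATA0, so the toggle is
 * pinned to 0 instead of being adjusted.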
3026 */ 3027 if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) { 3028 pp->pp_data_toggle = 0; 3029 SetTD_iso(uhcip, current_td, 1); 3030 } else { 3031 ADJ_DATA_TOGGLE(pp); 3032 next_td->qh_td_prev = current_td; 3033 pp->pp_qh->td_tailp = next_td; 3034 } 3035 3036 current_td->outst_td_next = NULL; 3037 current_td->outst_td_prev = uhcip->uhci_outst_tds_tail; 3038 if (uhcip->uhci_outst_tds_head == NULL) { 3039 uhcip->uhci_outst_tds_head = current_td; 3040 } else { 3041 uhcip->uhci_outst_tds_tail->outst_td_next = current_td; 3042 } 3043 uhcip->uhci_outst_tds_tail = current_td; 3044 current_td->tw = tw; 3045 3046 if (tw->tw_hctd_head == NULL) { 3047 ASSERT(tw->tw_hctd_tail == NULL); 3048 tw->tw_hctd_head = current_td; 3049 tw->tw_hctd_tail = current_td; 3050 } else { 3051 /* Add the td to the end of the list */ 3052 tw->tw_hctd_tail->tw_td_next = current_td; 3053 tw->tw_hctd_tail = current_td; 3054 } 3055 3056 mutex_exit(&ph->p_usba_device->usb_mutex); 3057 } 3058 3059 3060 /* 3061 * uhci_alloc_bulk_isoc_tds: 3062 * - Allocates the isoc/bulk TD pools. It will allocate one whole 3063 * pool to store all the TDs if the system allows. Only when the 3064 * first allocation fails, it tries to allocate several small 3065 * pools with each pool limited in physical page size. 3066 */ 3067 static int 3068 uhci_alloc_bulk_isoc_tds( 3069 uhci_state_t *uhcip, 3070 uint_t num_tds, 3071 uhci_bulk_isoc_xfer_t *info) 3072 { 3073 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3074 "uhci_alloc_bulk_isoc_tds: num_tds: 0x%x info: 0x%p", 3075 num_tds, (void *)info); 3076 3077 info->num_pools = 1; 3078 /* allocate as a whole pool at the first time */ 3079 if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) != 3080 USB_SUCCESS) { 3081 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3082 "alloc_memory_for_tds failed: num_tds %d num_pools %d", 3083 num_tds, info->num_pools); 3084 3085 /* reduce the td number per pool and alloc again */ 3086 info->num_pools = num_tds / UHCI_MAX_TD_NUM_PER_POOL; 3087 if (num_tds % UHCI_MAX_TD_NUM_PER_POOL) { 3088 info->num_pools++; 3089 } 3090 3091 if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) != 3092 USB_SUCCESS) { 3093 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3094 "alloc_memory_for_tds failed: num_tds %d " 3095 "num_pools %d", num_tds, info->num_pools); 3096 3097 return (USB_NO_RESOURCES); 3098 } 3099 } 3100 3101 return (USB_SUCCESS); 3102 } 3103 3104 3105 /* 3106 * uhci_alloc_memory_for_tds: 3107 * - Allocates memory for the isoc/bulk td pools. 
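 * num_pools is normally 1. When uhci_alloc_bulk_isoc_tds() retries
 * after a failed whole-pool allocation, a hypothetical request for
 * 2.5 * UHCI_MAX_TD_NUM_PER_POOL TDs arrives here as three pools:
 * two full ones plus a remainder pool.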
3108 */ 3109 static int 3110 uhci_alloc_memory_for_tds( 3111 uhci_state_t *uhcip, 3112 uint_t num_tds, 3113 uhci_bulk_isoc_xfer_t *info) 3114 { 3115 int result, i, j, err; 3116 size_t real_length; 3117 uint_t ccount, num; 3118 ddi_device_acc_attr_t dev_attr; 3119 uhci_bulk_isoc_td_pool_t *td_pool_ptr1, *td_pool_ptr2; 3120 3121 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3122 "uhci_alloc_memory_for_tds: num_tds: 0x%x info: 0x%p " 3123 "num_pools: %u", num_tds, (void *)info, info->num_pools); 3124 3125 /* The host controller will be little endian */ 3126 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 3127 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 3128 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 3129 3130 /* Allocate the TD pool structures */ 3131 if ((info->td_pools = kmem_zalloc( 3132 (sizeof (uhci_bulk_isoc_td_pool_t) * info->num_pools), 3133 KM_SLEEP)) == NULL) { 3134 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3135 "uhci_alloc_memory_for_tds: alloc td_pools failed"); 3136 3137 return (USB_FAILURE); 3138 } 3139 3140 for (i = 0; i < info->num_pools; i++) { 3141 if (info->num_pools == 1) { 3142 num = num_tds; 3143 } else if (i < (info->num_pools - 1)) { 3144 num = UHCI_MAX_TD_NUM_PER_POOL; 3145 } else { 3146 num = (num_tds % UHCI_MAX_TD_NUM_PER_POOL); 3147 } 3148 3149 td_pool_ptr1 = &info->td_pools[i]; 3150 3151 /* Allocate the bulk TD pool DMA handle */ 3152 if (ddi_dma_alloc_handle(uhcip->uhci_dip, 3153 &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0, 3154 &td_pool_ptr1->dma_handle) != DDI_SUCCESS) { 3155 3156 for (j = 0; j < i; j++) { 3157 td_pool_ptr2 = &info->td_pools[j]; 3158 result = ddi_dma_unbind_handle( 3159 td_pool_ptr2->dma_handle); 3160 ASSERT(result == DDI_SUCCESS); 3161 ddi_dma_mem_free(&td_pool_ptr2->mem_handle); 3162 ddi_dma_free_handle(&td_pool_ptr2->dma_handle); 3163 } 3164 3165 kmem_free(info->td_pools, 3166 (sizeof (uhci_bulk_isoc_td_pool_t) * 3167 info->num_pools)); 3168 3169 return (USB_FAILURE); 3170 } 3171 3172 /* Allocate the memory for the bulk TD pool */ 3173 if (ddi_dma_mem_alloc(td_pool_ptr1->dma_handle, 3174 num * sizeof (uhci_td_t), &dev_attr, 3175 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0, 3176 &td_pool_ptr1->pool_addr, &real_length, 3177 &td_pool_ptr1->mem_handle) != DDI_SUCCESS) { 3178 3179 ddi_dma_free_handle(&td_pool_ptr1->dma_handle); 3180 3181 for (j = 0; j < i; j++) { 3182 td_pool_ptr2 = &info->td_pools[j]; 3183 result = ddi_dma_unbind_handle( 3184 td_pool_ptr2->dma_handle); 3185 ASSERT(result == DDI_SUCCESS); 3186 ddi_dma_mem_free(&td_pool_ptr2->mem_handle); 3187 ddi_dma_free_handle(&td_pool_ptr2->dma_handle); 3188 } 3189 3190 kmem_free(info->td_pools, 3191 (sizeof (uhci_bulk_isoc_td_pool_t) * 3192 info->num_pools)); 3193 3194 return (USB_FAILURE); 3195 } 3196 3197 /* Map the bulk TD pool into the I/O address space */ 3198 result = ddi_dma_addr_bind_handle(td_pool_ptr1->dma_handle, 3199 NULL, (caddr_t)td_pool_ptr1->pool_addr, real_length, 3200 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 3201 &td_pool_ptr1->cookie, &ccount); 3202 3203 /* Process the result */ 3204 err = USB_SUCCESS; 3205 3206 if (result != DDI_DMA_MAPPED) { 3207 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3208 "uhci_allocate_memory_for_tds: Result = %d", 3209 result); 3210 uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, 3211 result); 3212 3213 err = USB_FAILURE; 3214 } 3215 3216 if ((result == DDI_DMA_MAPPED) && (ccount != 1)) { 3217 /* The cookie count should be 1 */ 3218 USB_DPRINTF_L2(PRINT_MASK_ATTA, 3219 
uhcip->uhci_log_hdl,
3220 "uhci_allocate_memory_for_tds: "
3221 "More than 1 cookie");
3222
3223 result = ddi_dma_unbind_handle(
3224 td_pool_ptr1->dma_handle);
3225 ASSERT(result == DDI_SUCCESS);
3226
3227 err = USB_FAILURE;
3228 }
3229
3230 if (err == USB_FAILURE) {
3231
3232 ddi_dma_mem_free(&td_pool_ptr1->mem_handle);
3233 ddi_dma_free_handle(&td_pool_ptr1->dma_handle);
3234
3235 for (j = 0; j < i; j++) {
3236 td_pool_ptr2 = &info->td_pools[j];
3237 result = ddi_dma_unbind_handle(
3238 td_pool_ptr2->dma_handle);
3239 ASSERT(result == DDI_SUCCESS);
3240 ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
3241 ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
3242 }
3243
3244 kmem_free(info->td_pools,
3245 (sizeof (uhci_bulk_isoc_td_pool_t) *
3246 info->num_pools));
3247
3248 return (USB_FAILURE);
3249 }
3250
3251 bzero((void *)td_pool_ptr1->pool_addr,
3252 num * sizeof (uhci_td_t));
3253 td_pool_ptr1->num_tds = num;
3254 }
3255
3256 return (USB_SUCCESS);
3257 }
3258
3259
3260 /*
3261 * uhci_handle_bulk_td:
3262 *
3263 * Handles the completed bulk transfer descriptors
3264 */
3265 void
3266 uhci_handle_bulk_td(uhci_state_t *uhcip, uhci_td_t *td)
3267 {
3268 uint_t num_bulk_tds, index, td_count, j;
3269 usb_cr_t error;
3270 uint_t length, bytes_xfered;
3271 ushort_t MaxPacketSize;
3272 uint32_t buf_offs, paddr;
3273 uhci_td_t *bulk_td_ptr, *current_dummy, *td_head;
3274 uhci_td_t *tmp_td;
3275 queue_head_t *qh, *next_qh;
3276 uhci_trans_wrapper_t *tw = td->tw;
3277 uhci_pipe_private_t *pp = tw->tw_pipe_private;
3278 uhci_bulk_isoc_xfer_t *bulk_xfer_info;
3279 uhci_bulk_isoc_td_pool_t *td_pool_ptr;
3280 usba_pipe_handle_data_t *ph;
3281
3282 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3283 "uhci_handle_bulk_td: td = 0x%p tw = 0x%p", (void *)td, (void *)tw);
3284
3285 /*
3286 * Update the tw_bytes_pending, and tw_bytes_xfered
3287 */
3288 bytes_xfered = ZERO_LENGTH;
3289
3290 /*
3291 * Check whether any errors occurred in the xfer.
3292 * If so, update the data_toggle for the queue head and
3293 * return an error to the upper layer.
3294 */
3295 if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
3296 uhci_handle_bulk_td_errors(uhcip, td);
3297
3298 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3299 "uhci_handle_bulk_td: error; data toggle: 0x%x",
3300 pp->pp_data_toggle);
3301
3302 return;
3303 }
3304
3305 /*
3306 * Update the tw_bytes_pending, and tw_bytes_xfered
3307 */
3308 bytes_xfered = GetTD_alen(uhcip, td);
3309 if (bytes_xfered != ZERO_LENGTH) {
3310 tw->tw_bytes_pending -= (bytes_xfered + 1);
3311 tw->tw_bytes_xfered += (bytes_xfered + 1);
3312 }
3313
3314 /*
3315 * Get Bulk pipe information and pipe handle
3316 */
3317 bulk_xfer_info = pp->pp_qh->bulk_xfer_info;
3318 ph = tw->tw_pipe_private->pp_pipe_handle;
3319
3320 /*
3321 * Check whether a data underrun occurred.
3322 * If so, complete the transfer and
3323 * update the data toggle bit.
3324 */
3325 if (bytes_xfered != GetTD_mlen(uhcip, td)) {
3326 bulk_xfer_info->num_tds = 1;
3327 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3328 "uhci_handle_bulk_td: Data underrun occurred");
3329
3330 pp->pp_data_toggle = GetTD_dtogg(uhcip, td) == 0 ? 1 : 0;
3331 }
3332
3333 /*
3334 * If the TD's in the current frame are completed, then check
3335 * whether we have any more bytes to xfer. If so, insert TD's.
3336 * If no more bytes need to be transferred, then do a callback to
3337 * the upper layer.
3338 * If the TD's in the current frame are not completed, then
3339 * just delete the TD from the linked lists.
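 * Two notes on the arithmetic above: the UHCI length fields encode
 * (n - 1), with ZERO_LENGTH standing for zero bytes, which is why
 * tw_bytes_pending/tw_bytes_xfered are updated by (bytes_xfered + 1);
 * and bulk_xfer_info->num_tds counts down the outstanding TDs, so the
 * branch below runs the reuse-or-complete logic only on the last TD
 * of the batch.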
3340 */ 3341 USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3342 "uhci_handle_bulk_td: completed TD data toggle: 0x%x", 3343 GetTD_dtogg(uhcip, td)); 3344 3345 if (--bulk_xfer_info->num_tds == 0) { 3346 uhci_delete_td(uhcip, td); 3347 3348 if ((tw->tw_bytes_pending) && 3349 (GetTD_mlen(uhcip, td) - GetTD_alen(uhcip, td) == 0)) { 3350 3351 MaxPacketSize = pp->pp_pipe_handle->p_ep.wMaxPacketSize; 3352 length = MaxPacketSize; 3353 3354 qh = pp->pp_qh; 3355 paddr = GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK; 3356 if (GetQH32(uhcip, qh->link_ptr) != 3357 GetQH32(uhcip, 3358 uhcip->uhci_bulk_xfers_q_head->link_ptr)) { 3359 next_qh = QH_VADDR(paddr); 3360 SetQH32(uhcip, qh->prev_qh->link_ptr, 3361 paddr|(0x2)); 3362 next_qh->prev_qh = qh->prev_qh; 3363 SetQH32(uhcip, qh->link_ptr, 3364 GetQH32(uhcip, 3365 uhcip->uhci_bulk_xfers_q_head->link_ptr)); 3366 qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail; 3367 SetQH32(uhcip, 3368 uhcip->uhci_bulk_xfers_q_tail->link_ptr, 3369 QH_PADDR(qh) | 0x2); 3370 uhcip->uhci_bulk_xfers_q_tail = qh; 3371 } 3372 3373 if ((tw->tw_bytes_pending / MaxPacketSize) >= 3374 MAX_NUM_BULK_TDS_PER_XFER) { 3375 num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER; 3376 } else { 3377 num_bulk_tds = 3378 (tw->tw_bytes_pending / MaxPacketSize); 3379 if (tw->tw_bytes_pending % MaxPacketSize) { 3380 num_bulk_tds++; 3381 length = (tw->tw_bytes_pending % 3382 MaxPacketSize); 3383 } 3384 } 3385 3386 current_dummy = pp->pp_qh->td_tailp; 3387 td_pool_ptr = &bulk_xfer_info->td_pools[0]; 3388 bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr; 3389 buf_offs = tw->tw_bytes_xfered; 3390 td_count = num_bulk_tds; 3391 index = 0; 3392 3393 /* reuse the TDs to transfer more data */ 3394 while (td_count > 0) { 3395 for (j = 0; 3396 (j < (td_pool_ptr->num_tds - 1)) && 3397 (td_count > 1); j++, td_count--) { 3398 uhci_fill_in_bulk_isoc_td(uhcip, 3399 &bulk_td_ptr[j], &bulk_td_ptr[j+1], 3400 BULKTD_PADDR(td_pool_ptr, 3401 &bulk_td_ptr[j+1]), ph, buf_offs, 3402 MaxPacketSize, tw); 3403 buf_offs += MaxPacketSize; 3404 } 3405 3406 if (td_count == 1) { 3407 uhci_fill_in_bulk_isoc_td(uhcip, 3408 &bulk_td_ptr[j], current_dummy, 3409 TD_PADDR(current_dummy), ph, 3410 buf_offs, length, tw); 3411 3412 break; 3413 } else { 3414 tmp_td = &bulk_td_ptr[j]; 3415 ASSERT(index < 3416 (bulk_xfer_info->num_pools - 1)); 3417 td_pool_ptr = &bulk_xfer_info-> 3418 td_pools[index + 1]; 3419 bulk_td_ptr = (uhci_td_t *) 3420 td_pool_ptr->pool_addr; 3421 uhci_fill_in_bulk_isoc_td(uhcip, 3422 tmp_td, &bulk_td_ptr[0], 3423 BULKTD_PADDR(td_pool_ptr, 3424 &bulk_td_ptr[0]), ph, buf_offs, 3425 MaxPacketSize, tw); 3426 buf_offs += MaxPacketSize; 3427 td_count--; 3428 index++; 3429 } 3430 } 3431 3432 pp->pp_qh->bulk_xfer_info = bulk_xfer_info; 3433 bulk_xfer_info->num_tds = num_bulk_tds; 3434 SetQH32(uhcip, pp->pp_qh->element_ptr, 3435 bulk_xfer_info->td_pools[0].cookie.dmac_address); 3436 } else { 3437 usba_pipe_handle_data_t *usb_pp = pp->pp_pipe_handle; 3438 3439 pp->pp_qh->bulk_xfer_info = NULL; 3440 3441 if (tw->tw_bytes_pending) { 3442 /* Update the element pointer */ 3443 SetQH32(uhcip, pp->pp_qh->element_ptr, 3444 TD_PADDR(pp->pp_qh->td_tailp)); 3445 3446 /* Remove all the tds */ 3447 td_head = tw->tw_hctd_head; 3448 while (td_head != NULL) { 3449 uhci_delete_td(uhcip, td_head); 3450 td_head = tw->tw_hctd_head; 3451 } 3452 } 3453 3454 if (tw->tw_direction == PID_IN) { 3455 usb_req_attrs_t attrs = ((usb_bulk_req_t *) 3456 tw->tw_curr_xfer_reqp)->bulk_attributes; 3457 3458 error = USB_CR_OK; 3459 3460 /* Data run occurred */ 3461 
if (tw->tw_bytes_pending && 3462 (!(attrs & USB_ATTRS_SHORT_XFER_OK))) { 3463 error = USB_CR_DATA_UNDERRUN; 3464 } 3465 3466 uhci_sendup_td_message(uhcip, error, tw); 3467 } else { 3468 uhci_do_byte_stats(uhcip, tw->tw_length, 3469 usb_pp->p_ep.bmAttributes, 3470 usb_pp->p_ep.bEndpointAddress); 3471 3472 /* Data underrun occurred */ 3473 if (tw->tw_bytes_pending) { 3474 3475 tw->tw_data->b_rptr += 3476 tw->tw_bytes_xfered; 3477 3478 USB_DPRINTF_L2(PRINT_MASK_ATTA, 3479 uhcip->uhci_log_hdl, 3480 "uhci_handle_bulk_td: " 3481 "data underrun occurred"); 3482 3483 uhci_hcdi_callback(uhcip, pp, 3484 tw->tw_pipe_private->pp_pipe_handle, 3485 tw, USB_CR_DATA_UNDERRUN); 3486 } else { 3487 uhci_hcdi_callback(uhcip, pp, 3488 tw->tw_pipe_private->pp_pipe_handle, 3489 tw, USB_CR_OK); 3490 } 3491 } /* direction */ 3492 3493 /* Deallocate DMA memory */ 3494 uhci_deallocate_tw(uhcip, pp, tw); 3495 for (j = 0; j < bulk_xfer_info->num_pools; j++) { 3496 td_pool_ptr = &bulk_xfer_info->td_pools[j]; 3497 (void) ddi_dma_unbind_handle( 3498 td_pool_ptr->dma_handle); 3499 ddi_dma_mem_free(&td_pool_ptr->mem_handle); 3500 ddi_dma_free_handle(&td_pool_ptr->dma_handle); 3501 } 3502 kmem_free(bulk_xfer_info->td_pools, 3503 (sizeof (uhci_bulk_isoc_td_pool_t) * 3504 bulk_xfer_info->num_pools)); 3505 kmem_free(bulk_xfer_info, 3506 sizeof (uhci_bulk_isoc_xfer_t)); 3507 3508 /* 3509 * When there are no pending bulk commands, point the 3510 * end of the lattice tree to NULL. This will make sure 3511 * that the HC control does not loop anymore and PCI 3512 * bus is not affected. 3513 */ 3514 if (--uhcip->uhci_pending_bulk_cmds == 0) { 3515 uhcip->uhci_bulk_xfers_q_tail->link_ptr = 3516 HC_END_OF_LIST; 3517 USB_DPRINTF_L3(PRINT_MASK_ATTA, 3518 uhcip->uhci_log_hdl, 3519 "uhci_handle_bulk_td: count = %d", 3520 uhcip->uhci_pending_bulk_cmds); 3521 } 3522 } 3523 } else { 3524 uhci_delete_td(uhcip, td); 3525 } 3526 } 3527 3528 3529 void 3530 uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td) 3531 { 3532 usb_cr_t usb_err; 3533 uint32_t paddr_tail, element_ptr, paddr; 3534 uhci_td_t *next_td; 3535 uhci_pipe_private_t *pp; 3536 uhci_trans_wrapper_t *tw = td->tw; 3537 usba_pipe_handle_data_t *ph; 3538 uhci_bulk_isoc_td_pool_t *td_pool_ptr = NULL; 3539 3540 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3541 "uhci_handle_bulk_td_errors: td = %p", (void *)td); 3542 3543 #ifdef DEBUG 3544 uhci_print_td(uhcip, td); 3545 #endif 3546 3547 tw = td->tw; 3548 ph = tw->tw_pipe_private->pp_pipe_handle; 3549 pp = (uhci_pipe_private_t *)ph->p_hcd_private; 3550 3551 /* 3552 * Find the type of error occurred and return the error 3553 * to the upper layer. And adjust the data toggle. 3554 */ 3555 element_ptr = GetQH32(uhcip, pp->pp_qh->element_ptr) & 3556 QH_ELEMENT_PTR_MASK; 3557 paddr_tail = TD_PADDR(pp->pp_qh->td_tailp); 3558 3559 /* 3560 * If a timeout occurs before a transfer has completed, 3561 * the timeout handler sets the CRC/Timeout bit and clears the Active 3562 * bit in the link_ptr for each td in the transfer. 3563 * It then waits (at least) 1 ms so that any tds the controller might 3564 * have been executing will have completed. 3565 * So at this point element_ptr will point to either: 3566 * 1) the next td for the transfer (which has not been executed, 3567 * and has the CRC/Timeout status bit set and Active bit cleared), 3568 * 2) the dummy td for this qh. 3569 * So if the element_ptr does not point to the dummy td, we know 3570 * it points to the next td that would have been executed. 
3571 * That td has the data toggle we want to save. 3572 * All outstanding tds have been marked as CRC/Timeout, 3573 * so it doesn't matter which td we pass to uhci_parse_td_error 3574 * for the error status. 3575 */ 3576 if (element_ptr != paddr_tail) { 3577 paddr = (element_ptr & QH_ELEMENT_PTR_MASK); 3578 uhci_get_bulk_td_by_paddr(uhcip, pp->pp_qh->bulk_xfer_info, 3579 paddr, &td_pool_ptr); 3580 next_td = BULKTD_VADDR(td_pool_ptr, paddr); 3581 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 3582 "uhci_handle_bulk_td_errors: next td = %p", 3583 (void *)next_td); 3584 3585 usb_err = uhci_parse_td_error(uhcip, pp, next_td); 3586 } else { 3587 usb_err = uhci_parse_td_error(uhcip, pp, td); 3588 } 3589 3590 /* 3591 * Update the link pointer. 3592 */ 3593 SetQH32(uhcip, pp->pp_qh->element_ptr, TD_PADDR(pp->pp_qh->td_tailp)); 3594 3595 /* 3596 * Send up number of bytes transferred before the error condition. 3597 */ 3598 if ((tw->tw_direction == PID_OUT) && tw->tw_data) { 3599 tw->tw_data->b_rptr += tw->tw_bytes_xfered; 3600 } 3601 3602 uhci_remove_bulk_tds_tws(uhcip, tw->tw_pipe_private, UHCI_IN_ERROR); 3603 3604 /* 3605 * When there are no pending bulk commands, point the end of the 3606 * lattice tree to NULL. This will make sure that the HC control 3607 * does not loop anymore and PCI bus is not affected. 3608 */ 3609 if (--uhcip->uhci_pending_bulk_cmds == 0) { 3610 uhcip->uhci_bulk_xfers_q_tail->link_ptr = HC_END_OF_LIST; 3611 USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3612 "uhci_handle_bulk_td_errors: count = %d", 3613 uhcip->uhci_pending_bulk_cmds); 3614 } 3615 3616 uhci_hcdi_callback(uhcip, pp, ph, tw, usb_err); 3617 uhci_deallocate_tw(uhcip, pp, tw); 3618 } 3619 3620 3621 /* 3622 * uhci_get_bulk_td_by_paddr: 3623 * Obtain the address of the TD pool the physical address falls in. 
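 * A paddr belongs to pool i when cookie.dmac_address <= paddr <
 * cookie.dmac_address + cookie.dmac_size; the loop below is a linear
 * scan of the num_pools entries, and the ASSERT requires that a
 * matching pool exists.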
3624 * 3625 * td_pool_pp - pointer to the address of the TD pool containing the paddr 3626 */ 3627 /* ARGSUSED */ 3628 static void 3629 uhci_get_bulk_td_by_paddr( 3630 uhci_state_t *uhcip, 3631 uhci_bulk_isoc_xfer_t *info, 3632 uint32_t paddr, 3633 uhci_bulk_isoc_td_pool_t **td_pool_pp) 3634 { 3635 uint_t i = 0; 3636 3637 while (i < info->num_pools) { 3638 *td_pool_pp = &info->td_pools[i]; 3639 if (((*td_pool_pp)->cookie.dmac_address <= paddr) && 3640 (((*td_pool_pp)->cookie.dmac_address + 3641 (*td_pool_pp)->cookie.dmac_size) > paddr)) { 3642 3643 break; 3644 } 3645 i++; 3646 } 3647 3648 ASSERT(i < info->num_pools); 3649 } 3650 3651 3652 void 3653 uhci_remove_bulk_tds_tws( 3654 uhci_state_t *uhcip, 3655 uhci_pipe_private_t *pp, 3656 int what) 3657 { 3658 uint_t rval, i; 3659 uhci_td_t *head; 3660 uhci_td_t *head_next; 3661 usb_opaque_t curr_reqp; 3662 uhci_bulk_isoc_xfer_t *info; 3663 uhci_bulk_isoc_td_pool_t *td_pool_ptr; 3664 3665 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 3666 3667 if ((info = pp->pp_qh->bulk_xfer_info) == NULL) { 3668 3669 return; 3670 } 3671 3672 head = uhcip->uhci_outst_tds_head; 3673 3674 while (head) { 3675 uhci_trans_wrapper_t *tw_tmp = head->tw; 3676 head_next = head->outst_td_next; 3677 3678 if (pp->pp_qh == tw_tmp->tw_pipe_private->pp_qh) { 3679 curr_reqp = tw_tmp->tw_curr_xfer_reqp; 3680 if (curr_reqp && 3681 ((what == UHCI_IN_CLOSE) || 3682 (what == UHCI_IN_RESET))) { 3683 uhci_hcdi_callback(uhcip, pp, 3684 pp->pp_pipe_handle, 3685 tw_tmp, USB_CR_FLUSHED); 3686 } /* end of curr_reqp */ 3687 3688 uhci_delete_td(uhcip, head); 3689 3690 if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) { 3691 ASSERT(info->num_tds > 0); 3692 if (--info->num_tds == 0) { 3693 uhci_deallocate_tw(uhcip, pp, tw_tmp); 3694 3695 /* 3696 * This will make sure that the HC 3697 * does not loop anymore when there 3698 * are no pending bulk commands. 3699 */ 3700 if (--uhcip->uhci_pending_bulk_cmds 3701 == 0) { 3702 uhcip->uhci_bulk_xfers_q_tail-> 3703 link_ptr = HC_END_OF_LIST; 3704 USB_DPRINTF_L3(PRINT_MASK_ATTA, 3705 uhcip->uhci_log_hdl, 3706 "uhci_remove_bulk_tds_tws:" 3707 " count = %d", 3708 uhcip-> 3709 uhci_pending_bulk_cmds); 3710 } 3711 } 3712 } 3713 } 3714 3715 head = head_next; 3716 } 3717 3718 if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) { 3719 ASSERT(info->num_tds == 0); 3720 } 3721 3722 for (i = 0; i < info->num_pools; i++) { 3723 td_pool_ptr = &info->td_pools[i]; 3724 rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle); 3725 ASSERT(rval == DDI_SUCCESS); 3726 ddi_dma_mem_free(&td_pool_ptr->mem_handle); 3727 ddi_dma_free_handle(&td_pool_ptr->dma_handle); 3728 } 3729 kmem_free(info->td_pools, (sizeof (uhci_bulk_isoc_td_pool_t) * 3730 info->num_pools)); 3731 kmem_free(info, sizeof (uhci_bulk_isoc_xfer_t)); 3732 pp->pp_qh->bulk_xfer_info = NULL; 3733 } 3734 3735 3736 /* 3737 * uhci_save_data_toggle () 3738 * Save the data toggle in the usba_device structure 3739 */ 3740 void 3741 uhci_save_data_toggle(uhci_pipe_private_t *pp) 3742 { 3743 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle; 3744 3745 /* Save the data toggle in the usb devices structure. */ 3746 mutex_enter(&ph->p_mutex); 3747 usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress, 3748 pp->pp_data_toggle); 3749 mutex_exit(&ph->p_mutex); 3750 } 3751 3752 /* 3753 * uhci_create_isoc_transfer_wrapper: 3754 * Create a Transaction Wrapper (TW) for isoc transfer. 3755 * This involves the allocating of DMA resources. 
 *
 * For isoc transfers, one isoc transfer includes multiple packets
 * and each packet may have a different length. So each packet is
 * transferred by one TD. We only know that an individual packet
 * length won't exceed 1023 bytes, but we don't know the exact
 * lengths in advance. It is hard to set up one physically
 * discontiguous DMA buffer that fits all the TDs, as can be done
 * for ctrl/bulk/intr transfers. It is also undesirable to make one
 * physically contiguous DMA buffer for all the packets, since this
 * may easily fail when the system is low on memory. So an individual
 * DMA buffer is allocated for each individual isoc packet, and each
 * DMA buffer is physically contiguous. An extra structure is
 * allocated to save the multiple DMA handles.
 */
static uhci_trans_wrapper_t *
uhci_create_isoc_transfer_wrapper(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	usb_isoc_req_t		*req,
	size_t			length,
	usb_flags_t		usb_flags)
{
	int			result;
	size_t			real_length, strtlen, xfer_size;
	uhci_trans_wrapper_t	*tw;
	ddi_device_acc_attr_t	dev_attr;
	ddi_dma_attr_t		dma_attr;
	int			kmem_flag;
	int			(*dmamem_wait)(caddr_t);
	uint_t			i, j, ccount;
	usb_isoc_req_t		*tmp_req = req;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep) != USB_EP_ATTR_ISOCH) {

		return (NULL);
	}

	if ((req == NULL) && (UHCI_XFER_DIR(&pp->pp_pipe_handle->p_ep) ==
	    USB_EP_DIR_IN)) {
		tmp_req = (usb_isoc_req_t *)pp->pp_client_periodic_in_reqp;
	}

	if (tmp_req == NULL) {

		return (NULL);
	}


	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_isoc_transfer_wrapper: length = 0x%lx flags = 0x%x",
	    length, usb_flags);

	/* The SLEEP flag should not be used in interrupt context */
	if (servicing_interrupt()) {
		kmem_flag = KM_NOSLEEP;
		dmamem_wait = DDI_DMA_DONTWAIT;
	} else {
		kmem_flag = KM_SLEEP;
		dmamem_wait = DDI_DMA_SLEEP;
	}

	/* Allocate space for the transfer wrapper */
	if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
	    NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_isoc_transfer_wrapper: kmem_zalloc failed");

		return (NULL);
	}

	/* Allocate space for the isoc buffer handles */
	strtlen = sizeof (uhci_isoc_buf_t) * tmp_req->isoc_pkts_count;
	if ((tw->tw_isoc_bufs = kmem_zalloc(strtlen, kmem_flag)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_isoc_transfer_wrapper: kmem_zalloc "
		    "isoc buffer failed");
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
	dma_attr.dma_attr_sgllen = 1;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Store the transfer length */
	tw->tw_length = length;

	for (i = 0; i < tmp_req->isoc_pkts_count; i++) {
		tw->tw_isoc_bufs[i].index = i;

		/* Allocate the DMA handle */
		if ((result = ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr,
		    dmamem_wait, 0, &tw->tw_isoc_bufs[i].dma_handle)) !=
		    DDI_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS,
			    uhcip->uhci_log_hdl,
			    "uhci_create_isoc_transfer_wrapper: "
			    "Alloc handle %d failed", i);

			for (j = 0; j < i; j++) {
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[j].dma_handle);
				ASSERT(result == DDI_SUCCESS);
				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
				    mem_handle);
				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
				    dma_handle);
			}
			kmem_free(tw->tw_isoc_bufs, strtlen);
			kmem_free(tw, sizeof (uhci_trans_wrapper_t));

			return (NULL);
		}

		/* Allocate the memory */
		xfer_size = tmp_req->isoc_pkt_descr[i].isoc_pkt_length;
		if ((result = ddi_dma_mem_alloc(tw->tw_isoc_bufs[i].dma_handle,
		    xfer_size, &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait,
		    NULL, (caddr_t *)&tw->tw_isoc_bufs[i].buf_addr,
		    &real_length, &tw->tw_isoc_bufs[i].mem_handle)) !=
		    DDI_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_create_isoc_transfer_wrapper: "
			    "dma_mem_alloc %d fail", i);
			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);

			for (j = 0; j < i; j++) {
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[j].dma_handle);
				ASSERT(result == DDI_SUCCESS);
				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
				    mem_handle);
				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
				    dma_handle);
			}
			kmem_free(tw->tw_isoc_bufs, strtlen);
			kmem_free(tw, sizeof (uhci_trans_wrapper_t));

			return (NULL);
		}

		ASSERT(real_length >= xfer_size);

		/* Bind the handle */
		result = ddi_dma_addr_bind_handle(
		    tw->tw_isoc_bufs[i].dma_handle, NULL,
		    (caddr_t)tw->tw_isoc_bufs[i].buf_addr, real_length,
		    DDI_DMA_RDWR|DDI_DMA_CONSISTENT, dmamem_wait, NULL,
		    &tw->tw_isoc_bufs[i].cookie, &ccount);

		if ((result == DDI_DMA_MAPPED) && (ccount == 1)) {
			tw->tw_isoc_bufs[i].length = xfer_size;

			continue;
		} else {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_create_isoc_transfer_wrapper: "
			    "Bind handle %d failed", i);
			if (result == DDI_DMA_MAPPED) {
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[i].dma_handle);
				ASSERT(result == DDI_SUCCESS);
			}
			ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);

			for (j = 0; j < i; j++) {
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[j].dma_handle);
				ASSERT(result == DDI_SUCCESS);
				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
				    mem_handle);
				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
				    dma_handle);
			}
			kmem_free(tw->tw_isoc_bufs, strtlen);
			kmem_free(tw, sizeof (uhci_trans_wrapper_t));

			return (NULL);
		}
	}

	tw->tw_ncookies = tmp_req->isoc_pkts_count;
	tw->tw_isoc_strtlen = strtlen;

	/*
	 * Only allow one wrapper to be added at a time. Insert the
	 * new transaction wrapper into the list for this pipe.
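	 * Inserting at the tail keeps the pipe's transfer wrappers in
	 * submission order: pp_tw_head is always the oldest outstanding
	 * wrapper and pp_tw_tail the newest.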
	 */
	if (pp->pp_tw_head == NULL) {
		pp->pp_tw_head = tw;
		pp->pp_tw_tail = tw;
	} else {
		pp->pp_tw_tail->tw_next = tw;
		pp->pp_tw_tail = tw;
		ASSERT(tw->tw_next == NULL);
	}

	/* Store a back pointer to the pipe private structure */
	tw->tw_pipe_private = pp;

	/* Store the transfer type - synchronous or asynchronous */
	tw->tw_flags = usb_flags;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_isoc_transfer_wrapper: tw = 0x%p, ncookies = %u",
	    (void *)tw, tw->tw_ncookies);

	return (tw);
}

/*
 * uhci_insert_isoc_td:
 *	- Create the transfer wrapper
 *	- Allocate memory for the isoc TDs
 *	- Fill up all the TDs and submit them to the HC
 *	- Update all the linked lists
 */
int
uhci_insert_isoc_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_isoc_req_t		*isoc_req,
	size_t			length,
	usb_flags_t		flags)
{
	int			rval = USB_SUCCESS;
	int			error;
	uint_t			ddic;
	uint32_t		i, j, index;
	uint32_t		bytes_to_xfer;
	uint32_t		expired_frames = 0;
	usb_frame_number_t	start_frame, end_frame, current_frame;
	uhci_td_t		*td_ptr;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: ph = 0x%p isoc req = %p length = %lu",
	    (void *)ph, (void *)isoc_req, length);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Allocate a transfer wrapper */
	if ((tw = uhci_create_isoc_transfer_wrapper(uhcip, pp, isoc_req,
	    length, flags)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/* Save the current isochronous request pointer */
	tw->tw_curr_xfer_reqp = (usb_opaque_t)isoc_req;

	/*
	 * Initialize the transfer wrapper. These values are useful
	 * for sending back the reply.
	 */
	tw->tw_handle_td = uhci_handle_isoc_td;
	tw->tw_handle_callback_value = NULL;
	tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
	    PID_OUT : PID_IN;

	/*
	 * If the transfer is an isoc send (OUT), copy the data from the
	 * request to the transfer wrapper.
	 */
	if ((tw->tw_direction == PID_OUT) && length) {
		uchar_t *p;

		ASSERT(isoc_req->isoc_data != NULL);
		p = isoc_req->isoc_data->b_rptr;

		/* Copy the data into the message */
		for (i = 0; i < isoc_req->isoc_pkts_count; i++) {
			ddi_rep_put8(tw->tw_isoc_bufs[i].mem_handle,
			    p, (uint8_t *)tw->tw_isoc_bufs[i].buf_addr,
			    isoc_req->isoc_pkt_descr[i].isoc_pkt_length,
			    DDI_DEV_AUTOINCR);
			p += isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		}
	}

	if (tw->tw_direction == PID_IN) {
		if ((rval = uhci_allocate_periodic_in_resource(uhcip, pp, tw,
		    flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
			    "uhci_insert_isoc_td: isoc_req_t alloc failed");
			uhci_deallocate_tw(uhcip, pp, tw);

			return (rval);
		}

		isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;
	}

	tw->tw_isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;

	/* Get the pointer to the isoc_xfer_info structure */
	isoc_xfer_info = (uhci_bulk_isoc_xfer_t *)&tw->tw_xfer_info;
	isoc_xfer_info->num_tds = isoc_req->isoc_pkts_count;

	/*
	 * Allocate memory for the isoc TDs
	 */
	if ((rval = uhci_alloc_bulk_isoc_tds(uhcip, isoc_req->isoc_pkts_count,
	    isoc_xfer_info)) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_bulk_isoc_tds: Memory allocation failure");

		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}
		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	/*
	 * Get the address of the first isoc TD pool and of its TDs.
	 */
	td_pool_ptr = &isoc_xfer_info->td_pools[0];
	td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
	index = 0;

	/*
	 * Fill up the isoc TDs
	 */
	USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: isoc pkts %d", isoc_req->isoc_pkts_count);

	for (i = 0; i < isoc_xfer_info->num_pools; i++) {
		for (j = 0; j < td_pool_ptr->num_tds; j++) {
			bytes_to_xfer =
			    isoc_req->isoc_pkt_descr[index].isoc_pkt_length;

			uhci_fill_in_bulk_isoc_td(uhcip, &td_ptr[j],
			    (uhci_td_t *)NULL, HC_END_OF_LIST, ph, index,
			    bytes_to_xfer, tw);
			td_ptr[j].isoc_pkt_index = index;
			index++;
		}

		if (i < (isoc_xfer_info->num_pools - 1)) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i + 1];
			td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
		}
	}

	/*
	 * Get the starting frame number.
	 * The client driver sets the USB_ATTRS_ISOC_XFER_ASAP flag to let
	 * the HCD take care of choosing the starting frame number.
	 *
	 * The following code is very time critical, so execute it atomically.
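	 *
	 * Illustrative example (hypothetical values, assuming, say,
	 * FRNUM_OFFSET = 5): a request with USB_ATTRS_ISOC_START_FRAME,
	 * isoc_frame_no = 100 and isoc_pkts_count = 20 (end_frame = 120)
	 * that is processed when current_frame = 110 (so the window check
	 * against UHCI_MAX_ISOC_FRAMES passes) has its first
	 * (110 + 5) - 100 = 15 packets treated as expired (marked
	 * USB_CR_NOT_ACCESSED below) and is scheduled starting at frame 115.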
	 */
	ddic = ddi_enter_critical();
	current_frame = uhci_get_sw_frame_number(uhcip);

	if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_START_FRAME) {
		start_frame = isoc_req->isoc_frame_no;
		end_frame = start_frame + isoc_req->isoc_pkts_count;

		/* Check the available frames */
		if ((end_frame - current_frame) < UHCI_MAX_ISOC_FRAMES) {
			if (current_frame > start_frame) {
				if ((current_frame + FRNUM_OFFSET) <
				    end_frame) {
					expired_frames = current_frame +
					    FRNUM_OFFSET - start_frame;
					start_frame = current_frame +
					    FRNUM_OFFSET;
				} else {
					rval = USB_INVALID_START_FRAME;
				}
			}
		} else {
			rval = USB_INVALID_START_FRAME;
		}

	} else if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_XFER_ASAP) {
		start_frame = pp->pp_frame_num;

		if (start_frame == INVALID_FRNUM) {
			start_frame = current_frame + FRNUM_OFFSET;
		} else if (current_frame > start_frame) {
			start_frame = current_frame + FRNUM_OFFSET;
		}

		end_frame = start_frame + isoc_req->isoc_pkts_count;
		isoc_req->isoc_frame_no = start_frame;

	}

	if (rval != USB_SUCCESS) {

		/* Exit the critical section */
		ddi_exit_critical(ddic);

		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: Invalid starting frame number");

		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}

		while (tw->tw_hctd_head) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		for (i = 0; i < isoc_xfer_info->num_pools; i++) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i];
			error = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
			ASSERT(error == DDI_SUCCESS);
			ddi_dma_mem_free(&td_pool_ptr->mem_handle);
			ddi_dma_free_handle(&td_pool_ptr->dma_handle);
		}
		kmem_free(isoc_xfer_info->td_pools,
		    (sizeof (uhci_bulk_isoc_td_pool_t) *
		    isoc_xfer_info->num_pools));

		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	for (i = 0; i < expired_frames; i++) {
		isoc_req->isoc_pkt_descr[i].isoc_pkt_status =
		    USB_CR_NOT_ACCESSED;
		isoc_req->isoc_pkt_descr[i].isoc_pkt_actual_length =
		    isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		uhci_delete_td(uhcip, td_ptr);
		--isoc_xfer_info->num_tds;
	}

	/*
	 * Add the TDs to the HC list
	 */
	start_frame = (start_frame & 0x3ff);
	for (; i < isoc_req->isoc_pkts_count; i++) {
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		if (uhcip->uhci_isoc_q_tailp[start_frame]) {
			td_ptr->isoc_prev =
			    uhcip->uhci_isoc_q_tailp[start_frame];
			td_ptr->isoc_next = NULL;
			td_ptr->link_ptr =
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr;
			uhcip->uhci_isoc_q_tailp[start_frame]->isoc_next =
			    td_ptr;
			SetTD32(uhcip,
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr,
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
		} else {
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
			td_ptr->isoc_next = NULL;
			td_ptr->isoc_prev = NULL;
			SetTD32(uhcip, td_ptr->link_ptr,
			    GetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame]));
			SetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame],
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
		}
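
		/*
		 * Record the frame list entry this TD was queued on; it is
		 * used by uhci_delete_isoc_td() to unlink the TD later.
		 * Consecutive packets go into consecutive entries, wrapping
		 * around the NUM_FRAME_LST_ENTRIES-entry frame list.
		 */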
		td_ptr->starting_frame = start_frame;

		if (++start_frame == NUM_FRAME_LST_ENTRIES)
			start_frame = 0;
	}

	ddi_exit_critical(ddic);
	pp->pp_frame_num = end_frame;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: current frame number 0x%llx, pipe frame num"
	    " 0x%llx", (unsigned long long)current_frame,
	    (unsigned long long)(pp->pp_frame_num));

	return (rval);
}


/*
 * uhci_get_isoc_td_by_index:
 *	Obtain the addresses of the TD pool and the TD at the index.
 *
 * tdpp - pointer to the address of the TD at the isoc packet index
 * td_pool_pp - pointer to the address of the TD pool containing
 *              the specified TD
 */
/* ARGSUSED */
static void
uhci_get_isoc_td_by_index(
	uhci_state_t		*uhcip,
	uhci_bulk_isoc_xfer_t	*info,
	uint_t			index,
	uhci_td_t		**tdpp,
	uhci_bulk_isoc_td_pool_t **td_pool_pp)
{
	uint_t		i = 0, j = 0;
	uhci_td_t	*td_ptr;

	while (j < info->num_pools) {
		if ((i + info->td_pools[j].num_tds) <= index) {
			i += info->td_pools[j].num_tds;
			j++;
		} else {
			i = index - i;

			break;
		}
	}

	ASSERT(j < info->num_pools);
	*td_pool_pp = &info->td_pools[j];
	td_ptr = (uhci_td_t *)((*td_pool_pp)->pool_addr);
	*tdpp = &td_ptr[i];
}


/*
 * uhci_handle_isoc_td:
 *	Handles the completed isoc TDs
 */
void
uhci_handle_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint_t			rval, i;
	uint32_t		pkt_index = td->isoc_pkt_index;
	usb_cr_t		cr;
	uhci_trans_wrapper_t	*tw = td->tw;
	usb_isoc_req_t		*isoc_req = (usb_isoc_req_t *)tw->tw_isoc_req;
	uhci_pipe_private_t	*pp = tw->tw_pipe_private;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info = &tw->tw_xfer_info;
	usba_pipe_handle_data_t	*usb_pp;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_handle_isoc_td: td = 0x%p, pp = 0x%p, tw = 0x%p, req = 0x%p, "
	    "index = %x", (void *)td, (void *)pp, (void *)tw, (void *)isoc_req,
	    pkt_index);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	usb_pp = pp->pp_pipe_handle;

	/*
	 * Check whether any errors occurred. If so, update the error
	 * count and pass it up to the client driver, but never report
	 * a non-zero completion reason.
	 */
	cr = USB_CR_OK;
	if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_handle_isoc_td: Error Occurred: TD Status = %x",
		    GetTD_status(uhcip, td));
		if (isoc_req != NULL) {
			isoc_req->isoc_error_count++;
		}
	}

	if (isoc_req != NULL) {
		isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_status = cr;
		isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_actual_length =
		    (GetTD_alen(uhcip, td) == ZERO_LENGTH) ? 0 :
		    GetTD_alen(uhcip, td) + 1;
	}
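	/*
	 * Note: the hardware reports the actual length as (bytes - 1),
	 * with ZERO_LENGTH denoting a zero-byte transfer, hence the
	 * "+ 1" adjustment above.
	 */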
	uhci_delete_isoc_td(uhcip, td);

	if (--isoc_xfer_info->num_tds != 0) {
		USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_handle_isoc_td: Number of TDs %d",
		    isoc_xfer_info->num_tds);

		return;
	}

	tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;
	if (tw->tw_direction == PID_IN) {
		uhci_sendup_td_message(uhcip, cr, tw);

		if ((uhci_handle_isoc_receive(uhcip, pp, tw)) != USB_SUCCESS) {
			USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
			    "uhci_handle_isoc_td: Drop message");
		}

	} else {
		/* update kstats only for OUT. sendup_td_msg() does it for IN */
		uhci_do_byte_stats(uhcip, tw->tw_length,
		    usb_pp->p_ep.bmAttributes, usb_pp->p_ep.bEndpointAddress);

		uhci_hcdi_callback(uhcip, pp, usb_pp, tw, USB_CR_OK);
	}

	for (i = 0; i < isoc_xfer_info->num_pools; i++) {
		td_pool_ptr = &isoc_xfer_info->td_pools[i];
		rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
		ASSERT(rval == DDI_SUCCESS);
		ddi_dma_mem_free(&td_pool_ptr->mem_handle);
		ddi_dma_free_handle(&td_pool_ptr->dma_handle);
	}
	kmem_free(isoc_xfer_info->td_pools,
	    (sizeof (uhci_bulk_isoc_td_pool_t) *
	    isoc_xfer_info->num_pools));
	uhci_deallocate_tw(uhcip, pp, tw);
}


/*
 * uhci_handle_isoc_receive:
 *	- Sends the isoc data to the client
 *	- Inserts another isoc receive request
 */
static int
uhci_handle_isoc_receive(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw)
{
	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_handle_isoc_receive: tw = 0x%p", (void *)tw);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * -- check that the pipe state is polling before
	 * inserting a new request. Check when TD
	 * de-allocation is being done
	 * (so we can reuse the same TD)
	 */
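	/*
	 * Restarting polling below re-runs uhci_insert_isoc_td(), which
	 * allocates a fresh transfer wrapper and TDs for the next polling
	 * interval.
	 */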
	if (uhci_start_isoc_receive_polling(uhcip,
	    pp->pp_pipe_handle, (usb_isoc_req_t *)tw->tw_curr_xfer_reqp,
	    0) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_handle_isoc_receive: receive polling failed");

		return (USB_FAILURE);
	}

	return (USB_SUCCESS);
}


/*
 * uhci_delete_isoc_td:
 *	- Delete from the outstanding command queue
 *	- Delete from the tw queue
 *	- Delete from the isoc queue
 *	- Delete from the HOST CONTROLLER list
 */
static void
uhci_delete_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint32_t	starting_frame = td->starting_frame;

	if ((td->isoc_next == NULL) && (td->isoc_prev == NULL)) {
		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
		    GetTD32(uhcip, td->link_ptr));
		uhcip->uhci_isoc_q_tailp[starting_frame] = 0;
	} else if (td->isoc_next == NULL) {
		td->isoc_prev->link_ptr = td->link_ptr;
		td->isoc_prev->isoc_next = NULL;
		uhcip->uhci_isoc_q_tailp[starting_frame] = td->isoc_prev;
	} else if (td->isoc_prev == NULL) {
		td->isoc_next->isoc_prev = NULL;
		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
		    GetTD32(uhcip, td->link_ptr));
	} else {
		td->isoc_prev->isoc_next = td->isoc_next;
		td->isoc_next->isoc_prev = td->isoc_prev;
		td->isoc_prev->link_ptr = td->link_ptr;
	}

	uhci_delete_td(uhcip, td);
}


/*
 * uhci_start_isoc_receive_polling:
 *	- Allocates the usb_isoc_request
 *	- Updates the isoc request
 *	- Inserts the isoc TDs into the HC processing list.
 */
int
uhci_start_isoc_receive_polling(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_isoc_req_t		*isoc_req,
	usb_flags_t		usb_flags)
{
	int			ii, error;
	size_t			max_isoc_xfer_size, length, isoc_pkts_length;
	ushort_t		isoc_pkt_count;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	usb_isoc_pkt_descr_t	*isoc_pkt_descr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_start_isoc_receive_polling: usb_flags = %x", usb_flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	max_isoc_xfer_size = ph->p_ep.wMaxPacketSize * UHCI_MAX_ISOC_PKTS;

	if (isoc_req) {
		isoc_pkt_descr = isoc_req->isoc_pkt_descr;
		isoc_pkt_count = isoc_req->isoc_pkts_count;
		isoc_pkts_length = isoc_req->isoc_pkts_length;
	} else {
		isoc_pkt_descr = ((usb_isoc_req_t *)
		    pp->pp_client_periodic_in_reqp)->isoc_pkt_descr;
		isoc_pkt_count = ((usb_isoc_req_t *)
		    pp->pp_client_periodic_in_reqp)->isoc_pkts_count;
		isoc_pkts_length = ((usb_isoc_req_t *)
		    pp->pp_client_periodic_in_reqp)->isoc_pkts_length;
	}

	for (ii = 0, length = 0; ii < isoc_pkt_count; ii++) {
		length += isoc_pkt_descr->isoc_pkt_length;
		isoc_pkt_descr++;
	}

	if ((isoc_pkts_length) && (isoc_pkts_length != length)) {

		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_start_isoc_receive_polling: isoc_pkts_length 0x%lx "
		    "is not equal to the sum of all pkt lengths 0x%lx in "
		    "an isoc request", isoc_pkts_length, length);

		return (USB_FAILURE);
	}

	/* Check the size of the isochronous request */
	if (length > max_isoc_xfer_size) {
		USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_start_isoc_receive_polling: "
		    "Max isoc request size = %lx, Given isoc req size = %lx",
		    max_isoc_xfer_size, length);

		return (USB_FAILURE);
	}

	/* Add the TDs into the Host Controller's isoc list */
	error = uhci_insert_isoc_td(uhcip, ph, isoc_req, length, usb_flags);

	return (error);
}


/*
 * uhci_remove_isoc_tds_tws
 *	This routine scans the pipe, removes all the TDs and transfer
 *	wrappers, and deallocates the memory associated with those TDs
 *	and TWs.
 */
void
uhci_remove_isoc_tds_tws(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uint_t			rval, i;
	uhci_td_t		*tmp_td, *td_head;
	usb_isoc_req_t		*isoc_req;
	uhci_trans_wrapper_t	*tmp_tw, *tw_head;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_remove_isoc_tds_tws: pp = %p", (void *)pp);

	tw_head = pp->pp_tw_head;
	while (tw_head) {
		tmp_tw = tw_head;
		tw_head = tw_head->tw_next;
		td_head = tmp_tw->tw_hctd_head;
		if (tmp_tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp,
			    tmp_tw);
		} else if (tmp_tw->tw_direction == PID_OUT) {
			uhci_hcdi_callback(uhcip, pp, pp->pp_pipe_handle,
			    tmp_tw, USB_CR_FLUSHED);
		}

		while (td_head) {
			tmp_td = td_head;
			td_head = td_head->tw_td_next;
			uhci_delete_isoc_td(uhcip, tmp_td);
		}

		isoc_req = (usb_isoc_req_t *)tmp_tw->tw_isoc_req;
		if (isoc_req) {
			usb_free_isoc_req(isoc_req);
		}

		ASSERT(tmp_tw->tw_hctd_head == NULL);

		if (tmp_tw->tw_xfer_info.td_pools) {
			isoc_xfer_info =
			    (uhci_bulk_isoc_xfer_t *)&tmp_tw->tw_xfer_info;
			for (i = 0; i < isoc_xfer_info->num_pools; i++) {
				td_pool_ptr = &isoc_xfer_info->td_pools[i];
				rval = ddi_dma_unbind_handle(
				    td_pool_ptr->dma_handle);
				ASSERT(rval == DDI_SUCCESS);
				ddi_dma_mem_free(&td_pool_ptr->mem_handle);
				ddi_dma_free_handle(&td_pool_ptr->dma_handle);
			}
			kmem_free(isoc_xfer_info->td_pools,
			    (sizeof (uhci_bulk_isoc_td_pool_t) *
			    isoc_xfer_info->num_pools));
		}

		uhci_deallocate_tw(uhcip, pp, tmp_tw);
	}
}


/*
 * uhci_isoc_update_sw_frame_number()
 *	To avoid code duplication, call uhci_get_sw_frame_number().
 */
void
uhci_isoc_update_sw_frame_number(uhci_state_t *uhcip)
{
	(void) uhci_get_sw_frame_number(uhcip);
}


/*
 * uhci_get_sw_frame_number:
 *	Hold the uhci_int_mutex before calling this routine.
 */
uint64_t
uhci_get_sw_frame_number(uhci_state_t *uhcip)
{
	uint64_t sw_frnum, hw_frnum, current_frnum;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	sw_frnum = uhcip->uhci_sw_frnum;
	hw_frnum = Get_OpReg16(FRNUM);

	/*
	 * Check bit 10 in the software counter and the hardware frame
	 * counter. If both are the same, then don't increment the software
	 * frame counter (bit 10 of the hw frame counter toggles every 1024
	 * frames). The lower 11 bits of the software counter contain the
	 * hardware frame counter value. The upper bits of the software
	 * counter are incremented by one for every 1024 frames, either
	 * here or in the interrupt handler.
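	 *
	 * Illustrative example (hypothetical values, assuming an 11-bit
	 * hardware counter): if the software counter holds 0x0802 while
	 * the hardware counter has advanced to 0x400, bit 10 differs, so
	 * the 1024-frame epoch count (0x0802 >> 10 = 2) is incremented
	 * and the routine returns (3 << 10) | 0x400 = 0xc00.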
	 */
	if ((sw_frnum & UHCI_BIT_10_MASK) == (hw_frnum & UHCI_BIT_10_MASK)) {
		/* The MSB of the hw counter did not toggle */
		current_frnum = ((sw_frnum & (SW_FRNUM_MASK)) | hw_frnum);
	} else {
		/*
		 * The hw counter wrapped around, and the interrupt handler
		 * did not get a chance to update the sw frame counter.
		 * So update the sw frame counter and return the correct
		 * frame number.
		 */
		sw_frnum >>= UHCI_SIZE_OF_HW_FRNUM - 1;
		current_frnum =
		    ((++sw_frnum << (UHCI_SIZE_OF_HW_FRNUM - 1)) | hw_frnum);
	}
	uhcip->uhci_sw_frnum = current_frnum;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_get_sw_frame_number: sw=%lld hd=%lld",
	    (unsigned long long)(uhcip->uhci_sw_frnum),
	    (unsigned long long)hw_frnum);

	return (current_frnum);
}


/*
 * uhci_cmd_timeout_hdlr:
 *	This routine gets called every second. It checks for timed-out
 *	control/bulk commands and times out any command that has exceeded
 *	the timeout period specified by the pipe policy.
 */
void
uhci_cmd_timeout_hdlr(void *arg)
{
	uint_t			flag = B_FALSE;
	uhci_td_t		*head, *tmp_td;
	uhci_state_t		*uhcip = (uhci_state_t *)arg;
	uhci_pipe_private_t	*pp;

	/*
	 * Check whether any of the control xfers have timed out.
	 * If so, complete those commands with timeout as the reason.
	 */
	mutex_enter(&uhcip->uhci_int_mutex);
	head = uhcip->uhci_outst_tds_head;

	while (head) {
		/*
		 * If the timeout is zero, then don't time out the command.
		 */
		if (head->tw->tw_timeout_cnt == 0) {
			head = head->outst_td_next;
			continue;
		}

		if (!(head->tw->tw_flags & TW_TIMEOUT_FLAG)) {
			head->tw->tw_flags |= TW_TIMEOUT_FLAG;
			--head->tw->tw_timeout_cnt;
		}

		/* only do it for bulk and control TDs */
		if ((head->tw->tw_timeout_cnt == 0) &&
		    (head->tw->tw_handle_td != uhci_handle_isoc_td)) {

			USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "Command timed out: td = %p", (void *)head);

			head->tw->tw_claim = UHCI_TIMEOUT_HDLR_CLAIMED;

			/*
			 * Check finally whether the command completed
			 */
			if (GetTD_status(uhcip, head) & UHCI_TD_ACTIVE) {
				SetTD32(uhcip, head->link_ptr,
				    GetTD32(uhcip, head->link_ptr) |
				    HC_END_OF_LIST);
				pp = head->tw->tw_pipe_private;
				SetQH32(uhcip, pp->pp_qh->element_ptr,
				    GetQH32(uhcip, pp->pp_qh->element_ptr) |
				    HC_END_OF_LIST);
			}

			flag = B_TRUE;
		}

		head = head->outst_td_next;
	}

	if (flag) {
		(void) uhci_wait_for_sof(uhcip);
	}

	head = uhcip->uhci_outst_tds_head;
	while (head) {
		if (head->tw->tw_flags & TW_TIMEOUT_FLAG) {
			head->tw->tw_flags &= ~TW_TIMEOUT_FLAG;
		}
		if (head->tw->tw_claim == UHCI_TIMEOUT_HDLR_CLAIMED) {
			head->tw->tw_claim = UHCI_NOT_CLAIMED;
			tmp_td = head->tw->tw_hctd_head;
			while (tmp_td) {
				SetTD_status(uhcip, tmp_td,
				    UHCI_TD_CRC_TIMEOUT);
				tmp_td = tmp_td->tw_td_next;
			}
		}
		head = head->outst_td_next;
	}

	/*
	 * Process the td which was completed before shifting from normal
	 * mode to polled mode
	 */
	if (uhcip->uhci_polled_flag == UHCI_POLLED_FLAG_TRUE) {
		uhci_process_submitted_td_queue(uhcip);
		uhcip->uhci_polled_flag = UHCI_POLLED_FLAG_FALSE;
	} else if (flag) {
		/* Process the completed/timed out commands */
		uhci_process_submitted_td_queue(uhcip);
	}

	/* Re-register the control/bulk/intr commands' timeout handler */
	if (uhcip->uhci_cmd_timeout_id) {
		uhcip->uhci_cmd_timeout_id = timeout(uhci_cmd_timeout_hdlr,
		    (void *)uhcip, UHCI_ONE_SECOND);
	}

	mutex_exit(&uhcip->uhci_int_mutex);
}


/*
 * uhci_wait_for_sof:
 *	Wait for the start of the next frame (implying any changes made in the
 *	lattice have now taken effect).
 *	To be sure this is the case, we wait for the completion of the current
 *	frame (which might have already been pending), then another complete
 *	frame to ensure everything has taken effect.
 */
int
uhci_wait_for_sof(uhci_state_t *uhcip)
{
	int			n, error;
	ushort_t		cmd_reg;
	usb_frame_number_t	before_frame_number, after_frame_number;
	clock_t			time, rval;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_wait_for_sof: uhcip = %p", (void *)uhcip);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	error = uhci_state_is_operational(uhcip);

	if (error != USB_SUCCESS) {

		return (error);
	}

	before_frame_number = uhci_get_sw_frame_number(uhcip);
	for (n = 0; n < MAX_SOF_WAIT_COUNT; n++) {
		SetTD_ioc(uhcip, uhcip->uhci_sof_td, 1);
		uhcip->uhci_cv_signal = B_TRUE;

		time = ddi_get_lbolt() + UHCI_ONE_SECOND;
		rval = cv_timedwait(&uhcip->uhci_cv_SOF,
		    &uhcip->uhci_int_mutex, time);

		after_frame_number = uhci_get_sw_frame_number(uhcip);
		if ((rval == -1) &&
		    (after_frame_number <= before_frame_number)) {
			cmd_reg = Get_OpReg16(USBCMD);
			Set_OpReg16(USBCMD, (cmd_reg | USBCMD_REG_HC_RUN));
			Set_OpReg16(USBINTR, ENABLE_ALL_INTRS);
			after_frame_number = uhci_get_sw_frame_number(uhcip);
		}
		before_frame_number = after_frame_number;
	}

	SetTD_ioc(uhcip, uhcip->uhci_sof_td, 0);

	return (uhcip->uhci_cv_signal ? USB_FAILURE : USB_SUCCESS);
}

/*
 * uhci_allocate_periodic_in_resource:
 *	Allocate an interrupt/isochronous request structure for an
 *	interrupt/isochronous IN transfer.
 */
int
uhci_allocate_periodic_in_resource(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw,
	usb_flags_t		flags)
{
	size_t			length = 0;
	usb_opaque_t		client_periodic_in_reqp;
	usb_intr_req_t		*cur_intr_req;
	usb_isoc_req_t		*curr_isoc_reqp;
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_allocate_periodic_in_resource:\n\t"
	    "ph = 0x%p, pp = 0x%p, tw = 0x%p, flags = 0x%x",
	    (void *)ph, (void *)pp, (void *)tw, flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Check the current periodic in request pointer */
	if (tw->tw_curr_xfer_reqp) {
		USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
		    "uhci_allocate_periodic_in_resource: Interrupt "
		    "request structure already exists: "
		    "allocation failed");

		return (USB_SUCCESS);
	}

	/* Get the client periodic in request pointer */
	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;

	/*
	 * If it is a periodic IN request and the periodic request is NULL,
	 * allocate the corresponding usb periodic IN request for the
	 * current periodic polling request and copy the information
	 * from the saved periodic request structure.
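	 *
	 * For an interrupt IN pipe, for example, each transfer gets its
	 * own copy of the client's request (via usba_hcdi_dup_intr_req()
	 * below), leaving pp_client_periodic_in_reqp intact for the next
	 * poll.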
	 */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_INTR) {
		/* Get the interrupt transfer length */
		length = ((usb_intr_req_t *)client_periodic_in_reqp)->
		    intr_len;

		cur_intr_req = usba_hcdi_dup_intr_req(ph->p_dip,
		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
		if (cur_intr_req == NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Interrupt "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/* Check and save the timeout value */
		tw->tw_timeout_cnt = (cur_intr_req->intr_attributes &
		    USB_ATTRS_ONE_XFER) ? cur_intr_req->intr_timeout : 0;
		tw->tw_curr_xfer_reqp = (usb_opaque_t)cur_intr_req;
		tw->tw_length = cur_intr_req->intr_len;
	} else {
		ASSERT(client_periodic_in_reqp != NULL);

		if ((curr_isoc_reqp = usba_hcdi_dup_isoc_req(ph->p_dip,
		    (usb_isoc_req_t *)client_periodic_in_reqp, flags)) ==
		    NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Isochronous "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/*
		 * Save the client's isochronous request pointer in the
		 * transfer wrapper. The dup'ed request is saved in
		 * pp_client_periodic_in_reqp.
		 */
		tw->tw_curr_xfer_reqp =
		    (usb_opaque_t)pp->pp_client_periodic_in_reqp;
		pp->pp_client_periodic_in_reqp = (usb_opaque_t)curr_isoc_reqp;
	}

	mutex_enter(&ph->p_mutex);
	ph->p_req_count++;
	mutex_exit(&ph->p_mutex);

	return (USB_SUCCESS);
}


/*
 * uhci_deallocate_periodic_in_resource:
 *	Deallocate the interrupt/isochronous request structure for an
 *	interrupt/isochronous IN transfer.
 */
void
uhci_deallocate_periodic_in_resource(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw)
{
	usb_opaque_t		curr_xfer_reqp;
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_deallocate_periodic_in_resource: "
	    "pp = 0x%p tw = 0x%p", (void *)pp, (void *)tw);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	curr_xfer_reqp = tw->tw_curr_xfer_reqp;
	if (curr_xfer_reqp) {
		/*
		 * Reset the periodic IN request and the usb isoc
		 * packet request pointers to NULL.
		 */
		tw->tw_curr_xfer_reqp = NULL;
		tw->tw_isoc_req = NULL;

		mutex_enter(&ph->p_mutex);
		ph->p_req_count--;
		mutex_exit(&ph->p_mutex);

		/*
		 * Free the pre-allocated interrupt or isochronous requests.
		 */
		switch (UHCI_XFER_TYPE(&ph->p_ep)) {
		case USB_EP_ATTR_INTR:
			usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
			break;
		case USB_EP_ATTR_ISOCH:
			usb_free_isoc_req((usb_isoc_req_t *)curr_xfer_reqp);
			break;
		}
	}
}


/*
 * uhci_hcdi_callback()
 *	Convenience wrapper around usba_hcdi_cb().
 */
void
uhci_hcdi_callback(uhci_state_t *uhcip, uhci_pipe_private_t *pp,
    usba_pipe_handle_data_t *ph, uhci_trans_wrapper_t *tw, usb_cr_t cr)
{
	usb_opaque_t	curr_xfer_reqp;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
	    (void *)ph, (void *)tw, cr);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (tw && tw->tw_curr_xfer_reqp) {
		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
		tw->tw_curr_xfer_reqp = NULL;
		tw->tw_isoc_req = NULL;
	} else {
		ASSERT(pp->pp_client_periodic_in_reqp != NULL);

		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
		pp->pp_client_periodic_in_reqp = NULL;
	}

	ASSERT(curr_xfer_reqp != NULL);

	mutex_exit(&uhcip->uhci_int_mutex);
	usba_hcdi_cb(ph, curr_xfer_reqp, cr);
	mutex_enter(&uhcip->uhci_int_mutex);
}


/*
 * uhci_state_is_operational:
 *
 *	Check the Host Controller state and return the proper value.
 */
int
uhci_state_is_operational(uhci_state_t *uhcip)
{
	int	val;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	switch (uhcip->uhci_hc_soft_state) {
	case UHCI_CTLR_INIT_STATE:
	case UHCI_CTLR_SUSPEND_STATE:
		val = USB_FAILURE;
		break;
	case UHCI_CTLR_OPERATIONAL_STATE:
		val = USB_SUCCESS;
		break;
	case UHCI_CTLR_ERROR_STATE:
		val = USB_HC_HARDWARE_ERROR;
		break;
	default:
		val = USB_FAILURE;
		break;
	}

	return (val);
}


#ifdef DEBUG
static void
uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint_t	*ptr = (uint_t *)td;

#ifndef lint
	_NOTE(NO_COMPETING_THREADS_NOW);
#endif
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tDWORD 1 0x%x\t DWORD 2 0x%x", ptr[0], ptr[1]);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tDWORD 3 0x%x\t DWORD 4 0x%x", ptr[2], ptr[3]);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tBytes xfered = %d", td->tw->tw_bytes_xfered);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tBytes Pending = %d", td->tw->tw_bytes_pending);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "Queue Head Details:");
	uhci_print_qh(uhcip, td->tw->tw_pipe_private->pp_qh);

#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif
}


static void
uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh)
{
	uint_t	*ptr = (uint_t *)qh;

	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tLink Ptr = %x Element Ptr = %x", ptr[0], ptr[1]);
}
#endif