1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * Universal Host Controller Driver (UHCI) 30 * 31 * The UHCI driver is a driver which interfaces to the Universal 32 * Serial Bus Driver (USBA) and the Host Controller (HC). The interface to 33 * the Host Controller is defined by the UHCI. 34 * This file contains misc functions. 
35 */ 36 #include <sys/usb/hcd/uhci/uhcid.h> 37 #include <sys/usb/hcd/uhci/uhciutil.h> 38 #include <sys/usb/hcd/uhci/uhcipolled.h> 39 40 #include <sys/disp.h> 41 42 /* Globals */ 43 extern uint_t uhci_td_pool_size; /* Num TDs */ 44 extern uint_t uhci_qh_pool_size; /* Num QHs */ 45 extern ushort_t uhci_tree_bottom_nodes[]; 46 extern void *uhci_statep; 47 48 /* function prototypes */ 49 static void uhci_build_interrupt_lattice(uhci_state_t *uhcip); 50 static int uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip); 51 52 static uint_t uhci_lattice_height(uint_t bandwidth); 53 static uint_t uhci_lattice_parent(uint_t node); 54 static uint_t uhci_leftmost_leaf(uint_t node, uint_t height); 55 static uint_t uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint, 56 usb_port_status_t port_status); 57 58 static int uhci_bandwidth_adjust(uhci_state_t *uhcip, 59 usb_ep_descr_t *endpoint, usb_port_status_t port_status); 60 61 static uhci_td_t *uhci_allocate_td_from_pool(uhci_state_t *uhcip); 62 static void uhci_fill_in_td(uhci_state_t *uhcip, 63 uhci_td_t *td, uhci_td_t *current_dummy, 64 uint32_t buffer_offset, size_t length, 65 uhci_pipe_private_t *pp, uchar_t PID, 66 usb_req_attrs_t attrs, uhci_trans_wrapper_t *tw); 67 static uint32_t uhci_get_tw_paddr_by_offs(uhci_state_t *uhcip, 68 uint32_t buffer_offset, size_t length, 69 uhci_trans_wrapper_t *tw); 70 static uhci_trans_wrapper_t *uhci_create_transfer_wrapper( 71 uhci_state_t *uhcip, uhci_pipe_private_t *pp, 72 size_t length, usb_flags_t usb_flags); 73 static uhci_trans_wrapper_t *uhci_create_isoc_transfer_wrapper( 74 uhci_state_t *uhcip, uhci_pipe_private_t *pp, 75 usb_isoc_req_t *req, size_t length, 76 usb_flags_t usb_flags); 77 78 static int uhci_create_setup_pkt(uhci_state_t *uhcip, 79 uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw); 80 static void uhci_insert_ctrl_qh(uhci_state_t *uhcip, 81 uhci_pipe_private_t *pp); 82 static void uhci_remove_ctrl_qh(uhci_state_t *uhcip, 83 uhci_pipe_private_t *pp); 84 
static void uhci_insert_intr_qh(uhci_state_t *uhcip, 85 uhci_pipe_private_t *pp); 86 static void uhci_remove_intr_qh(uhci_state_t *uhcip, 87 uhci_pipe_private_t *pp); 88 static void uhci_remove_bulk_qh(uhci_state_t *uhcip, 89 uhci_pipe_private_t *pp); 90 static void uhci_insert_bulk_qh(uhci_state_t *uhcip, 91 uhci_pipe_private_t *pp); 92 static void uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td); 93 static int uhci_alloc_memory_for_tds(uhci_state_t *uhcip, uint_t num_tds, 94 uhci_bulk_isoc_xfer_t *info); 95 static int uhci_alloc_bulk_isoc_tds(uhci_state_t *uhcip, uint_t num_tds, 96 uhci_bulk_isoc_xfer_t *info); 97 static void uhci_get_isoc_td_by_index(uhci_state_t *uhcip, 98 uhci_bulk_isoc_xfer_t *info, uint_t index, 99 uhci_td_t **tdpp, uhci_bulk_isoc_td_pool_t **td_pool_pp); 100 static void uhci_get_bulk_td_by_paddr(uhci_state_t *uhcip, 101 uhci_bulk_isoc_xfer_t *info, uint32_t paddr, 102 uhci_bulk_isoc_td_pool_t **td_pool_pp); 103 104 static int uhci_handle_isoc_receive(uhci_state_t *uhcip, 105 uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw); 106 static void uhci_delete_isoc_td(uhci_state_t *uhcip, 107 uhci_td_t *td); 108 #ifdef DEBUG 109 static void uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td); 110 static void uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh); 111 #endif 112 113 114 /* 115 * uhci_build_interrupt_lattice: 116 * 117 * Construct the interrupt lattice tree using static Queue Head pointers. 118 * This interrupt lattice tree will have total of 63 queue heads and the 119 * Host Controller (HC) processes queue heads every frame. 
120 */ 121 static void 122 uhci_build_interrupt_lattice(uhci_state_t *uhcip) 123 { 124 int half_list = NUM_INTR_QH_LISTS / 2; 125 uint16_t i, j, k; 126 uhci_td_t *sof_td, *isoc_td; 127 uintptr_t addr; 128 queue_head_t *list_array = uhcip->uhci_qh_pool_addr; 129 queue_head_t *tmp_qh; 130 frame_lst_table_t *frame_lst_tablep = 131 uhcip->uhci_frame_lst_tablep; 132 133 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 134 "uhci_build_interrupt_lattice:"); 135 136 /* 137 * Reserve the first 63 queue head structures in the pool as static 138 * queue heads & these are required for constructing interrupt 139 * lattice tree. 140 */ 141 for (i = 0; i < NUM_INTR_QH_LISTS; i++) { 142 SetQH32(uhcip, list_array[i].link_ptr, HC_END_OF_LIST); 143 SetQH32(uhcip, list_array[i].element_ptr, HC_END_OF_LIST); 144 list_array[i].qh_flag = QUEUE_HEAD_FLAG_STATIC; 145 list_array[i].node = i; 146 } 147 148 /* Build the interrupt lattice tree */ 149 for (i = 0; i < half_list - 1; i++) { 150 /* 151 * The next pointer in the host controller queue head 152 * descriptor must contain an iommu address. Calculate 153 * the offset into the cpu address and add this to the 154 * starting iommu address. 155 */ 156 addr = QH_PADDR(&list_array[i]) | HC_QUEUE_HEAD; 157 158 SetQH32(uhcip, list_array[2*i + 1].link_ptr, addr); 159 SetQH32(uhcip, list_array[2*i + 2].link_ptr, addr); 160 } 161 162 /* 163 * Initialize the interrupt list in the Frame list Table 164 * so that it points to the bottom of the tree. 
165 */ 166 for (i = 0, j = 0; i < pow_2(TREE_HEIGHT); i++) { 167 addr = QH_PADDR(&list_array[half_list + i - 1]); 168 for (k = 0; k < pow_2(VIRTUAL_TREE_HEIGHT); k++) { 169 SetFL32(uhcip, 170 frame_lst_tablep[uhci_tree_bottom_nodes[j++]], 171 addr | HC_QUEUE_HEAD); 172 } 173 } 174 175 /* 176 * Create a controller and bulk Queue heads 177 */ 178 uhcip->uhci_ctrl_xfers_q_head = uhci_alloc_queue_head(uhcip); 179 tmp_qh = uhcip->uhci_ctrl_xfers_q_tail = uhcip->uhci_ctrl_xfers_q_head; 180 181 SetQH32(uhcip, list_array[0].link_ptr, 182 (QH_PADDR(tmp_qh) | HC_QUEUE_HEAD)); 183 184 uhcip->uhci_bulk_xfers_q_head = uhci_alloc_queue_head(uhcip); 185 uhcip->uhci_bulk_xfers_q_tail = uhcip->uhci_bulk_xfers_q_head; 186 SetQH32(uhcip, tmp_qh->link_ptr, 187 (QH_PADDR(uhcip->uhci_bulk_xfers_q_head)|HC_QUEUE_HEAD)); 188 189 SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_head->link_ptr, HC_END_OF_LIST); 190 191 /* 192 * Add a dummy TD to the static queue head 0. THis is used 193 * to generate an at the end of frame. 194 */ 195 sof_td = uhci_allocate_td_from_pool(uhcip); 196 197 SetQH32(uhcip, list_array[0].element_ptr, 198 TD_PADDR(sof_td) | HC_TD_HEAD); 199 SetTD32(uhcip, sof_td->link_ptr, HC_END_OF_LIST); 200 uhcip->uhci_sof_td = sof_td; 201 202 /* 203 * Add a dummy td that is used to generate an interrupt for 204 * every 1024 frames. 
205 */ 206 isoc_td = uhci_allocate_td_from_pool(uhcip); 207 SetTD32(uhcip, isoc_td->link_ptr, HC_END_OF_LIST); 208 uhcip->uhci_isoc_td = isoc_td; 209 210 uhcip->uhci_isoc_qh = uhci_alloc_queue_head(uhcip); 211 SetQH32(uhcip, uhcip->uhci_isoc_qh->link_ptr, 212 GetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM])); 213 SetQH32(uhcip, uhcip->uhci_isoc_qh->element_ptr, TD_PADDR(isoc_td)); 214 SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM], 215 QH_PADDR(uhcip->uhci_isoc_qh) | HC_QUEUE_HEAD); 216 } 217 218 219 /* 220 * uhci_allocate_pools: 221 * Allocate the system memory for the Queue Heads Descriptor and 222 * for the Transfer Descriptor (TD) pools. Both QH and TD structures 223 * must be aligned to a 16 byte boundary. 224 */ 225 int 226 uhci_allocate_pools(uhci_state_t *uhcip) 227 { 228 dev_info_t *dip = uhcip->uhci_dip; 229 size_t real_length; 230 int i, result; 231 uint_t ccount; 232 ddi_device_acc_attr_t dev_attr; 233 234 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 235 "uhci_allocate_pools:"); 236 237 /* The host controller will be little endian */ 238 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 239 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 240 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 241 242 /* Allocate the TD pool DMA handle */ 243 if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0, 244 &uhcip->uhci_td_pool_dma_handle) != DDI_SUCCESS) { 245 246 return (USB_FAILURE); 247 } 248 249 /* Allocate the memory for the TD pool */ 250 if (ddi_dma_mem_alloc(uhcip->uhci_td_pool_dma_handle, 251 uhci_td_pool_size * sizeof (uhci_td_t), 252 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0, 253 (caddr_t *)&uhcip->uhci_td_pool_addr, &real_length, 254 &uhcip->uhci_td_pool_mem_handle)) { 255 256 return (USB_FAILURE); 257 } 258 259 /* Map the TD pool into the I/O address space */ 260 result = ddi_dma_addr_bind_handle(uhcip->uhci_td_pool_dma_handle, 261 NULL, (caddr_t)uhcip->uhci_td_pool_addr, 
real_length, 262 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 263 NULL, &uhcip->uhci_td_pool_cookie, &ccount); 264 265 bzero((void *)uhcip->uhci_td_pool_addr, 266 uhci_td_pool_size * sizeof (uhci_td_t)); 267 268 /* Process the result */ 269 if (result == DDI_DMA_MAPPED) { 270 /* The cookie count should be 1 */ 271 if (ccount != 1) { 272 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 273 "uhci_allocate_pools: More than 1 cookie"); 274 275 return (USB_FAILURE); 276 } 277 } else { 278 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 279 "uhci_allocate_pools: Result = %d", result); 280 281 uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result); 282 283 return (USB_FAILURE); 284 } 285 286 uhcip->uhci_dma_addr_bind_flag |= UHCI_TD_POOL_BOUND; 287 288 /* Initialize the TD pool */ 289 for (i = 0; i < uhci_td_pool_size; i++) { 290 uhcip->uhci_td_pool_addr[i].flag = TD_FLAG_FREE; 291 } 292 293 /* Allocate the TD pool DMA handle */ 294 if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 295 0, &uhcip->uhci_qh_pool_dma_handle) != DDI_SUCCESS) { 296 297 return (USB_FAILURE); 298 } 299 300 /* Allocate the memory for the QH pool */ 301 if (ddi_dma_mem_alloc(uhcip->uhci_qh_pool_dma_handle, 302 uhci_qh_pool_size * sizeof (queue_head_t), 303 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0, 304 (caddr_t *)&uhcip->uhci_qh_pool_addr, &real_length, 305 &uhcip->uhci_qh_pool_mem_handle) != DDI_SUCCESS) { 306 307 return (USB_FAILURE); 308 } 309 310 result = ddi_dma_addr_bind_handle(uhcip->uhci_qh_pool_dma_handle, 311 NULL, (caddr_t)uhcip->uhci_qh_pool_addr, real_length, 312 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 313 &uhcip->uhci_qh_pool_cookie, &ccount); 314 315 /* Process the result */ 316 if (result == DDI_DMA_MAPPED) { 317 /* The cookie count should be 1 */ 318 if (ccount != 1) { 319 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 320 "uhci_allocate_pools: More than 1 cookie"); 321 322 return (USB_FAILURE); 323 } 324 } else { 325 
uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result); 326 327 return (USB_FAILURE); 328 } 329 330 uhcip->uhci_dma_addr_bind_flag |= UHCI_QH_POOL_BOUND; 331 332 bzero((void *)uhcip->uhci_qh_pool_addr, 333 uhci_qh_pool_size * sizeof (queue_head_t)); 334 335 /* Initialize the QH pool */ 336 for (i = 0; i < uhci_qh_pool_size; i ++) { 337 uhcip->uhci_qh_pool_addr[i].qh_flag = QUEUE_HEAD_FLAG_FREE; 338 } 339 340 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 341 "uhci_allocate_pools: Completed"); 342 343 return (USB_SUCCESS); 344 } 345 346 347 /* 348 * uhci_free_pools: 349 * Cleanup on attach failure or detach 350 */ 351 void 352 uhci_free_pools(uhci_state_t *uhcip) 353 { 354 int i, flag, rval; 355 uhci_td_t *td; 356 uhci_trans_wrapper_t *tw; 357 358 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 359 "uhci_free_pools:"); 360 361 if (uhcip->uhci_td_pool_addr && uhcip->uhci_td_pool_mem_handle) { 362 for (i = 0; i < uhci_td_pool_size; i ++) { 363 td = &uhcip->uhci_td_pool_addr[i]; 364 365 flag = uhcip->uhci_td_pool_addr[i].flag; 366 if ((flag != TD_FLAG_FREE) && 367 (flag != TD_FLAG_DUMMY) && (td->tw != NULL)) { 368 tw = td->tw; 369 uhci_free_tw(uhcip, tw); 370 } 371 372 } 373 374 if (uhcip->uhci_dma_addr_bind_flag & UHCI_TD_POOL_BOUND) { 375 rval = ddi_dma_unbind_handle( 376 uhcip->uhci_td_pool_dma_handle); 377 ASSERT(rval == DDI_SUCCESS); 378 } 379 380 ddi_dma_mem_free(&uhcip->uhci_td_pool_mem_handle); 381 } 382 383 /* Free the TD pool */ 384 if (uhcip->uhci_td_pool_dma_handle) { 385 ddi_dma_free_handle(&uhcip->uhci_td_pool_dma_handle); 386 } 387 388 if (uhcip->uhci_qh_pool_addr && uhcip->uhci_qh_pool_mem_handle) { 389 if (uhcip->uhci_dma_addr_bind_flag & UHCI_QH_POOL_BOUND) { 390 rval = ddi_dma_unbind_handle( 391 uhcip->uhci_qh_pool_dma_handle); 392 ASSERT(rval == DDI_SUCCESS); 393 } 394 ddi_dma_mem_free(&uhcip->uhci_qh_pool_mem_handle); 395 } 396 397 /* Free the QH pool */ 398 if (uhcip->uhci_qh_pool_dma_handle) { 399 
ddi_dma_free_handle(&uhcip->uhci_qh_pool_dma_handle); 400 } 401 402 /* Free the Frame list Table area */ 403 if (uhcip->uhci_frame_lst_tablep && uhcip->uhci_flt_mem_handle) { 404 if (uhcip->uhci_dma_addr_bind_flag & UHCI_FLA_POOL_BOUND) { 405 rval = ddi_dma_unbind_handle( 406 uhcip->uhci_flt_dma_handle); 407 ASSERT(rval == DDI_SUCCESS); 408 } 409 ddi_dma_mem_free(&uhcip->uhci_flt_mem_handle); 410 } 411 412 if (uhcip->uhci_flt_dma_handle) { 413 ddi_dma_free_handle(&uhcip->uhci_flt_dma_handle); 414 } 415 } 416 417 418 /* 419 * uhci_decode_ddi_dma_addr_bind_handle_result: 420 * Process the return values of ddi_dma_addr_bind_handle() 421 */ 422 void 423 uhci_decode_ddi_dma_addr_bind_handle_result(uhci_state_t *uhcip, int result) 424 { 425 char *msg; 426 427 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 428 "uhci_decode_ddi_dma_addr_bind_handle_result:"); 429 430 switch (result) { 431 case DDI_DMA_PARTIAL_MAP: 432 msg = "Partial transfers not allowed"; 433 break; 434 case DDI_DMA_INUSE: 435 msg = "Handle is in use"; 436 break; 437 case DDI_DMA_NORESOURCES: 438 msg = "No resources"; 439 break; 440 case DDI_DMA_NOMAPPING: 441 msg = "No mapping"; 442 break; 443 case DDI_DMA_TOOBIG: 444 msg = "Object is too big"; 445 break; 446 default: 447 msg = "Unknown dma error"; 448 } 449 450 USB_DPRINTF_L4(PRINT_MASK_ALL, uhcip->uhci_log_hdl, "%s", msg); 451 } 452 453 454 /* 455 * uhci_init_ctlr: 456 * Initialize the Host Controller (HC). 457 */ 458 int 459 uhci_init_ctlr(uhci_state_t *uhcip) 460 { 461 dev_info_t *dip = uhcip->uhci_dip; 462 uint_t cmd_reg; 463 uint_t frame_base_addr; 464 465 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_init_ctlr:"); 466 467 /* 468 * When USB legacy mode is enabled, the BIOS manages the USB keyboard 469 * attached to the UHCI controller. It has been observed that some 470 * times the BIOS does not clear the interrupts in the legacy mode 471 * register in the PCI configuration space. 
So, disable the SMI intrs 472 * and route the intrs to PIRQD here. 473 */ 474 pci_config_put16(uhcip->uhci_config_handle, 475 LEGACYMODE_REG_OFFSET, LEGACYMODE_REG_INIT_VALUE); 476 477 /* 478 * Disable all the interrupts. 479 */ 480 Set_OpReg16(USBINTR, DISABLE_ALL_INTRS); 481 482 mutex_enter(&uhcip->uhci_int_mutex); 483 cmd_reg = Get_OpReg16(USBCMD); 484 cmd_reg &= (~USBCMD_REG_HC_RUN); 485 486 /* Stop the controller */ 487 Set_OpReg16(USBCMD, cmd_reg); 488 489 /* Reset the host controller */ 490 Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET); 491 492 /* Wait 10ms for reset to complete */ 493 mutex_exit(&uhcip->uhci_int_mutex); 494 delay(drv_usectohz(UHCI_RESET_DELAY)); 495 mutex_enter(&uhcip->uhci_int_mutex); 496 497 Set_OpReg16(USBCMD, 0); 498 499 /* Set the frame number to zero */ 500 Set_OpReg16(FRNUM, 0); 501 502 /* Initialize the Frame list base address area */ 503 if (uhci_init_frame_lst_table(dip, uhcip) != USB_SUCCESS) { 504 mutex_exit(&uhcip->uhci_int_mutex); 505 506 return (USB_FAILURE); 507 } 508 509 /* Save the contents of the Frame Interval Registers */ 510 uhcip->uhci_frame_interval = Get_OpReg8(SOFMOD); 511 512 frame_base_addr = uhcip->uhci_flt_cookie.dmac_address; 513 514 /* Set the Frame list base address */ 515 Set_OpReg32(FRBASEADD, frame_base_addr); 516 517 /* 518 * Begin sending SOFs 519 * Set the Host Controller Functional State to Operational 520 */ 521 cmd_reg = Get_OpReg16(USBCMD); 522 cmd_reg |= (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 | 523 USBCMD_REG_CONFIG_FLAG); 524 525 Set_OpReg16(USBCMD, cmd_reg); 526 mutex_exit(&uhcip->uhci_int_mutex); 527 528 /* 529 * Verify the Command and interrupt enable registers, 530 * a sanity check whether actually initialized or not 531 */ 532 cmd_reg = Get_OpReg16(USBCMD); 533 534 if (!(cmd_reg & (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 | 535 USBCMD_REG_CONFIG_FLAG))) { 536 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 537 "uhci_init_ctlr: Controller initialization failed"); 538 539 return 
(USB_FAILURE); 540 } 541 542 /* 543 * Set the ioc bit of the isoc intr td. This enables 544 * the generation of an interrupt for every 1024 frames. 545 */ 546 SetTD_ioc(uhcip, uhcip->uhci_isoc_td, 1); 547 548 /* Set the flag that uhci controller has been initialized. */ 549 uhcip->uhci_ctlr_init_flag = B_TRUE; 550 551 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 552 "uhci_init_ctlr: Completed"); 553 554 return (USB_SUCCESS); 555 } 556 557 558 /* 559 * uhci_uninit_ctlr: 560 * uninitialize the Host Controller (HC). 561 */ 562 void 563 uhci_uninit_ctlr(uhci_state_t *uhcip) 564 { 565 if (uhcip->uhci_regs_handle) { 566 /* Disable all the interrupts. */ 567 Set_OpReg16(USBINTR, DISABLE_ALL_INTRS); 568 569 /* Complete the current transaction and then halt. */ 570 Set_OpReg16(USBCMD, 0); 571 572 /* Wait for sometime */ 573 mutex_exit(&uhcip->uhci_int_mutex); 574 delay(drv_usectohz(UHCI_TIMEWAIT)); 575 mutex_enter(&uhcip->uhci_int_mutex); 576 } 577 } 578 579 580 /* 581 * uhci_map_regs: 582 * The Host Controller (HC) contains a set of on-chip operational 583 * registers and which should be mapped into a non-cacheable 584 * portion of the system addressable space. 
585 */ 586 int 587 uhci_map_regs(uhci_state_t *uhcip) 588 { 589 dev_info_t *dip = uhcip->uhci_dip; 590 int index; 591 uint32_t regs_prop_len; 592 int32_t *regs_list; 593 uint16_t command_reg; 594 ddi_device_acc_attr_t attr; 595 596 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_map_regs:"); 597 598 /* The host controller will be little endian */ 599 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 600 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 601 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 602 603 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, uhcip->uhci_dip, 604 DDI_PROP_DONTPASS, "reg", ®s_list, ®s_prop_len) != 605 DDI_PROP_SUCCESS) { 606 607 return (USB_FAILURE); 608 } 609 610 for (index = 0; index * 5 < regs_prop_len; index++) { 611 if (regs_list[index * 5] & UHCI_PROP_MASK) { 612 break; 613 } 614 } 615 616 /* 617 * Deallocate the memory allocated by the ddi_prop_lookup_int_array 618 */ 619 ddi_prop_free(regs_list); 620 621 if (index * 5 >= regs_prop_len) { 622 623 return (USB_FAILURE); 624 } 625 626 /* Map in operational registers */ 627 if (ddi_regs_map_setup(dip, index, (caddr_t *)&uhcip->uhci_regsp, 628 0, sizeof (hc_regs_t), &attr, &uhcip->uhci_regs_handle) != 629 DDI_SUCCESS) { 630 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 631 "ddi_regs_map_setup: failed"); 632 633 return (USB_FAILURE); 634 } 635 636 if (pci_config_setup(dip, &uhcip->uhci_config_handle) != DDI_SUCCESS) { 637 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 638 "uhci_map_regs: Config error"); 639 640 return (USB_FAILURE); 641 } 642 643 /* Make sure Memory Access Enable and Master Enable are set */ 644 command_reg = pci_config_get16(uhcip->uhci_config_handle, 645 PCI_CONF_COMM); 646 if (!(command_reg & (PCI_COMM_MAE | PCI_COMM_ME))) { 647 USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 648 "uhci_map_regs: No MAE/ME"); 649 } 650 651 command_reg |= PCI_COMM_MAE | PCI_COMM_ME; 652 pci_config_put16(uhcip->uhci_config_handle, PCI_CONF_COMM, 
command_reg); 653 654 /* 655 * Check whether I/O base address is configured and enabled. 656 */ 657 if (!(command_reg & PCI_COMM_IO)) { 658 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 659 "I/O Base address access disabled"); 660 661 return (USB_FAILURE); 662 } 663 /* 664 * Get the IO base address of the controller 665 */ 666 uhcip->uhci_iobase = (pci_config_get16(uhcip->uhci_config_handle, 667 PCI_CONF_IOBASE) & PCI_CONF_IOBASE_MASK); 668 669 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 670 "uhci_map_regs: Completed"); 671 672 return (USB_SUCCESS); 673 } 674 675 676 void 677 uhci_unmap_regs(uhci_state_t *uhcip) 678 { 679 /* Unmap the UHCI registers */ 680 if (uhcip->uhci_regs_handle) { 681 /* Reset the host controller */ 682 Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET); 683 684 ddi_regs_map_free(&uhcip->uhci_regs_handle); 685 } 686 687 if (uhcip->uhci_config_handle) { 688 pci_config_teardown(&uhcip->uhci_config_handle); 689 } 690 } 691 692 693 /* 694 * uhci_set_dma_attributes: 695 * Set the limits in the DMA attributes structure. Most of the values used 696 * in the DMA limit structres are the default values as specified by the 697 * Writing PCI device drivers document. 698 */ 699 void 700 uhci_set_dma_attributes(uhci_state_t *uhcip) 701 { 702 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 703 "uhci_set_dma_attributes:"); 704 705 /* Initialize the DMA attributes */ 706 uhcip->uhci_dma_attr.dma_attr_version = DMA_ATTR_V0; 707 uhcip->uhci_dma_attr.dma_attr_addr_lo = 0x00000000ull; 708 uhcip->uhci_dma_attr.dma_attr_addr_hi = 0xfffffff0ull; 709 710 /* 32 bit addressing */ 711 uhcip->uhci_dma_attr.dma_attr_count_max = 0xffffffull; 712 713 /* 714 * Setting the dam_att_align to 512, some times fails the 715 * binding handle. I dont know why ? But setting to 16 will 716 * be right for our case (16 byte alignment required per 717 * UHCI spec for TD descriptors). 
718 */ 719 720 /* 16 byte alignment */ 721 uhcip->uhci_dma_attr.dma_attr_align = 0x10; 722 723 /* 724 * Since PCI specification is byte alignment, the 725 * burstsize field should be set to 1 for PCI devices. 726 */ 727 uhcip->uhci_dma_attr.dma_attr_burstsizes = 0x1; 728 729 uhcip->uhci_dma_attr.dma_attr_minxfer = 0x1; 730 uhcip->uhci_dma_attr.dma_attr_maxxfer = 0xffffffull; 731 uhcip->uhci_dma_attr.dma_attr_seg = 0xffffffffull; 732 uhcip->uhci_dma_attr.dma_attr_sgllen = 1; 733 uhcip->uhci_dma_attr.dma_attr_granular = 1; 734 uhcip->uhci_dma_attr.dma_attr_flags = 0; 735 } 736 737 738 uint_t 739 pow_2(uint_t x) 740 { 741 return ((x == 0) ? 1 : (1 << x)); 742 } 743 744 745 uint_t 746 log_2(uint_t x) 747 { 748 int ret_val = 0; 749 750 while (x != 1) { 751 ret_val++; 752 x = x >> 1; 753 } 754 755 return (ret_val); 756 } 757 758 759 /* 760 * uhci_obtain_state: 761 */ 762 uhci_state_t * 763 uhci_obtain_state(dev_info_t *dip) 764 { 765 int instance = ddi_get_instance(dip); 766 uhci_state_t *state = ddi_get_soft_state(uhci_statep, instance); 767 768 ASSERT(state != NULL); 769 770 return (state); 771 } 772 773 774 /* 775 * uhci_alloc_hcdi_ops: 776 * The HCDI interfaces or entry points are the software interfaces used by 777 * the Universal Serial Bus Driver (USBA) to access the services of the 778 * Host Controller Driver (HCD). During HCD initialization, inform USBA 779 * about all available HCDI interfaces or entry points. 
780 */ 781 usba_hcdi_ops_t * 782 uhci_alloc_hcdi_ops(uhci_state_t *uhcip) 783 { 784 usba_hcdi_ops_t *hcdi_ops; 785 786 USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl, 787 "uhci_alloc_hcdi_ops:"); 788 789 hcdi_ops = usba_alloc_hcdi_ops(); 790 791 hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION_1; 792 793 hcdi_ops->usba_hcdi_pipe_open = uhci_hcdi_pipe_open; 794 hcdi_ops->usba_hcdi_pipe_close = uhci_hcdi_pipe_close; 795 hcdi_ops->usba_hcdi_pipe_reset = uhci_hcdi_pipe_reset; 796 797 hcdi_ops->usba_hcdi_pipe_ctrl_xfer = uhci_hcdi_pipe_ctrl_xfer; 798 hcdi_ops->usba_hcdi_pipe_bulk_xfer = uhci_hcdi_pipe_bulk_xfer; 799 hcdi_ops->usba_hcdi_pipe_intr_xfer = uhci_hcdi_pipe_intr_xfer; 800 hcdi_ops->usba_hcdi_pipe_isoc_xfer = uhci_hcdi_pipe_isoc_xfer; 801 802 hcdi_ops->usba_hcdi_bulk_transfer_size = uhci_hcdi_bulk_transfer_size; 803 hcdi_ops->usba_hcdi_pipe_stop_intr_polling = 804 uhci_hcdi_pipe_stop_intr_polling; 805 hcdi_ops->usba_hcdi_pipe_stop_isoc_polling = 806 uhci_hcdi_pipe_stop_isoc_polling; 807 808 hcdi_ops->usba_hcdi_get_current_frame_number = 809 uhci_hcdi_get_current_frame_number; 810 hcdi_ops->usba_hcdi_get_max_isoc_pkts = uhci_hcdi_get_max_isoc_pkts; 811 812 hcdi_ops->usba_hcdi_console_input_init = uhci_hcdi_polled_input_init; 813 hcdi_ops->usba_hcdi_console_input_enter = uhci_hcdi_polled_input_enter; 814 hcdi_ops->usba_hcdi_console_read = uhci_hcdi_polled_read; 815 hcdi_ops->usba_hcdi_console_input_exit = uhci_hcdi_polled_input_exit; 816 hcdi_ops->usba_hcdi_console_input_fini = uhci_hcdi_polled_input_fini; 817 818 hcdi_ops->usba_hcdi_console_output_init = uhci_hcdi_polled_output_init; 819 hcdi_ops->usba_hcdi_console_output_enter = 820 uhci_hcdi_polled_output_enter; 821 hcdi_ops->usba_hcdi_console_write = uhci_hcdi_polled_write; 822 hcdi_ops->usba_hcdi_console_output_exit = uhci_hcdi_polled_output_exit; 823 hcdi_ops->usba_hcdi_console_output_fini = uhci_hcdi_polled_output_fini; 824 825 return (hcdi_ops); 826 } 827 828 829 /* 830 * uhci_init_frame_lst_table 
: 831 * Allocate the system memory and initialize Host Controller 832 * Frame list table area The starting of the Frame list Table 833 * area must be 4096 byte aligned. 834 */ 835 static int 836 uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip) 837 { 838 int result; 839 uint_t ccount; 840 size_t real_length; 841 ddi_device_acc_attr_t dev_attr; 842 843 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 844 845 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 846 "uhci_init_frame_lst_table:"); 847 848 /* The host controller will be little endian */ 849 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 850 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 851 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 852 853 /* 4K alignment required */ 854 uhcip->uhci_dma_attr.dma_attr_align = 0x1000; 855 856 /* Create space for the HCCA block */ 857 if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 858 0, &uhcip->uhci_flt_dma_handle) != DDI_SUCCESS) { 859 860 return (USB_FAILURE); 861 } 862 863 /* Reset to default 16 bytes */ 864 uhcip->uhci_dma_attr.dma_attr_align = 0x10; 865 866 if (ddi_dma_mem_alloc(uhcip->uhci_flt_dma_handle, 867 SIZE_OF_FRAME_LST_TABLE, &dev_attr, DDI_DMA_CONSISTENT, 868 DDI_DMA_SLEEP, 0, (caddr_t *)&uhcip->uhci_frame_lst_tablep, 869 &real_length, &uhcip->uhci_flt_mem_handle)) { 870 871 return (USB_FAILURE); 872 } 873 874 /* Map the whole Frame list base area into the I/O address space */ 875 result = ddi_dma_addr_bind_handle(uhcip->uhci_flt_dma_handle, 876 NULL, (caddr_t)uhcip->uhci_frame_lst_tablep, real_length, 877 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 878 &uhcip->uhci_flt_cookie, &ccount); 879 880 if (result == DDI_DMA_MAPPED) { 881 /* The cookie count should be 1 */ 882 if (ccount != 1) { 883 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 884 "uhci_init_frame_list_table: More than 1 cookie"); 885 886 return (USB_FAILURE); 887 } 888 } else { 889 
uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result); 890 891 return (USB_FAILURE); 892 } 893 894 uhcip->uhci_dma_addr_bind_flag |= UHCI_FLA_POOL_BOUND; 895 896 bzero((void *)uhcip->uhci_frame_lst_tablep, real_length); 897 898 /* Initialize the interrupt lists */ 899 uhci_build_interrupt_lattice(uhcip); 900 901 return (USB_SUCCESS); 902 } 903 904 905 /* 906 * uhci_alloc_queue_head: 907 * Allocate a queue head 908 */ 909 queue_head_t * 910 uhci_alloc_queue_head(uhci_state_t *uhcip) 911 { 912 int index; 913 uhci_td_t *dummy_td; 914 queue_head_t *queue_head; 915 916 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 917 "uhci_alloc_queue_head"); 918 919 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 920 921 /* Allocate a dummy td first. */ 922 if ((dummy_td = uhci_allocate_td_from_pool(uhcip)) == NULL) { 923 924 USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 925 "uhci_alloc_queue_head: allocate td from pool failed"); 926 927 return (NULL); 928 } 929 930 /* 931 * The first 63 queue heads in the Queue Head (QH) 932 * buffer pool are reserved for building interrupt lattice 933 * tree. Search for a blank Queue head in the QH buffer pool. 934 */ 935 for (index = NUM_STATIC_NODES; index < uhci_qh_pool_size; index++) { 936 if (uhcip->uhci_qh_pool_addr[index].qh_flag == 937 QUEUE_HEAD_FLAG_FREE) { 938 break; 939 } 940 } 941 942 USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 943 "uhci_alloc_queue_head: Allocated %d", index); 944 945 if (index == uhci_qh_pool_size) { 946 USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 947 "uhci_alloc_queue_head: All QH exhausted"); 948 949 /* Free the dummy td allocated for this qh. 
 */
		/* Return the dummy TD to the free pool before failing */
		dummy_td->flag = TD_FLAG_FREE;

		return (NULL);
	}

	queue_head = &uhcip->uhci_qh_pool_addr[index];
	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_alloc_queue_head: Allocated address 0x%p", queue_head);

	/* Initialize the QH: terminate both hardware link pointers */
	bzero((void *)queue_head, sizeof (queue_head_t));
	SetQH32(uhcip, queue_head->link_ptr, HC_END_OF_LIST);
	SetQH32(uhcip, queue_head->element_ptr, HC_END_OF_LIST);
	queue_head->prev_qh = NULL;
	queue_head->qh_flag = QUEUE_HEAD_FLAG_BUSY;

	/* Hang the dummy TD off the QH's element pointer */
	bzero((char *)dummy_td, sizeof (uhci_td_t));
	queue_head->td_tailp = dummy_td;
	SetQH32(uhcip, queue_head->element_ptr, TD_PADDR(dummy_td));

	return (queue_head);
}


/*
 * uhci_allocate_bandwidth:
 *	Figure out whether or not this interval may be supported. Return
 *	the index into the lattice if it can be supported.  Return
 *	allocation failure if it can not be supported.
 *
 *	uhcip		- soft state
 *	pipe_handle	- periodic (interrupt/isoch) pipe being opened
 *	node		- out: lattice node index chosen for the pipe
 *			  (only meaningful for interrupt endpoints)
 */
int
uhci_allocate_bandwidth(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*pipe_handle,
	uint_t			*node)
{
	int		bandwidth;	/* Requested bandwidth */
	uint_t		min, min_index;
	uint_t		i;
	uint_t		height;		/* Bandwidth's height in the tree */
	uint_t		leftmost;
	uint_t		length;
	uint32_t	paddr;
	queue_head_t	*tmp_qh;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint.
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);

	length = uhci_compute_total_bandwidth(endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If the length in bytes plus the allocated bandwidth exceeds
	 * the maximum, return bandwidth allocation failure.
	 */
	if ((length + uhcip->uhci_bandwidth_intr_min +
	    uhcip->uhci_bandwidth_isoch_sum) > (MAX_PERIODIC_BANDWIDTH)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_allocate_bandwidth: "
		    "Reached maximum bandwidth value and cannot allocate "
		    "bandwidth for a given Interrupt/Isoch endpoint");

		return (USB_NO_BANDWIDTH);
	}

	/*
	 * ISOC xfers are not supported at this point type
	 * (isoch endpoints only account their bytes in the running sum;
	 * they do not occupy a lattice node)
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum += length;

		return (USB_SUCCESS);
	}

	/*
	 * This is an interrupt endpoint.
	 * Adjust bandwidth to be a power of 2
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this bandwidth can't be supported,
	 * return allocation failure.
	 */
	if (bandwidth == USB_FAILURE) {

		return (USB_FAILURE);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The new bandwidth is %d", bandwidth);

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The leaf with minimal bandwidth %d, "
	    "The smallest bandwidth %d", min_index, min);

	/*
	 * Find the index into the lattice given the
	 * leaf with the smallest allocated bandwidth.
	 */
	height = uhci_lattice_height(bandwidth);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The height is %d", height);

	*node = uhci_tree_bottom_nodes[min_index];

	/* check if there are isocs TDs scheduled for this frame */
	if (uhcip->uhci_isoc_q_tailp[*node]) {
		paddr = (uhcip->uhci_isoc_q_tailp[*node]->link_ptr &
		    FRAME_LST_PTR_MASK);
	} else {
		paddr = (uhcip->uhci_frame_lst_tablep[*node] &
		    FRAME_LST_PTR_MASK);
	}

	/*
	 * Walk up from the leaf's QH to the ancestor at the level
	 * implied by the requested bandwidth; *node is reused as the
	 * cursor during this walk.
	 */
	tmp_qh = QH_VADDR(paddr);
	*node = tmp_qh->node;
	for (i = 0; i < height; i++) {
		*node = uhci_lattice_parent(*node);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The real node is %d", *node);

	/*
	 * Find the leftmost leaf in the subtree specified by the node.
	 */
	leftmost = uhci_leftmost_leaf(*node, height);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "Leftmost %d", leftmost);

	/* Verify every affected leaf still has room for this endpoint */
	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i ++) {

		if ((length + uhcip->uhci_bandwidth_isoch_sum +
		    uhcip->uhci_bandwidth[i]) > MAX_PERIODIC_BANDWIDTH) {

			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_allocate_bandwidth: "
			    "Reached maximum bandwidth value and cannot "
			    "allocate bandwidth for Interrupt endpoint");

			return (USB_NO_BANDWIDTH);
		}
	}

	/*
	 * All the leaves for this node must be updated with the bandwidth.
	 */
	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i ++) {
		uhcip->uhci_bandwidth[i] += length;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;

	return (USB_SUCCESS);
}


/*
 * uhci_deallocate_bandwidth:
 *	Deallocate bandwidth for the given node in the lattice
 *	and the length of transfer.
 */
void
uhci_deallocate_bandwidth(uhci_state_t *uhcip,
    usba_pipe_handle_data_t *pipe_handle)
{
	uint_t		bandwidth;
	uint_t		height;
	uint_t		leftmost;
	uint_t		i;
	uint_t		min;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;
	uint_t		node, length;
	uhci_pipe_private_t *pp =
	    (uhci_pipe_private_t *)pipe_handle->p_hcd_private;

	/* This routine is protected by the uhci_int_mutex */
	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Obtain the length */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	length = uhci_compute_total_bandwidth(endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this is an isochronous endpoint, just delete endpoint's
	 * bandwidth from the total allocated isochronous bandwidth.
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum -= length;

		return;
	}

	/* Obtain the node (recorded at pipe open time) */
	node = pp->pp_node;

	/* Adjust bandwidth to be a power of 2 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/* Find the height in the tree */
	height = uhci_lattice_height(bandwidth);

	/*
	 * Find the leftmost leaf in the subtree specified by the node
	 */
	leftmost = uhci_leftmost_leaf(node, height);

	/* Delete the bandwidth from the appropriate lists */
	for (i = leftmost; i < leftmost + (NUM_FRAME_LST_ENTRIES/bandwidth);
	    i ++) {
		uhcip->uhci_bandwidth[i] -= length;
	}

	min = uhcip->uhci_bandwidth[0];

	/* Recompute the minimum */
	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;
}


/*
 * uhci_compute_total_bandwidth:
 *
 *	Given a periodic endpoint (interrupt or isochronous) determine the total
 *	bandwidth for one transaction. The UHCI host controller traverses the
 *	endpoint descriptor lists on a first-come-first-serve basis. When the HC
 *	services an endpoint, only a single transaction attempt is made. The HC
 *	moves to the next Endpoint Descriptor after the first transaction attempt
 *	rather than finishing the entire Transfer Descriptor. Therefore, when a
 *	Transfer Descriptor is inserted into the lattice, we will only count the
 *	number of bytes for one transaction.
 *
 *	The following are the formulas used for calculating bandwidth in terms
 *	bytes and it is for the single USB full speed and low speed transaction
 *	respectively. The protocol overheads will be different for each of type
 *	of USB transfer and all these formulas & protocol overheads are derived
 *	from the 5.9.3 section of USB Specification & with the help of Bandwidth
 *	Analysis white paper which is posted on the USB developer forum.
 *
 *	Full-Speed:
 *		Protocol overhead + ((MaxPacketSize * 7)/6 ) + Host_Delay
 *
 *	Low-Speed:
 *		Protocol overhead + Hub LS overhead +
 *		(Low-Speed clock * ((MaxPacketSize * 7)/6 )) + Host_Delay
 */
static uint_t
uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
    usb_port_status_t port_status)
{
	uint_t		bandwidth;
	ushort_t	MaxPacketSize = endpoint->wMaxPacketSize;

	/* Add Host Controller specific delay to required bandwidth */
	bandwidth = HOST_CONTROLLER_DELAY;

	/* Add bit-stuffing overhead (worst case 7/6 expansion) */
	MaxPacketSize = (ushort_t)((MaxPacketSize * 7) / 6);

	/* Low Speed interrupt transaction */
	if (port_status == USBA_LOW_SPEED_DEV) {
		/* Low Speed interrupt transaction */
		bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
		    HUB_LOW_SPEED_PROTO_OVERHEAD +
		    (LOW_SPEED_CLOCK * MaxPacketSize));
	} else {
		/* Full Speed transaction */
		bandwidth += MaxPacketSize;

		if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_INTR) {
			/* Full Speed interrupt transaction */
			bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
		} else {
			/* Isochronus and input transaction */
			if (UHCI_XFER_DIR(endpoint) == USB_EP_DIR_IN) {
				bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
			} else {
				/* Isochronus and output transaction */
				bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
			}
		}
	}

	return (bandwidth);
}


/*
 * uhci_bandwidth_adjust:
 *	Round an endpoint's polling interval down to a power of two (in
 *	ms), clamped to the 32ms lattice period.  Returns the adjusted
 *	interval, or USB_FAILURE for an out-of-range bInterval.
 */
1287 static int 1288 uhci_bandwidth_adjust( 1289 uhci_state_t *uhcip, 1290 usb_ep_descr_t *endpoint, 1291 usb_port_status_t port_status) 1292 { 1293 int i = 0; 1294 uint_t interval; 1295 1296 /* 1297 * Get the polling interval from the endpoint descriptor 1298 */ 1299 interval = endpoint->bInterval; 1300 1301 /* 1302 * The bInterval value in the endpoint descriptor can range 1303 * from 1 to 255ms. The interrupt lattice has 32 leaf nodes, 1304 * and the host controller cycles through these nodes every 1305 * 32ms. The longest polling interval that the controller 1306 * supports is 32ms. 1307 */ 1308 1309 /* 1310 * Return an error if the polling interval is less than 1ms 1311 * and greater than 255ms 1312 */ 1313 if ((interval < MIN_POLL_INTERVAL) || (interval > MAX_POLL_INTERVAL)) { 1314 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1315 "uhci_bandwidth_adjust: Endpoint's poll interval must be " 1316 "between %d and %d ms", MIN_POLL_INTERVAL, 1317 MAX_POLL_INTERVAL); 1318 1319 return (USB_FAILURE); 1320 } 1321 1322 /* 1323 * According USB Specifications, a full-speed endpoint can 1324 * specify a desired polling interval 1ms to 255ms and a low 1325 * speed endpoints are limited to specifying only 10ms to 1326 * 255ms. But some old keyboards & mice uses polling interval 1327 * of 8ms. For compatibility purpose, we are using polling 1328 * interval between 8ms & 255ms for low speed endpoints. 1329 */ 1330 if ((port_status == USBA_LOW_SPEED_DEV) && 1331 (interval < MIN_LOW_SPEED_POLL_INTERVAL)) { 1332 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1333 "uhci_bandwidth_adjust: Low speed endpoint's poll interval " 1334 "must be >= %d ms, adjusted", 1335 MIN_LOW_SPEED_POLL_INTERVAL); 1336 1337 interval = MIN_LOW_SPEED_POLL_INTERVAL; 1338 } 1339 1340 /* 1341 * If polling interval is greater than 32ms, 1342 * adjust polling interval equal to 32ms. 
1343 */ 1344 if (interval > 32) { 1345 interval = 32; 1346 } 1347 1348 /* 1349 * Find the nearest power of 2 that's less 1350 * than interval. 1351 */ 1352 while ((pow_2(i)) <= interval) { 1353 i++; 1354 } 1355 1356 return (pow_2((i - 1))); 1357 } 1358 1359 1360 /* 1361 * uhci_lattice_height: 1362 * Given the requested bandwidth, find the height in the tree at 1363 * which the nodes for this bandwidth fall. The height is measured 1364 * as the number of nodes from the leaf to the level specified by 1365 * bandwidth The root of the tree is at height TREE_HEIGHT. 1366 */ 1367 static uint_t 1368 uhci_lattice_height(uint_t bandwidth) 1369 { 1370 return (TREE_HEIGHT - (log_2(bandwidth))); 1371 } 1372 1373 1374 static uint_t 1375 uhci_lattice_parent(uint_t node) 1376 { 1377 return (((node % 2) == 0) ? ((node/2) - 1) : (node/2)); 1378 } 1379 1380 1381 /* 1382 * uhci_leftmost_leaf: 1383 * Find the leftmost leaf in the subtree specified by the node. 1384 * Height refers to number of nodes from the bottom of the tree 1385 * to the node, including the node. 1386 */ 1387 static uint_t 1388 uhci_leftmost_leaf(uint_t node, uint_t height) 1389 { 1390 node = pow_2(height + VIRTUAL_TREE_HEIGHT) * (node+1) - 1391 NUM_FRAME_LST_ENTRIES; 1392 return (node); 1393 } 1394 1395 1396 /* 1397 * uhci_insert_qh: 1398 * Add the Queue Head (QH) into the Host Controller's (HC) 1399 * appropriate queue head list. 
1400 */ 1401 void 1402 uhci_insert_qh(uhci_state_t *uhcip, usba_pipe_handle_data_t *ph) 1403 { 1404 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 1405 1406 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1407 "uhci_insert_qh:"); 1408 1409 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 1410 1411 switch (UHCI_XFER_TYPE(&ph->p_ep)) { 1412 case USB_EP_ATTR_CONTROL: 1413 uhci_insert_ctrl_qh(uhcip, pp); 1414 break; 1415 case USB_EP_ATTR_BULK: 1416 uhci_insert_bulk_qh(uhcip, pp); 1417 break; 1418 case USB_EP_ATTR_INTR: 1419 uhci_insert_intr_qh(uhcip, pp); 1420 break; 1421 case USB_EP_ATTR_ISOCH: 1422 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 1423 "uhci_insert_qh: Illegal request"); 1424 break; 1425 } 1426 } 1427 1428 1429 /* 1430 * uhci_insert_ctrl_qh: 1431 * Insert a control QH into the Host Controller's (HC) control QH list. 1432 */ 1433 static void 1434 uhci_insert_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 1435 { 1436 queue_head_t *qh = pp->pp_qh; 1437 1438 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1439 "uhci_insert_ctrl_qh:"); 1440 1441 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 1442 1443 if (uhcip->uhci_ctrl_xfers_q_head == uhcip->uhci_ctrl_xfers_q_tail) { 1444 uhcip->uhci_ctrl_xfers_q_head->prev_qh = UHCI_INVALID_PTR; 1445 } 1446 1447 SetQH32(uhcip, qh->link_ptr, 1448 GetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr)); 1449 qh->prev_qh = uhcip->uhci_ctrl_xfers_q_tail; 1450 SetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr, 1451 QH_PADDR(qh) | HC_QUEUE_HEAD); 1452 uhcip->uhci_ctrl_xfers_q_tail = qh; 1453 1454 } 1455 1456 1457 /* 1458 * uhci_insert_bulk_qh: 1459 * Insert a bulk QH into the Host Controller's (HC) bulk QH list. 
1460 */ 1461 static void 1462 uhci_insert_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 1463 { 1464 queue_head_t *qh = pp->pp_qh; 1465 1466 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1467 "uhci_insert_bulk_qh:"); 1468 1469 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 1470 1471 if (uhcip->uhci_bulk_xfers_q_head == uhcip->uhci_bulk_xfers_q_tail) { 1472 uhcip->uhci_bulk_xfers_q_head->prev_qh = UHCI_INVALID_PTR; 1473 } else if (uhcip->uhci_bulk_xfers_q_head->link_ptr == 1474 uhcip->uhci_bulk_xfers_q_tail->link_ptr) { 1475 1476 /* If there is already a loop, we should keep the loop. */ 1477 qh->link_ptr = uhcip->uhci_bulk_xfers_q_tail->link_ptr; 1478 } 1479 1480 qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail; 1481 SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_tail->link_ptr, 1482 QH_PADDR(qh) | HC_QUEUE_HEAD); 1483 uhcip->uhci_bulk_xfers_q_tail = qh; 1484 } 1485 1486 1487 /* 1488 * uhci_insert_intr_qh: 1489 * Insert a periodic Queue head i.e Interrupt queue head into the 1490 * Host Controller's (HC) interrupt lattice tree. 1491 */ 1492 static void 1493 uhci_insert_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp) 1494 { 1495 uint_t node = pp->pp_node; /* The appropriate node was */ 1496 /* found during the opening */ 1497 /* of the pipe. 
*/ 1498 queue_head_t *qh = pp->pp_qh; 1499 queue_head_t *next_lattice_qh, *lattice_qh; 1500 1501 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1502 "uhci_insert_intr_qh:"); 1503 1504 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 1505 1506 /* Find the lattice queue head */ 1507 lattice_qh = &uhcip->uhci_qh_pool_addr[node]; 1508 next_lattice_qh = 1509 QH_VADDR(GetQH32(uhcip, lattice_qh->link_ptr) & QH_LINK_PTR_MASK); 1510 1511 next_lattice_qh->prev_qh = qh; 1512 qh->link_ptr = lattice_qh->link_ptr; 1513 qh->prev_qh = lattice_qh; 1514 SetQH32(uhcip, lattice_qh->link_ptr, QH_PADDR(qh) | HC_QUEUE_HEAD); 1515 pp->pp_data_toggle = 0; 1516 } 1517 1518 1519 /* 1520 * uhci_insert_intr_td: 1521 * Create a TD and a data buffer for an interrupt endpoint. 1522 */ 1523 int 1524 uhci_insert_intr_td( 1525 uhci_state_t *uhcip, 1526 usba_pipe_handle_data_t *ph, 1527 usb_intr_req_t *req, 1528 usb_flags_t flags) 1529 { 1530 int error, pipe_dir; 1531 uint_t length, mps; 1532 uint32_t buf_offs; 1533 uhci_td_t *tmp_td; 1534 usb_intr_req_t *intr_reqp; 1535 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 1536 uhci_trans_wrapper_t *tw; 1537 1538 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1539 "uhci_insert_intr_td: req: 0x%p", req); 1540 1541 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 1542 1543 /* Get the interrupt pipe direction */ 1544 pipe_dir = UHCI_XFER_DIR(&ph->p_ep); 1545 1546 /* Get the current interrupt request pointer */ 1547 if (req) { 1548 length = req->intr_len; 1549 } else { 1550 ASSERT(pipe_dir == USB_EP_DIR_IN); 1551 length = (pp->pp_client_periodic_in_reqp) ? 
1552 (((usb_intr_req_t *)pp-> 1553 pp_client_periodic_in_reqp)->intr_len) : 1554 ph->p_ep.wMaxPacketSize; 1555 } 1556 1557 /* Check the size of interrupt request */ 1558 if (length > UHCI_MAX_TD_XFER_SIZE) { 1559 1560 /* the length shouldn't exceed 8K */ 1561 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1562 "uhci_insert_intr_td: Intr request size 0x%lx is " 1563 "more than 0x%x", length, UHCI_MAX_TD_XFER_SIZE); 1564 1565 return (USB_INVALID_REQUEST); 1566 } 1567 1568 USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1569 "uhci_insert_intr_td: length: 0x%lx", length); 1570 1571 /* Allocate a transaction wrapper */ 1572 if ((tw = uhci_create_transfer_wrapper(uhcip, pp, length, flags)) == 1573 NULL) { 1574 1575 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1576 "uhci_insert_intr_td: TW allocation failed"); 1577 1578 return (USB_NO_RESOURCES); 1579 } 1580 1581 /* 1582 * Initialize the callback and any callback 1583 * data for when the td completes. 1584 */ 1585 tw->tw_handle_td = uhci_handle_intr_td; 1586 tw->tw_handle_callback_value = NULL; 1587 tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ? 1588 PID_OUT : PID_IN; 1589 tw->tw_curr_xfer_reqp = (usb_opaque_t)req; 1590 1591 /* 1592 * If it is an Interrupt IN request and interrupt request is NULL, 1593 * allocate the usb interrupt request structure for the current 1594 * interrupt polling request. 1595 */ 1596 if (tw->tw_direction == PID_IN) { 1597 if ((error = uhci_allocate_periodic_in_resource(uhcip, 1598 pp, tw, flags)) != USB_SUCCESS) { 1599 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1600 "uhci_insert_intr_td: Interrupt request structure " 1601 "allocation failed"); 1602 1603 /* free the transfer wrapper */ 1604 uhci_deallocate_tw(uhcip, pp, tw); 1605 1606 return (error); 1607 } 1608 } 1609 1610 intr_reqp = (usb_intr_req_t *)tw->tw_curr_xfer_reqp; 1611 ASSERT(tw->tw_curr_xfer_reqp != NULL); 1612 1613 tw->tw_timeout_cnt = (intr_reqp->intr_attributes & USB_ATTRS_ONE_XFER) ? 
1614 intr_reqp->intr_timeout : 0; 1615 1616 /* DATA IN */ 1617 if (tw->tw_direction == PID_IN) { 1618 /* Insert the td onto the queue head */ 1619 error = uhci_insert_hc_td(uhcip, 0, 1620 length, pp, tw, PID_IN, intr_reqp->intr_attributes); 1621 1622 if (error != USB_SUCCESS) { 1623 1624 uhci_deallocate_periodic_in_resource(uhcip, pp, tw); 1625 /* free the transfer wrapper */ 1626 uhci_deallocate_tw(uhcip, pp, tw); 1627 1628 return (USB_NO_RESOURCES); 1629 } 1630 tw->tw_bytes_xfered = 0; 1631 1632 return (USB_SUCCESS); 1633 } 1634 1635 /* DATA OUT */ 1636 ASSERT(req->intr_data != NULL); 1637 1638 /* Copy the data into the message */ 1639 ddi_rep_put8(tw->tw_accesshandle, req->intr_data->b_rptr, 1640 (uint8_t *)tw->tw_buf, req->intr_len, DDI_DEV_AUTOINCR); 1641 1642 /* set tw->tw_claim flag, so that nobody else works on this tw. */ 1643 tw->tw_claim = UHCI_INTR_HDLR_CLAIMED; 1644 1645 mps = ph->p_ep.wMaxPacketSize; 1646 buf_offs = 0; 1647 1648 /* Insert tds onto the queue head */ 1649 while (length > 0) { 1650 1651 error = uhci_insert_hc_td(uhcip, buf_offs, 1652 (length > mps) ? mps : length, 1653 pp, tw, PID_OUT, 1654 intr_reqp->intr_attributes); 1655 1656 if (error != USB_SUCCESS) { 1657 /* no resource. */ 1658 break; 1659 } 1660 1661 if (length <= mps) { 1662 /* inserted all data. 
*/ 1663 length = 0; 1664 1665 } else { 1666 1667 buf_offs += mps; 1668 length -= mps; 1669 } 1670 } 1671 1672 if (error != USB_SUCCESS) { 1673 1674 USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 1675 "uhci_insert_intr_td: allocate td failed, free resource"); 1676 1677 /* remove all the tds */ 1678 while (tw->tw_hctd_head != NULL) { 1679 uhci_delete_td(uhcip, tw->tw_hctd_head); 1680 } 1681 1682 tw->tw_claim = UHCI_NOT_CLAIMED; 1683 uhci_deallocate_tw(uhcip, pp, tw); 1684 1685 return (error); 1686 } 1687 1688 /* allow HC to xfer the tds of this tw */ 1689 tmp_td = tw->tw_hctd_head; 1690 while (tmp_td != NULL) { 1691 1692 SetTD_status(uhcip, tmp_td, UHCI_TD_ACTIVE); 1693 tmp_td = tmp_td->tw_td_next; 1694 } 1695 1696 tw->tw_bytes_xfered = 0; 1697 tw->tw_claim = UHCI_NOT_CLAIMED; 1698 1699 return (error); 1700 } 1701 1702 1703 /* 1704 * uhci_create_transfer_wrapper: 1705 * Create a Transaction Wrapper (TW) for non-isoc transfer types. 1706 * This involves the allocating of DMA resources. 1707 * 1708 * For non-isoc transfers, one DMA handle and one DMA buffer are 1709 * allocated per transfer. The DMA buffer may contain multiple 1710 * DMA cookies and the cookies should meet certain alignment 1711 * requirement to be able to fit in the multiple TDs. The alignment 1712 * needs to ensure: 1713 * 1. the size of a cookie be larger than max TD length (0x500) 1714 * 2. the size of a cookie be a multiple of wMaxPacketSize of the 1715 * ctrl/bulk pipes 1716 * 1717 * wMaxPacketSize for ctrl and bulk pipes may be 8, 16, 32 or 64 bytes. 1718 * So the alignment should be a multiple of 64. wMaxPacketSize for intr 1719 * pipes is a little different since it only specifies the max to be 1720 * 64 bytes, but as long as an intr transfer is limited to max TD length, 1721 * any alignment can work if the cookie size is larger than max TD length. 1722 * 1723 * Considering the above conditions, 2K alignment is used. 4K alignment 1724 * should also be fine. 
1725 */ 1726 static uhci_trans_wrapper_t * 1727 uhci_create_transfer_wrapper( 1728 uhci_state_t *uhcip, 1729 uhci_pipe_private_t *pp, 1730 size_t length, 1731 usb_flags_t usb_flags) 1732 { 1733 size_t real_length; 1734 uhci_trans_wrapper_t *tw; 1735 ddi_device_acc_attr_t dev_attr; 1736 ddi_dma_attr_t dma_attr; 1737 int kmem_flag; 1738 int (*dmamem_wait)(caddr_t); 1739 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle; 1740 1741 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1742 "uhci_create_transfer_wrapper: length = 0x%lx flags = 0x%x", 1743 length, usb_flags); 1744 1745 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 1746 1747 /* isochronous pipe should not call into this function */ 1748 if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) { 1749 1750 return (NULL); 1751 } 1752 1753 /* SLEEP flag should not be used in interrupt context */ 1754 if (servicing_interrupt()) { 1755 kmem_flag = KM_NOSLEEP; 1756 dmamem_wait = DDI_DMA_DONTWAIT; 1757 } else { 1758 kmem_flag = KM_SLEEP; 1759 dmamem_wait = DDI_DMA_SLEEP; 1760 } 1761 1762 /* Allocate space for the transfer wrapper */ 1763 if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) == 1764 NULL) { 1765 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1766 "uhci_create_transfer_wrapper: kmem_alloc failed"); 1767 1768 return (NULL); 1769 } 1770 1771 /* allow sg lists for transfer wrapper dma memory */ 1772 bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t)); 1773 dma_attr.dma_attr_sgllen = UHCI_DMA_ATTR_SGLLEN; 1774 dma_attr.dma_attr_align = UHCI_DMA_ATTR_ALIGN; 1775 1776 /* Store the transfer length */ 1777 tw->tw_length = length; 1778 1779 /* Allocate the DMA handle */ 1780 if (ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr, dmamem_wait, 1781 0, &tw->tw_dmahandle) != DDI_SUCCESS) { 1782 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1783 "uhci_create_transfer_wrapper: Alloc handle failed"); 1784 kmem_free(tw, sizeof (uhci_trans_wrapper_t)); 1785 1786 return (NULL); 1787 } 
1788 1789 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 1790 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 1791 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1792 1793 /* Allocate the memory */ 1794 if (ddi_dma_mem_alloc(tw->tw_dmahandle, tw->tw_length, &dev_attr, 1795 DDI_DMA_CONSISTENT, dmamem_wait, NULL, (caddr_t *)&tw->tw_buf, 1796 &real_length, &tw->tw_accesshandle) != DDI_SUCCESS) { 1797 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1798 "uhci_create_transfer_wrapper: dma_mem_alloc fail"); 1799 ddi_dma_free_handle(&tw->tw_dmahandle); 1800 kmem_free(tw, sizeof (uhci_trans_wrapper_t)); 1801 1802 return (NULL); 1803 } 1804 1805 ASSERT(real_length >= length); 1806 1807 /* Bind the handle */ 1808 if (ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL, 1809 (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT, 1810 dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies) != 1811 DDI_DMA_MAPPED) { 1812 USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1813 "uhci_create_transfer_wrapper: Bind handle failed"); 1814 ddi_dma_mem_free(&tw->tw_accesshandle); 1815 ddi_dma_free_handle(&tw->tw_dmahandle); 1816 kmem_free(tw, sizeof (uhci_trans_wrapper_t)); 1817 1818 return (NULL); 1819 } 1820 1821 tw->tw_cookie_idx = 0; 1822 tw->tw_dma_offs = 0; 1823 1824 /* 1825 * Only allow one wrapper to be added at a time. Insert the 1826 * new transaction wrapper into the list for this pipe. 
1827 */ 1828 if (pp->pp_tw_head == NULL) { 1829 pp->pp_tw_head = tw; 1830 pp->pp_tw_tail = tw; 1831 } else { 1832 pp->pp_tw_tail->tw_next = tw; 1833 pp->pp_tw_tail = tw; 1834 ASSERT(tw->tw_next == NULL); 1835 } 1836 1837 /* Store a back pointer to the pipe private structure */ 1838 tw->tw_pipe_private = pp; 1839 1840 /* Store the transfer type - synchronous or asynchronous */ 1841 tw->tw_flags = usb_flags; 1842 1843 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 1844 "uhci_create_transfer_wrapper: tw = 0x%p, ncookies = %u", 1845 tw, tw->tw_ncookies); 1846 1847 return (tw); 1848 } 1849 1850 1851 /* 1852 * uhci_insert_hc_td: 1853 * Insert a Transfer Descriptor (TD) on an QH. 1854 */ 1855 int 1856 uhci_insert_hc_td( 1857 uhci_state_t *uhcip, 1858 uint32_t buffer_offset, 1859 size_t hcgtd_length, 1860 uhci_pipe_private_t *pp, 1861 uhci_trans_wrapper_t *tw, 1862 uchar_t PID, 1863 usb_req_attrs_t attrs) 1864 { 1865 uhci_td_t *td, *current_dummy; 1866 queue_head_t *qh = pp->pp_qh; 1867 1868 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 1869 1870 if ((td = uhci_allocate_td_from_pool(uhcip)) == NULL) { 1871 1872 return (USB_NO_RESOURCES); 1873 } 1874 1875 current_dummy = qh->td_tailp; 1876 1877 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 1878 "uhci_insert_hc_td: td %p, attrs = 0x%x", td, attrs); 1879 1880 /* 1881 * Fill in the current dummy td and 1882 * add the new dummy to the end. 1883 */ 1884 uhci_fill_in_td(uhcip, td, current_dummy, buffer_offset, 1885 hcgtd_length, pp, PID, attrs, tw); 1886 1887 /* 1888 * Allow HC hardware xfer the td, except interrupt out td. 
1889 */ 1890 if ((tw->tw_handle_td != uhci_handle_intr_td) || (PID != PID_OUT)) { 1891 1892 SetTD_status(uhcip, current_dummy, UHCI_TD_ACTIVE); 1893 } 1894 1895 /* Insert this td onto the tw */ 1896 1897 if (tw->tw_hctd_head == NULL) { 1898 ASSERT(tw->tw_hctd_tail == NULL); 1899 tw->tw_hctd_head = current_dummy; 1900 tw->tw_hctd_tail = current_dummy; 1901 } else { 1902 /* Add the td to the end of the list */ 1903 tw->tw_hctd_tail->tw_td_next = current_dummy; 1904 tw->tw_hctd_tail = current_dummy; 1905 } 1906 1907 /* 1908 * Insert the TD on to the QH. When this occurs, 1909 * the Host Controller will see the newly filled in TD 1910 */ 1911 current_dummy->outst_td_next = NULL; 1912 current_dummy->outst_td_prev = uhcip->uhci_outst_tds_tail; 1913 if (uhcip->uhci_outst_tds_head == NULL) { 1914 uhcip->uhci_outst_tds_head = current_dummy; 1915 } else { 1916 uhcip->uhci_outst_tds_tail->outst_td_next = current_dummy; 1917 } 1918 uhcip->uhci_outst_tds_tail = current_dummy; 1919 current_dummy->tw = tw; 1920 1921 return (USB_SUCCESS); 1922 } 1923 1924 1925 /* 1926 * uhci_fill_in_td: 1927 * Fill in the fields of a Transfer Descriptor (TD). 
1928 */ 1929 static void 1930 uhci_fill_in_td( 1931 uhci_state_t *uhcip, 1932 uhci_td_t *td, 1933 uhci_td_t *current_dummy, 1934 uint32_t buffer_offset, 1935 size_t length, 1936 uhci_pipe_private_t *pp, 1937 uchar_t PID, 1938 usb_req_attrs_t attrs, 1939 uhci_trans_wrapper_t *tw) 1940 { 1941 usba_pipe_handle_data_t *ph = pp->pp_pipe_handle; 1942 uint32_t buf_addr; 1943 1944 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 1945 "uhci_fill_in_td: td 0x%p buf_offs 0x%x len 0x%lx " 1946 "attrs 0x%x", td, buffer_offset, length, attrs); 1947 1948 /* 1949 * If this is an isochronous TD, just return 1950 */ 1951 if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) { 1952 1953 return; 1954 } 1955 1956 /* The maximum transfer length of UHCI cannot exceed 0x500 bytes */ 1957 ASSERT(length <= UHCI_MAX_TD_XFER_SIZE); 1958 1959 bzero((char *)td, sizeof (uhci_td_t)); /* Clear the TD */ 1960 SetTD32(uhcip, current_dummy->link_ptr, TD_PADDR(td)); 1961 1962 if (attrs & USB_ATTRS_SHORT_XFER_OK) { 1963 SetTD_spd(uhcip, current_dummy, 1); 1964 } 1965 1966 mutex_enter(&ph->p_usba_device->usb_mutex); 1967 if (ph->p_usba_device->usb_port_status == USBA_LOW_SPEED_DEV) { 1968 SetTD_ls(uhcip, current_dummy, LOW_SPEED_DEVICE); 1969 } 1970 1971 SetTD_c_err(uhcip, current_dummy, UHCI_MAX_ERR_COUNT); 1972 SetTD_mlen(uhcip, current_dummy, (length == 0) ? 
0x7ff: (length - 1)); 1973 SetTD_dtogg(uhcip, current_dummy, pp->pp_data_toggle); 1974 1975 /* Adjust the data toggle bit */ 1976 ADJ_DATA_TOGGLE(pp); 1977 1978 SetTD_devaddr(uhcip, current_dummy, ph->p_usba_device->usb_addr); 1979 SetTD_endpt(uhcip, current_dummy, 1980 ph->p_ep.bEndpointAddress & END_POINT_ADDRESS_MASK); 1981 SetTD_PID(uhcip, current_dummy, PID); 1982 SetTD_ioc(uhcip, current_dummy, INTERRUPT_ON_COMPLETION); 1983 1984 buf_addr = uhci_get_tw_paddr_by_offs(uhcip, buffer_offset, length, tw); 1985 SetTD32(uhcip, current_dummy->buffer_address, buf_addr); 1986 1987 td->qh_td_prev = current_dummy; 1988 current_dummy->qh_td_prev = NULL; 1989 pp->pp_qh->td_tailp = td; 1990 mutex_exit(&ph->p_usba_device->usb_mutex); 1991 } 1992 1993 /* 1994 * uhci_get_tw_paddr_by_offs: 1995 * Walk through the DMA cookies of a TW buffer to retrieve 1996 * the device address used for a TD. 1997 * 1998 * buffer_offset - the starting offset into the TW buffer, where the 1999 * TD should transfer from. When a TW has more than 2000 * one TD, the TDs must be filled in increasing order. 2001 */ 2002 static uint32_t 2003 uhci_get_tw_paddr_by_offs( 2004 uhci_state_t *uhcip, 2005 uint32_t buffer_offset, 2006 size_t length, 2007 uhci_trans_wrapper_t *tw) 2008 { 2009 uint32_t buf_addr; 2010 int rem_len; 2011 2012 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2013 "uhci_get_tw_paddr_by_offs: buf_offs 0x%x len 0x%lx", 2014 buffer_offset, length); 2015 2016 /* 2017 * TDs must be filled in increasing DMA offset order. 2018 * tw_dma_offs is initialized to be 0 at TW creation and 2019 * is only increased in this function. 2020 */ 2021 ASSERT(length == 0 || buffer_offset >= tw->tw_dma_offs); 2022 2023 if (length == 0) { 2024 buf_addr = 0; 2025 2026 return (buf_addr); 2027 } 2028 2029 /* 2030 * Advance to the next DMA cookie until finding the cookie 2031 * that buffer_offset falls in. 2032 * It is very likely this loop will never repeat more than 2033 * once. 
It is here just to accommodate the case buffer_offset 2034 * is increased by multiple cookies during two consecutive 2035 * calls into this function. In that case, the interim DMA 2036 * buffer is allowed to be skipped. 2037 */ 2038 while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <= 2039 buffer_offset) { 2040 /* 2041 * tw_dma_offs always points to the starting offset 2042 * of a cookie 2043 */ 2044 tw->tw_dma_offs += tw->tw_cookie.dmac_size; 2045 ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie); 2046 tw->tw_cookie_idx++; 2047 ASSERT(tw->tw_cookie_idx < tw->tw_ncookies); 2048 } 2049 2050 /* 2051 * Counting the remained buffer length to be filled in 2052 * the TDs for current DMA cookie 2053 */ 2054 rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) - 2055 buffer_offset; 2056 2057 /* Calculate the beginning address of the buffer */ 2058 ASSERT(length <= rem_len); 2059 buf_addr = (buffer_offset - tw->tw_dma_offs) + 2060 tw->tw_cookie.dmac_address; 2061 2062 USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2063 "uhci_get_tw_paddr_by_offs: dmac_addr 0x%p dmac_size " 2064 "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size, 2065 tw->tw_cookie_idx); 2066 2067 return (buf_addr); 2068 } 2069 2070 2071 /* 2072 * uhci_modify_td_active_bits: 2073 * Sets active bit in all the tds of QH to INACTIVE so that 2074 * the HC stops processing the TD's related to the QH. 
2075 */ 2076 void 2077 uhci_modify_td_active_bits( 2078 uhci_state_t *uhcip, 2079 uhci_pipe_private_t *pp) 2080 { 2081 uhci_td_t *td_head; 2082 usb_ep_descr_t *ept = &pp->pp_pipe_handle->p_ep; 2083 uhci_trans_wrapper_t *tw_head = pp->pp_tw_head; 2084 2085 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2086 "uhci_modify_td_active_bits: tw head %p", (void *)tw_head); 2087 2088 while (tw_head != NULL) { 2089 tw_head->tw_claim = UHCI_MODIFY_TD_BITS_CLAIMED; 2090 td_head = tw_head->tw_hctd_head; 2091 2092 while (td_head) { 2093 if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) { 2094 SetTD_status(uhcip, td_head, 2095 GetTD_status(uhcip, td_head) & TD_INACTIVE); 2096 } else { 2097 SetTD32(uhcip, td_head->link_ptr, 2098 GetTD32(uhcip, td_head->link_ptr) | 2099 HC_END_OF_LIST); 2100 } 2101 2102 td_head = td_head->tw_td_next; 2103 } 2104 tw_head = tw_head->tw_next; 2105 } 2106 } 2107 2108 2109 /* 2110 * uhci_insert_ctrl_td: 2111 * Create a TD and a data buffer for a control Queue Head. 2112 */ 2113 int 2114 uhci_insert_ctrl_td( 2115 uhci_state_t *uhcip, 2116 usba_pipe_handle_data_t *ph, 2117 usb_ctrl_req_t *ctrl_reqp, 2118 usb_flags_t flags) 2119 { 2120 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 2121 uhci_trans_wrapper_t *tw; 2122 size_t ctrl_buf_size; 2123 2124 USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl, 2125 "uhci_insert_ctrl_td: timeout: 0x%x", ctrl_reqp->ctrl_timeout); 2126 2127 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 2128 2129 /* 2130 * If we have a control data phase, make the data buffer start 2131 * on the next 64-byte boundary so as to ensure the DMA cookie 2132 * can fit in the multiple TDs. The buffer in the range of 2133 * [SETUP_SIZE, UHCI_CTRL_EPT_MAX_SIZE) is just for padding 2134 * and not to be transferred. 
	 */
	if (ctrl_reqp->ctrl_wLength) {
		ctrl_buf_size = UHCI_CTRL_EPT_MAX_SIZE +
		    ctrl_reqp->ctrl_wLength;
	} else {
		/* No data phase: buffer holds only the 8-byte SETUP packet */
		ctrl_buf_size = SETUP_SIZE;
	}

	/* Allocate a transaction wrapper */
	if ((tw = uhci_create_transfer_wrapper(uhcip, pp,
	    ctrl_buf_size, flags)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_ctrl_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/* A control transfer always starts with data toggle 0 */
	pp->pp_data_toggle = 0;

	tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
	tw->tw_bytes_xfered = 0;
	tw->tw_bytes_pending = ctrl_reqp->ctrl_wLength;
	tw->tw_timeout_cnt = max(UHCI_CTRL_TIMEOUT, ctrl_reqp->ctrl_timeout);

	/*
	 * Initialize the callback and any callback
	 * data for when the td completes.
	 */
	tw->tw_handle_td = uhci_handle_ctrl_td;
	tw->tw_handle_callback_value = NULL;

	if ((uhci_create_setup_pkt(uhcip, pp, tw)) != USB_SUCCESS) {
		tw->tw_ctrl_state = 0;

		/* free the transfer wrapper */
		uhci_deallocate_tw(uhcip, pp, tw);

		return (USB_NO_RESOURCES);
	}

	/* SETUP TD queued; data/status phases follow on its completion */
	tw->tw_ctrl_state = SETUP;

	return (USB_SUCCESS);
}


/*
 * uhci_create_setup_pkt:
 *	create a setup packet to initiate a control transfer.
 *
 *	OHCI driver has seen the case where devices fail if there is
 *	more than one control transfer to the device within a frame.
 *	So, the UHCI ensures that only one TD will be put on the control
 *	pipe to one device (to be consistent with OHCI driver).
 */
static int
uhci_create_setup_pkt(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw)
{
	int		sdata;
	usb_ctrl_req_t	*req = (usb_ctrl_req_t *)tw->tw_curr_xfer_reqp;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_create_setup_pkt: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%p",
	    req->ctrl_bmRequestType, req->ctrl_bRequest, req->ctrl_wValue,
	    req->ctrl_wIndex, req->ctrl_wLength, (void *)req->ctrl_data);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
	ASSERT(tw != NULL);

	/* Create the first four bytes of the setup packet */
	sdata = (req->ctrl_bmRequestType | (req->ctrl_bRequest << 8) |
	    (req->ctrl_wValue << 16));
	/* DMA buffer is little-endian per dev_attr; ddi_put32 handles swap */
	ddi_put32(tw->tw_accesshandle, (uint_t *)tw->tw_buf, sdata);

	/* Create the second four bytes */
	sdata = (uint32_t)(req->ctrl_wIndex | (req->ctrl_wLength << 16));
	ddi_put32(tw->tw_accesshandle,
	    (uint_t *)(tw->tw_buf + sizeof (uint_t)), sdata);

	/*
	 * The TD's are placed on the QH one at a time.
	 * Once this TD is placed on the done list, the
	 * data or status phase TD will be enqueued.
	 */
	if ((uhci_insert_hc_td(uhcip, 0, SETUP_SIZE,
	    pp, tw, PID_SETUP, req->ctrl_attributes)) != USB_SUCCESS) {

		return (USB_NO_RESOURCES);
	}

	USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "Create_setup: pp = 0x%p, attrs = 0x%x", pp, req->ctrl_attributes);

	/*
	 * If this control transfer has a data phase, record the
	 * direction. If the data phase is an OUT transaction ,
	 * copy the data into the buffer of the transfer wrapper.
	 */
	if (req->ctrl_wLength != 0) {
		/* There is a data stage.
		Find the direction */
		if (req->ctrl_bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
			tw->tw_direction = PID_IN;
		} else {
			tw->tw_direction = PID_OUT;

			/* Copy the data into the buffer */
			ddi_rep_put8(tw->tw_accesshandle,
			    req->ctrl_data->b_rptr,
			    (uint8_t *)(tw->tw_buf + UHCI_CTRL_EPT_MAX_SIZE),
			    req->ctrl_wLength,
			    DDI_DEV_AUTOINCR);
		}
	}

	return (USB_SUCCESS);
}


/*
 * uhci_create_stats:
 *	Allocate and initialize the uhci kstat structures
 */
void
uhci_create_stats(uhci_state_t *uhcip)
{
	int			i;
	char			kstatname[KSTAT_STRLEN];
	/* Index order must track the USB_N_COUNT_KSTATS usage below */
	char			*usbtypes[USB_N_COUNT_KSTATS] =
	    {"ctrl", "isoch", "bulk", "intr"};
	uint_t			instance = uhcip->uhci_instance;
	const char		*dname = ddi_driver_name(uhcip->uhci_dip);
	uhci_intrs_stats_t	*isp;

	/* Per-instance interrupt-cause counters, e.g. "uhci0,intrs" */
	if (UHCI_INTRS_STATS(uhcip) == NULL) {
		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
		    dname, instance);
		UHCI_INTRS_STATS(uhcip) = kstat_create("usba", instance,
		    kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
		    sizeof (uhci_intrs_stats_t) / sizeof (kstat_named_t),
		    KSTAT_FLAG_PERSISTENT);

		if (UHCI_INTRS_STATS(uhcip) != NULL) {
			isp = UHCI_INTRS_STATS_DATA(uhcip);
			kstat_named_init(&isp->uhci_intrs_hc_halted,
			    "HC Halted", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->uhci_intrs_hc_process_err,
			    "HC Process Errors", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->uhci_intrs_host_sys_err,
			    "Host Sys Errors", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->uhci_intrs_resume_detected,
			    "Resume Detected", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->uhci_intrs_usb_err_intr,
			    "USB Error", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->uhci_intrs_usb_intr,
			    "USB Interrupts", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->uhci_intrs_total,
			    "Total Interrupts", KSTAT_DATA_UINT64);
			kstat_named_init(&isp->uhci_intrs_not_claimed,
			    "Not Claimed", KSTAT_DATA_UINT64);

			UHCI_INTRS_STATS(uhcip)->ks_private = uhcip;
			UHCI_INTRS_STATS(uhcip)->ks_update = nulldev;
			kstat_install(UHCI_INTRS_STATS(uhcip));
		}
	}

	/* Aggregate byte-count kstat, e.g. "uhci0,total" */
	if (UHCI_TOTAL_STATS(uhcip) == NULL) {
		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
		    dname, instance);
		UHCI_TOTAL_STATS(uhcip) = kstat_create("usba", instance,
		    kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
		    KSTAT_FLAG_PERSISTENT);

		if (UHCI_TOTAL_STATS(uhcip) != NULL) {
			kstat_install(UHCI_TOTAL_STATS(uhcip));
		}
	}

	/* One byte-count kstat per transfer type (ctrl/isoch/bulk/intr) */
	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
		if (uhcip->uhci_count_stats[i] == NULL) {
			(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
			    dname, instance, usbtypes[i]);
			uhcip->uhci_count_stats[i] = kstat_create("usba",
			    instance, kstatname, "usb_byte_count",
			    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);

			if (uhcip->uhci_count_stats[i] != NULL) {
				kstat_install(uhcip->uhci_count_stats[i]);
			}
		}
	}
}


/*
 * uhci_destroy_stats:
 *	Clean up uhci kstat structures
 */
void
uhci_destroy_stats(uhci_state_t *uhcip)
{
	int	i;

	if (UHCI_INTRS_STATS(uhcip)) {
		kstat_delete(UHCI_INTRS_STATS(uhcip));
		UHCI_INTRS_STATS(uhcip) = NULL;
	}

	if (UHCI_TOTAL_STATS(uhcip)) {
		kstat_delete(UHCI_TOTAL_STATS(uhcip));
		UHCI_TOTAL_STATS(uhcip) = NULL;
	}

	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
		if (uhcip->uhci_count_stats[i]) {
			kstat_delete(uhcip->uhci_count_stats[i]);
			uhcip->uhci_count_stats[i] = NULL;
		}
	}
}


/*
 * uhci_do_intrs_stats:
 *	Bump the interrupt kstat counter matching the USBSTS cause "val".
 */
void
uhci_do_intrs_stats(uhci_state_t *uhcip, int val)
{
	/* Stats are optional; silently skip if kstat creation failed */
	if (UHCI_INTRS_STATS(uhcip) == NULL) {

		return;
	}

	UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_total.value.ui64++;
	switch (val) {
	case USBSTS_REG_HC_HALTED:
		UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_hc_halted.value.ui64++;
		break;
	case USBSTS_REG_HC_PROCESS_ERR:
		UHCI_INTRS_STATS_DATA(uhcip)->
		    uhci_intrs_hc_process_err.value.ui64++;
		break;
	case USBSTS_REG_HOST_SYS_ERR:
		UHCI_INTRS_STATS_DATA(uhcip)->
		    uhci_intrs_host_sys_err.value.ui64++;
		break;
	case USBSTS_REG_RESUME_DETECT:
		UHCI_INTRS_STATS_DATA(uhcip)->
		    uhci_intrs_resume_detected.value.ui64++;
		break;
	case USBSTS_REG_USB_ERR_INTR:
		UHCI_INTRS_STATS_DATA(uhcip)->
		    uhci_intrs_usb_err_intr.value.ui64++;
		break;
	case USBSTS_REG_USB_INTR:
		UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_usb_intr.value.ui64++;
		break;
	default:
		/* Interrupt not generated by one of the known USBSTS bits */
		UHCI_INTRS_STATS_DATA(uhcip)->
		    uhci_intrs_not_claimed.value.ui64++;
		break;
	}
}


/*
 * uhci_do_byte_stats:
 *	Account "len" transferred bytes against the total and per-type
 *	byte-count kstats; "attr"/"addr" come from the endpoint descriptor
 *	and select transfer type and direction.
 */
void
uhci_do_byte_stats(uhci_state_t *uhcip, size_t len, uint8_t attr, uint8_t addr)
{
	uint8_t type = attr & USB_EP_ATTR_MASK;
	uint8_t dir = addr & USB_EP_DIR_MASK;

	switch (dir) {
	case USB_EP_DIR_IN:
		UHCI_TOTAL_STATS_DATA(uhcip)->reads++;
		UHCI_TOTAL_STATS_DATA(uhcip)->nread += len;
		switch (type) {
		case USB_EP_ATTR_CONTROL:
			UHCI_CTRL_STATS(uhcip)->reads++;
			UHCI_CTRL_STATS(uhcip)->nread += len;
			break;
		case USB_EP_ATTR_BULK:
			UHCI_BULK_STATS(uhcip)->reads++;
			UHCI_BULK_STATS(uhcip)->nread += len;
			break;
		case USB_EP_ATTR_INTR:
			UHCI_INTR_STATS(uhcip)->reads++;
			UHCI_INTR_STATS(uhcip)->nread += len;
			break;
		case USB_EP_ATTR_ISOCH:
			UHCI_ISOC_STATS(uhcip)->reads++;
			UHCI_ISOC_STATS(uhcip)->nread += len;
			break;
		}
		break;
	case USB_EP_DIR_OUT:
		UHCI_TOTAL_STATS_DATA(uhcip)->writes++;
		UHCI_TOTAL_STATS_DATA(uhcip)->nwritten += len;
		switch (type) {
		case USB_EP_ATTR_CONTROL:
			UHCI_CTRL_STATS(uhcip)->writes++;
			UHCI_CTRL_STATS(uhcip)->nwritten += len;
			break;
		case USB_EP_ATTR_BULK:
UHCI_BULK_STATS(uhcip)->writes++; 2439 UHCI_BULK_STATS(uhcip)->nwritten += len; 2440 break; 2441 case USB_EP_ATTR_INTR: 2442 UHCI_INTR_STATS(uhcip)->writes++; 2443 UHCI_INTR_STATS(uhcip)->nwritten += len; 2444 break; 2445 case USB_EP_ATTR_ISOCH: 2446 UHCI_ISOC_STATS(uhcip)->writes++; 2447 UHCI_ISOC_STATS(uhcip)->nwritten += len; 2448 break; 2449 } 2450 break; 2451 } 2452 } 2453 2454 2455 /* 2456 * uhci_free_tw: 2457 * Free the Transfer Wrapper (TW). 2458 */ 2459 void 2460 uhci_free_tw(uhci_state_t *uhcip, uhci_trans_wrapper_t *tw) 2461 { 2462 int rval, i; 2463 2464 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, "uhci_free_tw:"); 2465 2466 ASSERT(tw != NULL); 2467 2468 if (tw->tw_isoc_strtlen > 0) { 2469 ASSERT(tw->tw_isoc_bufs != NULL); 2470 for (i = 0; i < tw->tw_ncookies; i++) { 2471 rval = ddi_dma_unbind_handle( 2472 tw->tw_isoc_bufs[i].dma_handle); 2473 ASSERT(rval == USB_SUCCESS); 2474 ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle); 2475 ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle); 2476 } 2477 kmem_free(tw->tw_isoc_bufs, tw->tw_isoc_strtlen); 2478 } else if (tw->tw_dmahandle != NULL) { 2479 rval = ddi_dma_unbind_handle(tw->tw_dmahandle); 2480 ASSERT(rval == DDI_SUCCESS); 2481 2482 ddi_dma_mem_free(&tw->tw_accesshandle); 2483 ddi_dma_free_handle(&tw->tw_dmahandle); 2484 } 2485 2486 kmem_free(tw, sizeof (uhci_trans_wrapper_t)); 2487 } 2488 2489 2490 /* 2491 * uhci_deallocate_tw: 2492 * Deallocate of a Transaction Wrapper (TW) and this involves 2493 * the freeing of DMA resources. 2494 */ 2495 void 2496 uhci_deallocate_tw(uhci_state_t *uhcip, 2497 uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw) 2498 { 2499 uhci_trans_wrapper_t *head; 2500 2501 USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, 2502 "uhci_deallocate_tw:"); 2503 2504 /* 2505 * If the transfer wrapper has no Host Controller (HC) 2506 * Transfer Descriptors (TD) associated with it, then 2507 * remove the transfer wrapper. 
	 * The transfers are done
	 * in FIFO order, so this should be the first transfer
	 * wrapper on the list.
	 */
	if (tw->tw_hctd_head != NULL) {
		/* TDs still outstanding on the HC; cannot free yet */
		ASSERT(tw->tw_hctd_tail != NULL);

		return;
	}

	ASSERT(tw->tw_hctd_tail == NULL);
	ASSERT(pp->pp_tw_head != NULL);

	/*
	 * If pp->pp_tw_head is NULL, set the tail also to NULL.
	 */
	head = pp->pp_tw_head;

	if (head == tw) {
		pp->pp_tw_head = head->tw_next;
		if (pp->pp_tw_head == NULL) {
			pp->pp_tw_tail = NULL;
		}
	} else {
		/* Unlink tw from the middle/end of the singly linked list */
		while (head->tw_next != tw)
			head = head->tw_next;
		head->tw_next = tw->tw_next;
		if (tw->tw_next == NULL) {
			pp->pp_tw_tail = head;
		}
	}
	uhci_free_tw(uhcip, tw);
}


/*
 * uhci_delete_td:
 *	Detach a TD from both the controller-wide outstanding-TD list
 *	and its transfer wrapper's TD list, then mark it free.
 */
void
uhci_delete_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uhci_td_t		*tmp_td;
	uhci_trans_wrapper_t	*tw = td->tw;

	/* Unlink from the doubly linked outstanding-TD list */
	if ((td->outst_td_next == NULL) && (td->outst_td_prev == NULL)) {
		uhcip->uhci_outst_tds_head = NULL;
		uhcip->uhci_outst_tds_tail = NULL;
	} else if (td->outst_td_next == NULL) {
		td->outst_td_prev->outst_td_next = NULL;
		uhcip->uhci_outst_tds_tail = td->outst_td_prev;
	} else if (td->outst_td_prev == NULL) {
		td->outst_td_next->outst_td_prev = NULL;
		uhcip->uhci_outst_tds_head = td->outst_td_next;
	} else {
		td->outst_td_prev->outst_td_next = td->outst_td_next;
		td->outst_td_next->outst_td_prev = td->outst_td_prev;
	}

	tmp_td = tw->tw_hctd_head;

	/* Unlink from the tw's singly linked TD list */
	if (tmp_td != td) {
		while (tmp_td->tw_td_next != td) {
			tmp_td = tmp_td->tw_td_next;
		}
		ASSERT(tmp_td);
		tmp_td->tw_td_next = td->tw_td_next;
		if (td->tw_td_next == NULL) {
			tw->tw_hctd_tail = tmp_td;
		}
	} else {
		tw->tw_hctd_head = tw->tw_hctd_head->tw_td_next;
		if (tw->tw_hctd_head == NULL) {
			tw->tw_hctd_tail = NULL;
		}
	}

	td->flag = TD_FLAG_FREE;
}


void
uhci_remove_tds_tws(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph)
{
	usb_opaque_t		curr_reqp;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	usb_ep_descr_t		*ept = &pp->pp_pipe_handle->p_ep;
	uhci_trans_wrapper_t	*tw_tmp;
	uhci_trans_wrapper_t	*tw_head = pp->pp_tw_head;

	/*
	 * Flush every transfer wrapper queued on this pipe: complete the
	 * pending request back to the client, delete the TDs, and free
	 * the wrapper.
	 */
	while (tw_head != NULL) {
		tw_tmp = tw_head;
		tw_head = tw_head->tw_next;

		curr_reqp = tw_tmp->tw_curr_xfer_reqp;
		if (curr_reqp) {
			/* do this for control/bulk/intr */
			if ((tw_tmp->tw_direction == PID_IN) &&
			    (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_INTR)) {
				uhci_deallocate_periodic_in_resource(uhcip,
				    pp, tw_tmp);
			} else {
				uhci_hcdi_callback(uhcip, pp,
				    pp->pp_pipe_handle, tw_tmp, USB_CR_FLUSHED);
			}
		} /* end of curr_reqp */

		/*
		 * Only reap TWs that uhci_modify_td_active_bits() has
		 * claimed (i.e. whose TDs were stopped on the HC).
		 */
		if (tw_tmp->tw_claim != UHCI_MODIFY_TD_BITS_CLAIMED) {
			continue;
		}

		while (tw_tmp->tw_hctd_head != NULL) {
			uhci_delete_td(uhcip, tw_tmp->tw_hctd_head);
		}

		uhci_deallocate_tw(uhcip, pp, tw_tmp);
	}
}


/*
 * uhci_remove_qh:
 *	Remove the Queue Head from the Host Controller's
 *	appropriate QH list.
 */
void
uhci_remove_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uhci_td_t	*dummy_td;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_remove_qh:");

	/* Release the dummy TD parked at the tail of this QH */
	dummy_td = pp->pp_qh->td_tailp;
	dummy_td->flag = TD_FLAG_FREE;

	/*
	 * NOTE(review): no USB_EP_ATTR_ISOCH case here — presumably isoc
	 * pipes do not use a QH and are torn down elsewhere; confirm
	 * against the isoc close path.
	 */
	switch (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep)) {
	case USB_EP_ATTR_CONTROL:
		uhci_remove_ctrl_qh(uhcip, pp);
		break;
	case USB_EP_ATTR_BULK:
		uhci_remove_bulk_qh(uhcip, pp);
		break;
	case USB_EP_ATTR_INTR:
		uhci_remove_intr_qh(uhcip, pp);
		break;
	}
}


/*
 * uhci_remove_intr_qh:
 *	Unlink an interrupt QH from the lattice and mark it free.
 */
static void
uhci_remove_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t   *qh = pp->pp_qh;
	queue_head_t   *next_lattice_qh =
	    QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);

	/* Bypass this QH in the hardware-visible link chain */
	qh->prev_qh->link_ptr = qh->link_ptr;
	next_lattice_qh->prev_qh = qh->prev_qh;
	qh->qh_flag = QUEUE_HEAD_FLAG_FREE;

}

/*
 * uhci_remove_bulk_qh:
 *	Remove a bulk QH from the Host Controller's QH list. There may be a
 *	loop for bulk QHs, we must care about this while removing a bulk QH.
 */
static void
uhci_remove_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t   *qh = pp->pp_qh;
	queue_head_t   *next_lattice_qh;
	uint32_t	paddr;

	paddr = (GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);
	next_lattice_qh = (qh == uhcip->uhci_bulk_xfers_q_tail) ?
	    0 : QH_VADDR(paddr);

	/*
	 * Last bulk QH with only the static head before it: terminate the
	 * head's link pointer instead of splicing (breaks the bulk loop).
	 */
	if ((qh == uhcip->uhci_bulk_xfers_q_tail) &&
	    (qh->prev_qh == uhcip->uhci_bulk_xfers_q_head)) {
		SetQH32(uhcip, qh->prev_qh->link_ptr, HC_END_OF_LIST);
	} else {
		qh->prev_qh->link_ptr = qh->link_ptr;
	}

	if (next_lattice_qh == NULL) {
		uhcip->uhci_bulk_xfers_q_tail = qh->prev_qh;
	} else {
		next_lattice_qh->prev_qh = qh->prev_qh;
	}

	qh->qh_flag = QUEUE_HEAD_FLAG_FREE;

}


/*
 * uhci_remove_ctrl_qh:
 *	Unlink a control QH from the control QH list and mark it free;
 *	updates the list tail if this QH was the last one.
 */
static void
uhci_remove_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t   *qh = pp->pp_qh;
	queue_head_t   *next_lattice_qh =
	    QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);

	qh->prev_qh->link_ptr = qh->link_ptr;
	/*
	 * NOTE(review): a NULL prev_qh on the successor is used to detect
	 * the end of the control list here — confirm the tail QH's prev_qh
	 * is the intended sentinel.
	 */
	if (next_lattice_qh->prev_qh != NULL) {
		next_lattice_qh->prev_qh = qh->prev_qh;
	} else {
		uhcip->uhci_ctrl_xfers_q_tail = qh->prev_qh;
	}

	qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
}


/*
 * uhci_allocate_td_from_pool:
 *	Allocate a Transfer Descriptor (TD) from the TD buffer pool.
 */
static uhci_td_t *
uhci_allocate_td_from_pool(uhci_state_t *uhcip)
{
	int		index;
	uhci_td_t	*td;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Search for a blank Transfer Descriptor (TD)
	 * in the TD buffer pool.
	 */
	for (index = 0; index < uhci_td_pool_size; index ++) {
		if (uhcip->uhci_td_pool_addr[index].flag == TD_FLAG_FREE) {
			break;
		}
	}

	if (index == uhci_td_pool_size) {
		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_allocate_td_from_pool: TD exhausted");

		return (NULL);
	}

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_allocate_td_from_pool: Allocated %d", index);

	/* Create a new dummy for the end of the TD list */
	td = &uhcip->uhci_td_pool_addr[index];

	/* Mark the newly allocated TD as a dummy */
	td->flag = TD_FLAG_DUMMY;
	td->qh_td_prev = NULL;

	return (td);
}


/*
 * uhci_insert_bulk_td:
 *	Set up a transfer wrapper and a chain of TDs for a bulk request
 *	and hand them to the HC via the pipe's queue head.
 */
int
uhci_insert_bulk_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_bulk_req_t		*req,
	usb_flags_t		flags)
{
	size_t			length;
	uint_t			mps;	/* MaxPacketSize */
	uint_t			num_bulk_tds, i, j;
	uint32_t		buf_offs;
	uhci_td_t		*bulk_td_ptr;
	uhci_td_t		*current_dummy, *tmp_td;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;
	uhci_bulk_isoc_xfer_t	*bulk_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_bulk_td: req: 0x%p, flags = 0x%x", req, flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Create transfer wrapper
	 */
	if ((tw = uhci_create_transfer_wrapper(uhcip, pp, req->bulk_len,
	    flags)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_bulk_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	tw->tw_bytes_xfered = 0;
	tw->tw_bytes_pending = req->bulk_len;
	tw->tw_handle_td = uhci_handle_bulk_td;
	tw->tw_handle_callback_value = (usb_opaque_t)req->bulk_data;
	tw->tw_timeout_cnt =
	    req->bulk_timeout;
	tw->tw_data = req->bulk_data;
	tw->tw_curr_xfer_reqp = (usb_opaque_t)req;

	/* Get the bulk pipe direction */
	tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
	    PID_OUT : PID_IN;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_bulk_td: direction: 0x%x", tw->tw_direction);

	/* If the DATA OUT, copy the data into transfer buffer. */
	if (tw->tw_direction == PID_OUT) {
		ASSERT(req->bulk_data != NULL);

		/* Copy the data into the message */
		ddi_rep_put8(tw->tw_accesshandle, req->bulk_data->b_rptr,
		    (uint8_t *)tw->tw_buf, req->bulk_len, DDI_DEV_AUTOINCR);
	}

	/* Get the max packet size. */
	length = mps = pp->pp_pipe_handle->p_ep.wMaxPacketSize;

	/*
	 * Calculate number of TD's to insert in the current frame interval.
	 * Max number TD's allowed (driver implementation) is 128
	 * in one frame interval. Once all the TD's are completed
	 * then the remaining TD's will be inserted into the lattice
	 * in the uhci_handle_bulk_td().
	 */
	if ((tw->tw_bytes_pending / mps) >= MAX_NUM_BULK_TDS_PER_XFER) {
		num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
	} else {
		num_bulk_tds = (tw->tw_bytes_pending / mps);

		/* Short final packet gets its own TD of the residual length */
		if (tw->tw_bytes_pending % mps) {
			num_bulk_tds++;
			length = (tw->tw_bytes_pending % mps);
		}
	}

	/*
	 * Allocate memory for the bulk xfer information structure
	 */
	if ((bulk_xfer_info = kmem_zalloc(
	    sizeof (uhci_bulk_isoc_xfer_t), KM_NOSLEEP)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_bulk_td: kmem_zalloc failed");

		/* Free the transfer wrapper */
		uhci_deallocate_tw(uhcip, pp, tw);

		return (USB_FAILURE);
	}

	/* Allocate memory for the bulk TD's */
	if (uhci_alloc_bulk_isoc_tds(uhcip, num_bulk_tds, bulk_xfer_info) !=
	    USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_bulk_td: alloc_bulk_isoc_tds failed");

		kmem_free(bulk_xfer_info, sizeof (uhci_bulk_isoc_xfer_t));

		/* Free the transfer wrapper */
		uhci_deallocate_tw(uhcip, pp, tw);

		return (USB_FAILURE);
	}

	td_pool_ptr = &bulk_xfer_info->td_pools[0];
	bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
	bulk_td_ptr[0].qh_td_prev = NULL;
	current_dummy = pp->pp_qh->td_tailp;
	buf_offs = 0;
	pp->pp_qh->bulk_xfer_info = bulk_xfer_info;

	/* Fill up all the bulk TD's */
	for (i = 0; i < bulk_xfer_info->num_pools; i++) {
		/* Chain every TD in this pool to its successor */
		for (j = 0; j < (td_pool_ptr->num_tds - 1); j++) {
			uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
			    &bulk_td_ptr[j+1], BULKTD_PADDR(td_pool_ptr,
			    &bulk_td_ptr[j+1]), ph, buf_offs, mps, tw);
			buf_offs += mps;
		}

		/* fill in the last TD */
		if (i == (bulk_xfer_info->num_pools - 1)) {
			uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
			    current_dummy, TD_PADDR(current_dummy),
			    ph, buf_offs, length, tw);
		} else
		{
			/* fill in the TD at the tail of a pool */
			tmp_td = &bulk_td_ptr[j];
			td_pool_ptr = &bulk_xfer_info->td_pools[i + 1];
			bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
			/* Cross-link the pool tail TD to the next pool's head */
			uhci_fill_in_bulk_isoc_td(uhcip, tmp_td,
			    &bulk_td_ptr[0], BULKTD_PADDR(td_pool_ptr,
			    &bulk_td_ptr[0]), ph, buf_offs, mps, tw);
			buf_offs += mps;
		}
	}

	bulk_xfer_info->num_tds = num_bulk_tds;

	/*
	 * Point the end of the lattice tree to the start of the bulk xfers
	 * queue head. This allows the HC to execute the same Queue Head/TD
	 * in the same frame. There are some bulk devices, which NAKs after
	 * completing each TD. As a result, the performance on such devices
	 * is very bad. This loop will provide a chance to execute NAk'ed
	 * bulk TDs again in the same frame.
	 */
	if (uhcip->uhci_pending_bulk_cmds++ == 0) {
		uhcip->uhci_bulk_xfers_q_tail->link_ptr =
		    uhcip->uhci_bulk_xfers_q_head->link_ptr;
		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_insert_bulk_td: count = %d no tds %d",
		    uhcip->uhci_pending_bulk_cmds, num_bulk_tds);
	}

	/* Insert on the bulk queue head for the execution by HC */
	SetQH32(uhcip, pp->pp_qh->element_ptr,
	    bulk_xfer_info->td_pools[0].cookie.dmac_address);

	return (USB_SUCCESS);
}


/*
 * uhci_fill_in_bulk_isoc_td
 *	Fills the bulk/isoc TD
 *
 *	offset - different meanings for bulk and isoc TDs:
 *		  starting offset into the TW buffer for a bulk TD
 *		  and the index into the isoc packet list for an isoc TD
 */
void
uhci_fill_in_bulk_isoc_td(uhci_state_t *uhcip, uhci_td_t *current_td,
	uhci_td_t		*next_td,
	uint32_t		next_td_paddr,
	usba_pipe_handle_data_t	*ph,
	uint_t			offset,
	uint_t			length,
	uhci_trans_wrapper_t	*tw)
{
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	usb_ep_descr_t		*ept =
	    &pp->pp_pipe_handle->p_ep;
	uint32_t		buf_addr;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_fill_in_bulk_isoc_td: tw 0x%p offs 0x%x length 0x%x",
	    tw, offset, length);

	bzero((char *)current_td, sizeof (uhci_td_t));
	SetTD32(uhcip, current_td->link_ptr, next_td_paddr | HC_DEPTH_FIRST);

	/* Honor USB_ATTRS_SHORT_XFER_OK by setting the SPD bit in the TD */
	switch (UHCI_XFER_TYPE(ept)) {
	case USB_EP_ATTR_ISOCH:
		if (((usb_isoc_req_t *)tw->tw_curr_xfer_reqp)->isoc_attributes
		    & USB_ATTRS_SHORT_XFER_OK) {
			SetTD_spd(uhcip, current_td, 1);
		}
		break;
	case USB_EP_ATTR_BULK:
		if (((usb_bulk_req_t *)tw->tw_curr_xfer_reqp)->bulk_attributes
		    & USB_ATTRS_SHORT_XFER_OK) {
			SetTD_spd(uhcip, current_td, 1);
		}
		break;
	}

	mutex_enter(&ph->p_usba_device->usb_mutex);

	SetTD_c_err(uhcip, current_td, UHCI_MAX_ERR_COUNT);
	SetTD_status(uhcip, current_td, UHCI_TD_ACTIVE);
	SetTD_ioc(uhcip, current_td, INTERRUPT_ON_COMPLETION);
	/* UHCI MaxLen field is (bytes - 1) */
	SetTD_mlen(uhcip, current_td, (length - 1));
	SetTD_dtogg(uhcip, current_td, pp->pp_data_toggle);
	SetTD_devaddr(uhcip, current_td, ph->p_usba_device->usb_addr);
	SetTD_endpt(uhcip, current_td, ph->p_ep.bEndpointAddress &
	    END_POINT_ADDRESS_MASK);
	SetTD_PID(uhcip, current_td, tw->tw_direction);

	/*
	 * Get the right buffer address for the current TD.
	 * NOTE(review): no default case — buf_addr stays uninitialized for
	 * other endpoint types; presumably this is only ever called for
	 * bulk/isoc pipes (per the function name) — confirm at call sites.
	 */
	switch (UHCI_XFER_TYPE(ept)) {
	case USB_EP_ATTR_ISOCH:
		buf_addr = tw->tw_isoc_bufs[offset].cookie.dmac_address;
		break;
	case USB_EP_ATTR_BULK:
		buf_addr = uhci_get_tw_paddr_by_offs(uhcip, offset,
		    length, tw);
		break;
	}
	SetTD32(uhcip, current_td->buffer_address, buf_addr);

	/*
	 * Adjust the data toggle.
	 * The data toggle bit must always be 0 for isoc transfers.
	 * And set the "iso" bit in the TD for isoc transfers.
	 */
	if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) {
		pp->pp_data_toggle = 0;
		SetTD_iso(uhcip, current_td, 1);
	} else {
		ADJ_DATA_TOGGLE(pp);
		next_td->qh_td_prev = current_td;
		pp->pp_qh->td_tailp = next_td;
	}

	/* Append to the controller-wide outstanding-TD list */
	current_td->outst_td_next = NULL;
	current_td->outst_td_prev = uhcip->uhci_outst_tds_tail;
	if (uhcip->uhci_outst_tds_head == NULL) {
		uhcip->uhci_outst_tds_head = current_td;
	} else {
		uhcip->uhci_outst_tds_tail->outst_td_next = current_td;
	}
	uhcip->uhci_outst_tds_tail = current_td;
	current_td->tw = tw;

	if (tw->tw_hctd_head == NULL) {
		ASSERT(tw->tw_hctd_tail == NULL);
		tw->tw_hctd_head = current_td;
		tw->tw_hctd_tail = current_td;
	} else {
		/* Add the td to the end of the list */
		tw->tw_hctd_tail->tw_td_next = current_td;
		tw->tw_hctd_tail = current_td;
	}

	mutex_exit(&ph->p_usba_device->usb_mutex);
}


/*
 * uhci_alloc_bulk_isoc_tds:
 *	- Allocates the isoc/bulk TD pools. It will allocate one whole
 *	  pool to store all the TDs if the system allows. Only when the
 *	  first allocation fails, it tries to allocate several small
 *	  pools with each pool limited in physical page size.
 */
static int
uhci_alloc_bulk_isoc_tds(
	uhci_state_t		*uhcip,
	uint_t			num_tds,
	uhci_bulk_isoc_xfer_t	*info)
{
	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_alloc_bulk_isoc_tds: num_tds: 0x%x info: 0x%p",
	    num_tds, info);

	info->num_pools = 1;
	/* allocate as a whole pool at the first time */
	if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) !=
	    USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "alloc_memory_for_tds failed: num_tds %d num_pools %d",
		    num_tds, info->num_pools);

		/* reduce the td number per pool and alloc again */
		info->num_pools = num_tds / UHCI_MAX_TD_NUM_PER_POOL;
		/* Round up so a partial pool holds the remainder */
		if (num_tds % UHCI_MAX_TD_NUM_PER_POOL) {
			info->num_pools++;
		}

		if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) !=
		    USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "alloc_memory_for_tds failed: num_tds %d "
			    "num_pools %d", num_tds, info->num_pools);

			return (USB_NO_RESOURCES);
		}
	}

	return (USB_SUCCESS);
}


/*
 * uhci_alloc_memory_for_tds:
 *	- Allocates memory for the isoc/bulk td pools.
3087 */ 3088 static int 3089 uhci_alloc_memory_for_tds( 3090 uhci_state_t *uhcip, 3091 uint_t num_tds, 3092 uhci_bulk_isoc_xfer_t *info) 3093 { 3094 int result, i, j, err; 3095 size_t real_length; 3096 uint_t ccount, num; 3097 ddi_device_acc_attr_t dev_attr; 3098 uhci_bulk_isoc_td_pool_t *td_pool_ptr1, *td_pool_ptr2; 3099 3100 USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3101 "uhci_alloc_memory_for_tds: num_tds: 0x%x info: 0x%p " 3102 "num_pools: %u", num_tds, info, info->num_pools); 3103 3104 /* The host controller will be little endian */ 3105 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 3106 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 3107 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 3108 3109 /* Allocate the TD pool structures */ 3110 if ((info->td_pools = kmem_zalloc( 3111 (sizeof (uhci_bulk_isoc_td_pool_t) * info->num_pools), 3112 KM_SLEEP)) == NULL) { 3113 USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, 3114 "uhci_alloc_memory_for_tds: alloc td_pools failed"); 3115 3116 return (USB_FAILURE); 3117 } 3118 3119 for (i = 0; i < info->num_pools; i++) { 3120 if (info->num_pools == 1) { 3121 num = num_tds; 3122 } else if (i < (info->num_pools - 1)) { 3123 num = UHCI_MAX_TD_NUM_PER_POOL; 3124 } else { 3125 num = (num_tds % UHCI_MAX_TD_NUM_PER_POOL); 3126 } 3127 3128 td_pool_ptr1 = &info->td_pools[i]; 3129 3130 /* Allocate the bulk TD pool DMA handle */ 3131 if (ddi_dma_alloc_handle(uhcip->uhci_dip, 3132 &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0, 3133 &td_pool_ptr1->dma_handle) != DDI_SUCCESS) { 3134 3135 for (j = 0; j < i; j++) { 3136 td_pool_ptr2 = &info->td_pools[j]; 3137 result = ddi_dma_unbind_handle( 3138 td_pool_ptr2->dma_handle); 3139 ASSERT(result == DDI_SUCCESS); 3140 ddi_dma_mem_free(&td_pool_ptr2->mem_handle); 3141 ddi_dma_free_handle(&td_pool_ptr2->dma_handle); 3142 } 3143 3144 kmem_free(info->td_pools, 3145 (sizeof (uhci_bulk_isoc_td_pool_t) * 3146 info->num_pools)); 3147 3148 return (USB_FAILURE); 3149 } 3150 
		/* Allocate the memory for the bulk TD pool */
		if (ddi_dma_mem_alloc(td_pool_ptr1->dma_handle,
		    num * sizeof (uhci_td_t), &dev_attr,
		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
		    &td_pool_ptr1->pool_addr, &real_length,
		    &td_pool_ptr1->mem_handle) != DDI_SUCCESS) {

			ddi_dma_free_handle(&td_pool_ptr1->dma_handle);

			/* Unwind every pool already fully set up */
			for (j = 0; j < i; j++) {
				td_pool_ptr2 = &info->td_pools[j];
				result = ddi_dma_unbind_handle(
				    td_pool_ptr2->dma_handle);
				ASSERT(result == DDI_SUCCESS);
				ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
				ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
			}

			kmem_free(info->td_pools,
			    (sizeof (uhci_bulk_isoc_td_pool_t) *
			    info->num_pools));

			return (USB_FAILURE);
		}

		/* Map the bulk TD pool into the I/O address space */
		result = ddi_dma_addr_bind_handle(td_pool_ptr1->dma_handle,
		    NULL, (caddr_t)td_pool_ptr1->pool_addr, real_length,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
		    &td_pool_ptr1->cookie, &ccount);

		/* Process the result */
		err = USB_SUCCESS;

		if (result != DDI_DMA_MAPPED) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_allocate_memory_for_tds: Result = %d",
			    result);
			uhci_decode_ddi_dma_addr_bind_handle_result(uhcip,
			    result);

			err = USB_FAILURE;
		}

		/* TDs are addressed by a single 32-bit base; need 1 cookie */
		if ((result == DDI_DMA_MAPPED) && (ccount != 1)) {
			/* The cookie count should be 1 */
			USB_DPRINTF_L2(PRINT_MASK_ATTA,
			    uhcip->uhci_log_hdl,
			    "uhci_allocate_memory_for_tds: "
			    "More than 1 cookie");

			result = ddi_dma_unbind_handle(
			    td_pool_ptr1->dma_handle);
			ASSERT(result == DDI_SUCCESS);

			err = USB_FAILURE;
		}

		if (err == USB_FAILURE) {

			ddi_dma_mem_free(&td_pool_ptr1->mem_handle);
			ddi_dma_free_handle(&td_pool_ptr1->dma_handle);

			/* Unwind every pool already fully set up */
			for (j = 0; j < i; j++) {
				td_pool_ptr2 = &info->td_pools[j];
				result =
/*
 * uhci_handle_bulk_td:
 *
 * Handles the completed bulk transfer descriptors
 *
 * Called from interrupt context when a bulk TD completes.  Either:
 *  - an error is seen in the TD status: hand off to
 *    uhci_handle_bulk_td_errors() and return;
 *  - more TDs of this transfer are still outstanding: just delete this TD;
 *  - this was the last outstanding TD: either refill the TD pool to move
 *    the next chunk of the transfer (up to MAX_NUM_BULK_TDS_PER_XFER TDs),
 *    or complete the transfer with a callback and tear down the DMA pools.
 *
 * NOTE(review): assumes it is called with uhci_int_mutex held, like the
 * other completion handlers in this file -- confirm at call sites.
 */
void
uhci_handle_bulk_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint_t			num_bulk_tds, index, td_count, j;
	usb_cr_t		error;
	uint_t			length, bytes_xfered;
	ushort_t		MaxPacketSize;
	uint32_t		buf_offs, paddr;
	uhci_td_t		*bulk_td_ptr, *current_dummy, *td_head;
	uhci_td_t		*tmp_td;
	queue_head_t		*qh, *next_qh;
	uhci_trans_wrapper_t	*tw = td->tw;
	uhci_pipe_private_t	*pp = tw->tw_pipe_private;
	uhci_bulk_isoc_xfer_t	*bulk_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;
	usba_pipe_handle_data_t	*ph;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_handle_bulk_td: td = 0x%p tw = 0x%p", td, tw);

	/*
	 * Update the tw_bytes_pending, and tw_bytes_xfered
	 * (this initial assignment is overwritten below once we know the
	 * TD completed without error).
	 */
	bytes_xfered = ZERO_LENGTH;

	/*
	 * Check whether there are any errors occurred in the xfer.
	 * If so, update the data_toggle for the queue head and
	 * return error to the upper layer.
	 */
	if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
		uhci_handle_bulk_td_errors(uhcip, td);

		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_handle_bulk_td: error; data toggle: 0x%x",
		    pp->pp_data_toggle);

		return;
	}

	/*
	 * Update the tw_bytes_pending, and tw_bytes_xfered.
	 * The UHCI actual-length field is encoded as (n - 1), so a TD that
	 * moved n bytes reports n - 1; hence the "+ 1" below.  ZERO_LENGTH
	 * is the encoding for a zero-byte transfer.
	 */
	bytes_xfered = GetTD_alen(uhcip, td);
	if (bytes_xfered != ZERO_LENGTH) {
		tw->tw_bytes_pending -= (bytes_xfered + 1);
		tw->tw_bytes_xfered += (bytes_xfered + 1);
	}

	/*
	 * Get Bulk pipe information and pipe handle
	 */
	bulk_xfer_info	= pp->pp_qh->bulk_xfer_info;
	ph = tw->tw_pipe_private->pp_pipe_handle;

	/*
	 * Check whether data underrun occurred.
	 * If so, complete the transfer
	 * Update the data toggle bit
	 *
	 * A short packet (actual length != max length) terminates the
	 * transfer: force num_tds to 1 so the decrement below reaches zero
	 * and we take the completion path.
	 */
	if (bytes_xfered != GetTD_mlen(uhcip, td)) {
		bulk_xfer_info->num_tds = 1;
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_handle_bulk_td: Data underrun occured");

		pp->pp_data_toggle = GetTD_dtogg(uhcip, td) == 0 ? 1 : 0;
	}

	/*
	 * If the TD's in the current frame are completed, then check
	 * whether we have any more bytes to xfer. If so, insert TD's.
	 * If no more bytes needs to be transferred, then do callback to the
	 * upper layer.
	 * If the TD's in the current frame are not completed, then
	 * just delete the TD from the linked lists.
	 */
	USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_handle_bulk_td: completed TD data toggle: 0x%x",
	    GetTD_dtogg(uhcip, td));

	if (--bulk_xfer_info->num_tds == 0) {
		uhci_delete_td(uhcip, td);

		if ((tw->tw_bytes_pending) &&
		    (GetTD_mlen(uhcip, td) - GetTD_alen(uhcip, td) == 0)) {

			/*
			 * More data pending and no short packet: queue the
			 * next chunk by reusing the already-allocated TD
			 * pools.
			 */
			MaxPacketSize = pp->pp_pipe_handle->p_ep.wMaxPacketSize;
			length = MaxPacketSize;

			/*
			 * Move this pipe's QH to the tail of the bulk queue
			 * head list (unless it is the only one), so bulk
			 * pipes get round-robin service from the controller.
			 * The "| 0x2" marks the link pointer as pointing to
			 * a QH rather than a TD.
			 */
			qh = pp->pp_qh;
			paddr = GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK;
			if (GetQH32(uhcip, qh->link_ptr) !=
			    GetQH32(uhcip,
			    uhcip->uhci_bulk_xfers_q_head->link_ptr)) {
				next_qh = QH_VADDR(paddr);
				SetQH32(uhcip, qh->prev_qh->link_ptr,
				    paddr|(0x2));
				next_qh->prev_qh = qh->prev_qh;
				SetQH32(uhcip, qh->link_ptr,
				    GetQH32(uhcip,
				    uhcip->uhci_bulk_xfers_q_head->link_ptr));
				qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
				SetQH32(uhcip,
				    uhcip->uhci_bulk_xfers_q_tail->link_ptr,
				    QH_PADDR(qh) | 0x2);
				uhcip->uhci_bulk_xfers_q_tail = qh;
			}

			/*
			 * Number of TDs for this round: one per full packet,
			 * plus a final short TD for the remainder, capped at
			 * MAX_NUM_BULK_TDS_PER_XFER.
			 */
			if ((tw->tw_bytes_pending / MaxPacketSize) >=
			    MAX_NUM_BULK_TDS_PER_XFER) {
				num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
			} else {
				num_bulk_tds =
				    (tw->tw_bytes_pending / MaxPacketSize);
				if (tw->tw_bytes_pending % MaxPacketSize) {
					num_bulk_tds++;
					length = (tw->tw_bytes_pending %
					    MaxPacketSize);
				}
			}

			current_dummy = pp->pp_qh->td_tailp;
			td_pool_ptr = &bulk_xfer_info->td_pools[0];
			bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
			buf_offs = tw->tw_bytes_xfered;
			td_count = num_bulk_tds;
			index = 0;

			/* reuse the TDs to transfer more data */
			while (td_count > 0) {
				/*
				 * Chain TDs within the current pool; each TD
				 * links to the next at its physical address.
				 */
				for (j = 0;
				    (j < (td_pool_ptr->num_tds - 1)) &&
				    (td_count > 1); j++, td_count--) {
					uhci_fill_in_bulk_isoc_td(uhcip,
					    &bulk_td_ptr[j], &bulk_td_ptr[j+1],
					    BULKTD_PADDR(td_pool_ptr,
					    &bulk_td_ptr[j+1]), ph, buf_offs,
					    MaxPacketSize, tw);
					buf_offs += MaxPacketSize;
				}

				if (td_count == 1) {
					/*
					 * Last TD: link it to the dummy TD
					 * at the QH tail and use the (possibly
					 * short) remainder length.
					 */
					uhci_fill_in_bulk_isoc_td(uhcip,
					    &bulk_td_ptr[j], current_dummy,
					    TD_PADDR(current_dummy), ph,
					    buf_offs, length, tw);

					break;
				} else {
					/*
					 * Pool exhausted: bridge the last TD
					 * of this pool to the first TD of the
					 * next pool.
					 */
					tmp_td = &bulk_td_ptr[j];
					ASSERT(index <
					    (bulk_xfer_info->num_pools - 1));
					td_pool_ptr = &bulk_xfer_info->
					    td_pools[index + 1];
					bulk_td_ptr = (uhci_td_t *)
					    td_pool_ptr->pool_addr;
					uhci_fill_in_bulk_isoc_td(uhcip,
					    tmp_td, &bulk_td_ptr[0],
					    BULKTD_PADDR(td_pool_ptr,
					    &bulk_td_ptr[0]), ph, buf_offs,
					    MaxPacketSize, tw);
					buf_offs += MaxPacketSize;
					td_count--;
					index++;
				}
			}

			/* Hand the refilled chain to the controller */
			pp->pp_qh->bulk_xfer_info = bulk_xfer_info;
			bulk_xfer_info->num_tds	= num_bulk_tds;
			SetQH32(uhcip, pp->pp_qh->element_ptr,
			    bulk_xfer_info->td_pools[0].cookie.dmac_address);
		} else {
			/* Transfer finished (or ended by a short packet) */
			usba_pipe_handle_data_t *usb_pp = pp->pp_pipe_handle;

			pp->pp_qh->bulk_xfer_info = NULL;

			if (tw->tw_bytes_pending) {
				/* Update the element pointer */
				SetQH32(uhcip, pp->pp_qh->element_ptr,
				    TD_PADDR(pp->pp_qh->td_tailp));

				/* Remove all the tds */
				td_head = tw->tw_hctd_head;
				while (td_head != NULL) {
					uhci_delete_td(uhcip, td_head);
					td_head = tw->tw_hctd_head;
				}
			}

			if (tw->tw_direction == PID_IN) {
				usb_req_attrs_t	attrs = ((usb_bulk_req_t *)
				    tw->tw_curr_xfer_reqp)->bulk_attributes;

				error = USB_CR_OK;

				/* Data run occurred */
				if (tw->tw_bytes_pending &&
				    (!(attrs & USB_ATTRS_SHORT_XFER_OK))) {
					error = USB_CR_DATA_UNDERRUN;
				}

				uhci_sendup_td_message(uhcip, error, tw);
			} else {
				uhci_do_byte_stats(uhcip, tw->tw_length,
				    usb_pp->p_ep.bmAttributes,
				    usb_pp->p_ep.bEndpointAddress);

				/* Data underrun occurred */
				if (tw->tw_bytes_pending) {

					tw->tw_data->b_rptr +=
					    tw->tw_bytes_xfered;

					USB_DPRINTF_L2(PRINT_MASK_ATTA,
					    uhcip->uhci_log_hdl,
					    "uhci_handle_bulk_td: "
					    "data underrun occurred");

					uhci_hcdi_callback(uhcip, pp,
					    tw->tw_pipe_private->pp_pipe_handle,
					    tw, USB_CR_DATA_UNDERRUN);
				} else {
					uhci_hcdi_callback(uhcip, pp,
					    tw->tw_pipe_private->pp_pipe_handle,
					    tw, USB_CR_OK);
				}
			} /* direction */

			/* Deallocate DMA memory */
			uhci_deallocate_tw(uhcip, pp, tw);
			for (j = 0; j < bulk_xfer_info->num_pools; j++) {
				td_pool_ptr = &bulk_xfer_info->td_pools[j];
				(void) ddi_dma_unbind_handle(
				    td_pool_ptr->dma_handle);
				ddi_dma_mem_free(&td_pool_ptr->mem_handle);
				ddi_dma_free_handle(&td_pool_ptr->dma_handle);
			}
			kmem_free(bulk_xfer_info->td_pools,
			    (sizeof (uhci_bulk_isoc_td_pool_t) *
			    bulk_xfer_info->num_pools));
			kmem_free(bulk_xfer_info,
			    sizeof (uhci_bulk_isoc_xfer_t));

			/*
			 * When there are no pending bulk commands, point the
			 * end of the lattice tree to NULL. This will make sure
			 * that the HC control does not loop anymore and PCI
			 * bus is not affected.
			 */
			if (--uhcip->uhci_pending_bulk_cmds == 0) {
				uhcip->uhci_bulk_xfers_q_tail->link_ptr =
				    HC_END_OF_LIST;
				USB_DPRINTF_L3(PRINT_MASK_ATTA,
				    uhcip->uhci_log_hdl,
				    "uhci_handle_bulk_td: count = %d",
				    uhcip->uhci_pending_bulk_cmds);
			}
		}
	} else {
		/* More TDs of this chunk are still outstanding */
		uhci_delete_td(uhcip, td);
	}
}
/*
 * uhci_handle_bulk_td_errors:
 *	Recover from an errored bulk TD: determine the completion reason,
 *	reset the QH element pointer to the dummy TD, flush all outstanding
 *	TDs/TWs for the pipe, and notify the client via the HCDI callback.
 *
 * td - the TD on which the error status was observed (its transfer
 *	wrapper and pipe are derived from it).
 */
void
uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td)
{
	usb_cr_t		usb_err;
	uint32_t		paddr_tail, element_ptr, paddr;
	uhci_td_t		*next_td;
	uhci_pipe_private_t	*pp;
	uhci_trans_wrapper_t	*tw = td->tw;
	usba_pipe_handle_data_t	*ph;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr = NULL;

	USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_handle_bulk_td_errors: td = %p", (void *)td);

#ifdef	DEBUG
	uhci_print_td(uhcip, td);
#endif

	tw = td->tw;
	ph = tw->tw_pipe_private->pp_pipe_handle;
	pp = (uhci_pipe_private_t *)ph->p_hcd_private;

	/*
	 * Find the type of error occurred and return the error
	 * to the upper layer. And adjust the data toggle.
	 */
	element_ptr = GetQH32(uhcip, pp->pp_qh->element_ptr) &
	    QH_ELEMENT_PTR_MASK;
	paddr_tail = TD_PADDR(pp->pp_qh->td_tailp);

	/*
	 * If a timeout occurs before a transfer has completed,
	 * the timeout handler sets the CRC/Timeout bit and clears the Active
	 * bit in the link_ptr for each td in the transfer.
	 * It then waits (at least) 1 ms so that any tds the controller might
	 * have been executing will have completed.
	 * So at this point element_ptr will point to either:
	 * 1) the next td for the transfer (which has not been executed,
	 * and has the CRC/Timeout status bit set and Active bit cleared),
	 * 2) the dummy td for this qh.
	 * So if the element_ptr does not point to the dummy td, we know
	 * it points to the next td that would have been executed.
	 * That td has the data toggle we want to save.
	 * All outstanding tds have been marked as CRC/Timeout,
	 * so it doesn't matter which td we pass to uhci_parse_td_error
	 * for the error status.
	 */
	if (element_ptr != paddr_tail) {
		paddr = (element_ptr & QH_ELEMENT_PTR_MASK);
		uhci_get_bulk_td_by_paddr(uhcip, pp->pp_qh->bulk_xfer_info,
		    paddr, &td_pool_ptr);
		next_td = BULKTD_VADDR(td_pool_ptr, paddr);
		USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_handle_bulk_td_errors: next td = %p",
		    (void *)next_td);

		usb_err = uhci_parse_td_error(uhcip, pp, next_td);
	} else {
		usb_err = uhci_parse_td_error(uhcip, pp, td);
	}

	/*
	 * Update the link pointer.
	 * Point the QH element pointer back at the dummy TD so the
	 * controller stops processing this queue.
	 */
	SetQH32(uhcip, pp->pp_qh->element_ptr, TD_PADDR(pp->pp_qh->td_tailp));

	/*
	 * Send up number of bytes transferred before the error condition.
	 */
	if ((tw->tw_direction == PID_OUT) && tw->tw_data) {
		tw->tw_data->b_rptr += tw->tw_bytes_xfered;
	}

	/* Flush the pipe's outstanding TDs and free the TD pools */
	uhci_remove_bulk_tds_tws(uhcip, tw->tw_pipe_private, UHCI_IN_ERROR);

	/*
	 * When there are no pending bulk commands, point the end of the
	 * lattice tree to NULL. This will make sure that the HC control
	 * does not loop anymore and PCI bus is not affected.
	 */
	if (--uhcip->uhci_pending_bulk_cmds == 0) {
		uhcip->uhci_bulk_xfers_q_tail->link_ptr = HC_END_OF_LIST;
		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_handle_bulk_td_errors: count = %d",
		    uhcip->uhci_pending_bulk_cmds);
	}

	uhci_hcdi_callback(uhcip, pp, ph, tw, usb_err);
	uhci_deallocate_tw(uhcip, pp, tw);
}
/*
 * uhci_get_bulk_td_by_paddr:
 *	Obtain the address of the TD pool the physical address falls in.
 *
 * td_pool_pp - pointer to the address of the TD pool containing the paddr
 */
/* ARGSUSED */
static void
uhci_get_bulk_td_by_paddr(
	uhci_state_t		*uhcip,
	uhci_bulk_isoc_xfer_t	*info,
	uint32_t		paddr,
	uhci_bulk_isoc_td_pool_t **td_pool_pp)
{
	uint_t			i = 0;

	/*
	 * Linear search of the pools; a pool matches when paddr lies in
	 * [dmac_address, dmac_address + dmac_size).
	 */
	while (i < info->num_pools) {
		*td_pool_pp = &info->td_pools[i];
		if (((*td_pool_pp)->cookie.dmac_address <= paddr) &&
		    (((*td_pool_pp)->cookie.dmac_address +
		    (*td_pool_pp)->cookie.dmac_size) > paddr)) {

			break;
		}
		i++;
	}

	/* paddr must belong to one of this transfer's pools */
	ASSERT(i < info->num_pools);
}


/*
 * uhci_remove_bulk_tds_tws:
 *	Remove all outstanding bulk TDs belonging to the given pipe and,
 *	for close/reset, flush the pending request back to the client and
 *	release the transfer wrappers.  Finally tear down the pipe's bulk
 *	TD DMA pools.
 *
 * what - UHCI_IN_CLOSE, UHCI_IN_RESET or UHCI_IN_ERROR; determines
 *	whether client callbacks and TW deallocation are performed here.
 */
void
uhci_remove_bulk_tds_tws(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	int			what)
{
	uint_t			rval, i;
	uhci_td_t		*head;
	uhci_td_t		*head_next;
	usb_opaque_t		curr_reqp;
	uhci_bulk_isoc_xfer_t	*info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Nothing to do if no bulk transfer is outstanding on this pipe */
	if ((info = pp->pp_qh->bulk_xfer_info) == NULL) {

		return;
	}

	head = uhcip->uhci_outst_tds_head;

	/* Walk the global outstanding-TD list, picking out this pipe's TDs */
	while (head) {
		uhci_trans_wrapper_t *tw_tmp = head->tw;
		/* save the next TD now: uhci_delete_td unlinks head */
		head_next = head->outst_td_next;

		if (pp->pp_qh == tw_tmp->tw_pipe_private->pp_qh) {
			curr_reqp = tw_tmp->tw_curr_xfer_reqp;
			if (curr_reqp &&
			    ((what == UHCI_IN_CLOSE) ||
			    (what == UHCI_IN_RESET))) {
				uhci_hcdi_callback(uhcip, pp,
				    pp->pp_pipe_handle,
				    tw_tmp, USB_CR_FLUSHED);
			} /* end of curr_reqp */

			uhci_delete_td(uhcip, head);

			if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
				ASSERT(info->num_tds > 0);
				if (--info->num_tds == 0) {
					uhci_deallocate_tw(uhcip, pp, tw_tmp);

					/*
					 * This will make sure that the HC
					 * does not loop anymore when there
					 * are no pending bulk commands.
					 */
					if (--uhcip->uhci_pending_bulk_cmds
					    == 0) {
						uhcip->uhci_bulk_xfers_q_tail->
						    link_ptr = HC_END_OF_LIST;
						USB_DPRINTF_L3(PRINT_MASK_ATTA,
						    uhcip->uhci_log_hdl,
						    "uhci_remove_bulk_tds_tws:"
						    " count = %d",
						    uhcip->
						    uhci_pending_bulk_cmds);
					}
				}
			}
		}

		head = head_next;
	}

	if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
		ASSERT(info->num_tds == 0);
	}

	/* Unconditionally release the TD pool DMA resources */
	for (i = 0; i < info->num_pools; i++) {
		td_pool_ptr = &info->td_pools[i];
		rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
		ASSERT(rval == DDI_SUCCESS);
		ddi_dma_mem_free(&td_pool_ptr->mem_handle);
		ddi_dma_free_handle(&td_pool_ptr->dma_handle);
	}
	kmem_free(info->td_pools, (sizeof (uhci_bulk_isoc_td_pool_t) *
	    info->num_pools));
	kmem_free(info, sizeof (uhci_bulk_isoc_xfer_t));
	pp->pp_qh->bulk_xfer_info = NULL;
}


/*
 * uhci_save_data_toggle ()
 *	Save the data toggle in the usba_device structure
 *	so it survives pipe close/open cycles on the same endpoint.
 */
void
uhci_save_data_toggle(uhci_pipe_private_t *pp)
{
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	/* Save the data toggle in the usb devices structure. */
	mutex_enter(&ph->p_mutex);
	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
	    pp->pp_data_toggle);
	mutex_exit(&ph->p_mutex);
}
/*
 * uhci_create_isoc_transfer_wrapper:
 *	Create a Transaction Wrapper (TW) for isoc transfer.
 *	This involves the allocating of DMA resources.
 *
 *	For isoc transfers, one isoc transfer includes multiple packets
 *	and each packet may have a different length. So each packet is
 *	transfered by one TD. We only know the individual packet length
 *	won't exceed 1023 bytes, but we don't know exactly the lengths.
 *	It is hard to make one physically discontiguous DMA buffer which
 *	can fit in all the TDs like what can be done to the ctrl/bulk/
 *	intr transfers. It is also undesirable to make one physically
 *	contiguous DMA buffer for all the packets, since this may easily
 *	fail when the system is in low memory. So an individual DMA
 *	buffer is allocated for an individual isoc packet and each DMA
 *	buffer is physically contiguous. An extra structure is allocated
 *	to save the multiple DMA handles.
 *
 *	Returns the new TW on success, NULL on any allocation/bind failure
 *	(all partially acquired DMA resources are released before return).
 */
static uhci_trans_wrapper_t *
uhci_create_isoc_transfer_wrapper(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	usb_isoc_req_t		*req,
	size_t			length,
	usb_flags_t		usb_flags)
{
	int			result;
	size_t			real_length, strtlen, xfer_size;
	uhci_trans_wrapper_t	*tw;
	ddi_device_acc_attr_t	dev_attr;
	ddi_dma_attr_t		dma_attr;
	int			kmem_flag;
	int			(*dmamem_wait)(caddr_t);
	uint_t			i, j, ccount;
	usb_isoc_req_t		*tmp_req = req;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Only isochronous pipes may use this wrapper constructor */
	if (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep) != USB_EP_ATTR_ISOCH) {

		return (NULL);
	}

	/*
	 * For an IN polling restart there is no explicit request; fall back
	 * to the request saved on the pipe.
	 */
	if ((req == NULL) && (UHCI_XFER_DIR(&pp->pp_pipe_handle->p_ep) ==
	    USB_EP_DIR_IN)) {
		tmp_req = (usb_isoc_req_t *)pp->pp_client_periodic_in_reqp;
	}

	if (tmp_req == NULL) {

		return (NULL);
	}


	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_isoc_transfer_wrapper: length = 0x%lx flags = 0x%x",
	    length, usb_flags);

	/* SLEEP flag should not be used in interrupt context */
	if (servicing_interrupt()) {
		kmem_flag = KM_NOSLEEP;
		dmamem_wait = DDI_DMA_DONTWAIT;
	} else {
		kmem_flag = KM_SLEEP;
		dmamem_wait = DDI_DMA_SLEEP;
	}

	/* Allocate space for the transfer wrapper */
	if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
	    NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_isoc_transfer_wrapper: kmem_alloc failed");

		return (NULL);
	}

	/* Allocate space for the isoc buffer handles */
	strtlen = sizeof (uhci_isoc_buf_t) * tmp_req->isoc_pkts_count;
	if ((tw->tw_isoc_bufs = kmem_zalloc(strtlen, kmem_flag)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_isoc_transfer_wrapper: kmem_alloc "
		    "isoc buffer failed");
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	/* Each per-packet buffer must be physically contiguous */
	bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
	dma_attr.dma_attr_sgllen = 1;

	dev_attr.devacc_attr_version		= DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder		= DDI_STRICTORDER_ACC;

	/* Store the transfer length */
	tw->tw_length = length;

	/*
	 * Acquire handle + memory + binding for each packet; on any failure
	 * unwind everything acquired for packets [0, i).
	 */
	for (i = 0; i < tmp_req->isoc_pkts_count; i++) {
		tw->tw_isoc_bufs[i].index = i;

		/* Allocate the DMA handle */
		if ((result = ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr,
		    dmamem_wait, 0, &tw->tw_isoc_bufs[i].dma_handle)) !=
		    DDI_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_create_isoc_transfer_wrapper: "
			    "Alloc handle %d failed", i);

			for (j = 0; j < i; j++) {
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[j].dma_handle);
				ASSERT(result == USB_SUCCESS);
				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
				    mem_handle);
				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
				    dma_handle);
			}
			kmem_free(tw->tw_isoc_bufs, strtlen);
			kmem_free(tw, sizeof (uhci_trans_wrapper_t));

			return (NULL);
		}

		/* Allocate the memory */
		xfer_size = tmp_req->isoc_pkt_descr[i].isoc_pkt_length;
		if ((result = ddi_dma_mem_alloc(tw->tw_isoc_bufs[i].dma_handle,
		    xfer_size, &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait,
		    NULL, (caddr_t *)&tw->tw_isoc_bufs[i].buf_addr,
		    &real_length, &tw->tw_isoc_bufs[i].mem_handle)) !=
		    DDI_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_create_isoc_transfer_wrapper: "
			    "dma_mem_alloc %d fail", i);
			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);

			for (j = 0; j < i; j++) {
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[j].dma_handle);
				ASSERT(result == USB_SUCCESS);
				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
				    mem_handle);
				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
				    dma_handle);
			}
			kmem_free(tw->tw_isoc_bufs, strtlen);
			kmem_free(tw, sizeof (uhci_trans_wrapper_t));

			return (NULL);
		}

		ASSERT(real_length >= xfer_size);

		/* Bind the handle */
		result = ddi_dma_addr_bind_handle(
		    tw->tw_isoc_bufs[i].dma_handle, NULL,
		    (caddr_t)tw->tw_isoc_bufs[i].buf_addr, real_length,
		    DDI_DMA_RDWR|DDI_DMA_CONSISTENT, dmamem_wait, NULL,
		    &tw->tw_isoc_bufs[i].cookie, &ccount);

		if ((result == DDI_DMA_MAPPED) && (ccount == 1)) {
			tw->tw_isoc_bufs[i].length = xfer_size;

			continue;
		} else {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_create_isoc_transfer_wrapper: "
			    "Bind handle %d failed", i);
			if (result == DDI_DMA_MAPPED) {
				/* bound but fragmented: undo the binding */
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[i].dma_handle);
				ASSERT(result == USB_SUCCESS);
			}
			ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);

			for (j = 0; j < i; j++) {
				result = ddi_dma_unbind_handle(
				    tw->tw_isoc_bufs[j].dma_handle);
				ASSERT(result == USB_SUCCESS);
				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
				    mem_handle);
				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
				    dma_handle);
			}
			kmem_free(tw->tw_isoc_bufs, strtlen);
			kmem_free(tw, sizeof (uhci_trans_wrapper_t));

			return (NULL);
		}
	}

	tw->tw_ncookies = tmp_req->isoc_pkts_count;
	tw->tw_isoc_strtlen = strtlen;

	/*
	 * Only allow one wrapper to be added at a time. Insert the
	 * new transaction wrapper into the list for this pipe.
	 */
	if (pp->pp_tw_head == NULL) {
		pp->pp_tw_head = tw;
		pp->pp_tw_tail = tw;
	} else {
		pp->pp_tw_tail->tw_next = tw;
		pp->pp_tw_tail = tw;
		ASSERT(tw->tw_next == NULL);
	}

	/* Store a back pointer to the pipe private structure */
	tw->tw_pipe_private = pp;

	/* Store the transfer type - synchronous or asynchronous */
	tw->tw_flags = usb_flags;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_isoc_transfer_wrapper: tw = 0x%p, ncookies = %u",
	    tw, tw->tw_ncookies);

	return (tw);
}
/*
 * uhci_insert_isoc_td:
 *	- Create transfer wrapper
 *	- Allocate memory for the isoc td's
 *	- Fill up all the TD's and submit to the HC
 *	- Update all the linked lists
 *
 * Returns USB_SUCCESS, USB_NO_RESOURCES, USB_INVALID_START_FRAME or the
 * periodic-in resource allocation failure code.  All partially acquired
 * resources are released on failure.
 */
int
uhci_insert_isoc_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_isoc_req_t		*isoc_req,
	size_t			length,
	usb_flags_t		flags)
{
	int			rval = USB_SUCCESS;
	int			error;
	uint_t			ddic;
	uint32_t		i, j, index;
	uint32_t		bytes_to_xfer;
	uint32_t		expired_frames = 0;
	usb_frame_number_t	start_frame, end_frame, current_frame;
	uhci_td_t		*td_ptr;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: ph = 0x%p isoc req = %p length = %lu",
	    ph, (void *)isoc_req, length);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Allocate a transfer wrapper */
	if ((tw = uhci_create_isoc_transfer_wrapper(uhcip, pp, isoc_req,
	    length, flags)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/* Save current isochronous request pointer */
	tw->tw_curr_xfer_reqp = (usb_opaque_t)isoc_req;

	/*
	 * Initialize the transfer wrapper. These values are useful
	 * for sending back the reply.
	 */
	tw->tw_handle_td = uhci_handle_isoc_td;
	tw->tw_handle_callback_value = NULL;
	tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
	    PID_OUT : PID_IN;

	/*
	 * If the transfer isoc send, then copy the data from the request
	 * to the transfer wrapper.
	 * Each packet's payload goes into its own per-packet DMA buffer.
	 */
	if ((tw->tw_direction == PID_OUT) && length) {
		uchar_t *p;

		ASSERT(isoc_req->isoc_data != NULL);
		p = isoc_req->isoc_data->b_rptr;

		/* Copy the data into the message */
		for (i = 0; i < isoc_req->isoc_pkts_count; i++) {
			ddi_rep_put8(tw->tw_isoc_bufs[i].mem_handle,
			    p, (uint8_t *)tw->tw_isoc_bufs[i].buf_addr,
			    isoc_req->isoc_pkt_descr[i].isoc_pkt_length,
			    DDI_DEV_AUTOINCR);
			p += isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		}
	}

	if (tw->tw_direction == PID_IN) {
		if ((rval = uhci_allocate_periodic_in_resource(uhcip, pp, tw,
		    flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
			    "uhci_insert_isoc_td: isoc_req_t alloc failed");
			uhci_deallocate_tw(uhcip, pp, tw);

			return (rval);
		}

		isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;
	}

	tw->tw_isoc_req	= (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;

	/* Get the pointer to the isoc_xfer_info structure */
	isoc_xfer_info = (uhci_bulk_isoc_xfer_t *)&tw->tw_xfer_info;
	isoc_xfer_info->num_tds = isoc_req->isoc_pkts_count;

	/*
	 * Allocate memory for isoc tds
	 */
	if ((rval = uhci_alloc_bulk_isoc_tds(uhcip, isoc_req->isoc_pkts_count,
	    isoc_xfer_info)) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_bulk_isoc_td: Memory allocation failure");

		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}
		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	/*
	 * Get the isoc td pool address, buffer address and
	 * max packet size that the device supports.
	 */
	td_pool_ptr = &isoc_xfer_info->td_pools[0];
	td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
	index = 0;

	/*
	 * Fill up the isoc tds
	 * "index" is the global packet number across all pools; each TD
	 * records its packet index so completion can locate its descriptor.
	 */
	USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td : isoc pkts %d", isoc_req->isoc_pkts_count);

	for (i = 0; i < isoc_xfer_info->num_pools; i++) {
		for (j = 0; j < td_pool_ptr->num_tds; j++) {
			bytes_to_xfer =
			    isoc_req->isoc_pkt_descr[index].isoc_pkt_length;

			uhci_fill_in_bulk_isoc_td(uhcip, &td_ptr[j],
			    (uhci_td_t *)NULL, HC_END_OF_LIST, ph, index,
			    bytes_to_xfer, tw);
			td_ptr[j].isoc_pkt_index = index;
			index++;
		}

		if (i < (isoc_xfer_info->num_pools - 1)) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i + 1];
			td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
		}
	}

	/*
	 * Get the starting frame number.
	 * The client drivers sets the flag USB_ATTRS_ISOC_XFER_ASAP to inform
	 * the HCD to care of starting frame number.
	 *
	 * Following code is very time critical. So, perform atomic execution.
	 *
	 * NOTE(review): if the request carries neither
	 * USB_ATTRS_ISOC_START_FRAME nor USB_ATTRS_ISOC_XFER_ASAP,
	 * start_frame/end_frame are used below without being set here --
	 * presumably the USBA framework guarantees one of the two flags is
	 * always present; confirm against the callers.
	 */
	ddic = ddi_enter_critical();
	current_frame = uhci_get_sw_frame_number(uhcip);

	if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_START_FRAME) {
		start_frame = isoc_req->isoc_frame_no;
		end_frame = start_frame + isoc_req->isoc_pkts_count;

		/* Check available frames */
		if ((end_frame - current_frame) < UHCI_MAX_ISOC_FRAMES) {
			if (current_frame > start_frame) {
				if ((current_frame + FRNUM_OFFSET) <
				    end_frame) {
					/*
					 * Part of the window has already
					 * passed; skip the expired packets.
					 */
					expired_frames = current_frame +
					    FRNUM_OFFSET - start_frame;
					start_frame = current_frame +
					    FRNUM_OFFSET;
				} else {
					rval = USB_INVALID_START_FRAME;
				}
			}
		} else {
			rval = USB_INVALID_START_FRAME;
		}

	} else if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_XFER_ASAP) {
		start_frame = pp->pp_frame_num;

		if (start_frame == INVALID_FRNUM) {
			start_frame = current_frame + FRNUM_OFFSET;
		} else if (current_frame > start_frame) {
			start_frame = current_frame + FRNUM_OFFSET;
		}

		end_frame = start_frame + isoc_req->isoc_pkts_count;
		isoc_req->isoc_frame_no = start_frame;

	}

	if (rval != USB_SUCCESS) {

		/* Exit the critical */
		ddi_exit_critical(ddic);

		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: Invalid starting frame number");

		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}

		while (tw->tw_hctd_head) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		for (i = 0; i < isoc_xfer_info->num_pools; i++) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i];
			error = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
			ASSERT(error == DDI_SUCCESS);
			ddi_dma_mem_free(&td_pool_ptr->mem_handle);
			ddi_dma_free_handle(&td_pool_ptr->dma_handle);
		}
		kmem_free(isoc_xfer_info->td_pools,
		    (sizeof (uhci_bulk_isoc_td_pool_t) *
		    isoc_xfer_info->num_pools));

		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	/*
	 * Mark the packets whose frames have already passed as not accessed
	 * and discard their TDs.
	 */
	for (i = 0; i < expired_frames; i++) {
		isoc_req->isoc_pkt_descr[i].isoc_pkt_status =
		    USB_CR_NOT_ACCESSED;
		isoc_req->isoc_pkt_descr[i].isoc_pkt_actual_length =
		    isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		uhci_delete_td(uhcip, td_ptr);
		--isoc_xfer_info->num_tds;
	}

	/*
	 * Add the TD's to the HC list
	 * (i continues from the expired-frames loop above: the first
	 * non-expired packet.)  The frame list has NUM_FRAME_LST_ENTRIES
	 * (1024) slots, hence the & 0x3ff wrap.
	 */
	start_frame = (start_frame & 0x3ff);
	for (; i < isoc_req->isoc_pkts_count; i++) {
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		if (uhcip->uhci_isoc_q_tailp[start_frame]) {
			/* append to the existing TDs in this frame slot */
			td_ptr->isoc_prev =
			    uhcip->uhci_isoc_q_tailp[start_frame];
			td_ptr->isoc_next = NULL;
			td_ptr->link_ptr =
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr;
			uhcip->uhci_isoc_q_tailp[start_frame]->isoc_next =
			    td_ptr;
			SetTD32(uhcip,
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr,
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
		} else {
			/* first isoc TD in this frame slot */
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
			td_ptr->isoc_next = NULL;
			td_ptr->isoc_prev = NULL;
			SetTD32(uhcip, td_ptr->link_ptr,
			    GetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame]));
			SetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame],
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
		}
		td_ptr->starting_frame = start_frame;

		if (++start_frame == NUM_FRAME_LST_ENTRIES)
			start_frame = 0;
	}

	ddi_exit_critical(ddic);
	pp->pp_frame_num = end_frame;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: current frame number 0x%llx, pipe frame num"
	    " 0x%llx", current_frame, pp->pp_frame_num);

	return (rval);
}
uhci_get_isoc_td_by_index: 4230 * Obtain the addresses of the TD pool and the TD at the index. 4231 * 4232 * tdpp - pointer to the address of the TD at the isoc packet index 4233 * td_pool_pp - pointer to the address of the TD pool containing 4234 * the specified TD 4235 */ 4236 /* ARGSUSED */ 4237 static void 4238 uhci_get_isoc_td_by_index( 4239 uhci_state_t *uhcip, 4240 uhci_bulk_isoc_xfer_t *info, 4241 uint_t index, 4242 uhci_td_t **tdpp, 4243 uhci_bulk_isoc_td_pool_t **td_pool_pp) 4244 { 4245 uint_t i = 0, j = 0; 4246 uhci_td_t *td_ptr; 4247 4248 while (j < info->num_pools) { 4249 if ((i + info->td_pools[j].num_tds) <= index) { 4250 i += info->td_pools[j].num_tds; 4251 j++; 4252 } else { 4253 i = index - i; 4254 4255 break; 4256 } 4257 } 4258 4259 ASSERT(j < info->num_pools); 4260 *td_pool_pp = &info->td_pools[j]; 4261 td_ptr = (uhci_td_t *)((*td_pool_pp)->pool_addr); 4262 *tdpp = &td_ptr[i]; 4263 } 4264 4265 4266 /* 4267 * uhci_handle_isoc_td: 4268 * Handles the completed isoc tds 4269 */ 4270 void 4271 uhci_handle_isoc_td(uhci_state_t *uhcip, uhci_td_t *td) 4272 { 4273 uint_t rval, i; 4274 uint32_t pkt_index = td->isoc_pkt_index; 4275 usb_cr_t cr; 4276 uhci_trans_wrapper_t *tw = td->tw; 4277 usb_isoc_req_t *isoc_req = (usb_isoc_req_t *)tw->tw_isoc_req; 4278 uhci_pipe_private_t *pp = tw->tw_pipe_private; 4279 uhci_bulk_isoc_xfer_t *isoc_xfer_info = &tw->tw_xfer_info; 4280 usba_pipe_handle_data_t *usb_pp; 4281 uhci_bulk_isoc_td_pool_t *td_pool_ptr; 4282 4283 USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 4284 "uhci_handle_isoc_td: td = 0x%p, pp = 0x%p, tw = 0x%p, req = 0x%p, " 4285 "index = %x", td, pp, tw, isoc_req, pkt_index); 4286 4287 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 4288 4289 usb_pp = pp->pp_pipe_handle; 4290 4291 /* 4292 * Check whether there are any errors occurred. If so, update error 4293 * count and return it to the upper.But never return a non zero 4294 * completion reason. 
4295 */ 4296 cr = USB_CR_OK; 4297 if (GetTD_status(uhcip, td) & TD_STATUS_MASK) { 4298 USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 4299 "uhci_handle_isoc_td: Error Occurred: TD Status = %x", 4300 GetTD_status(uhcip, td)); 4301 isoc_req->isoc_error_count++; 4302 } 4303 4304 if (isoc_req != NULL) { 4305 isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_status = cr; 4306 isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_actual_length = 4307 (GetTD_alen(uhcip, td) == ZERO_LENGTH) ? 0 : 4308 GetTD_alen(uhcip, td) + 1; 4309 } 4310 4311 uhci_delete_isoc_td(uhcip, td); 4312 4313 if (--isoc_xfer_info->num_tds != 0) { 4314 USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 4315 "uhci_handle_isoc_td: Number of TDs %d", 4316 isoc_xfer_info->num_tds); 4317 4318 return; 4319 } 4320 4321 tw->tw_claim = UHCI_INTR_HDLR_CLAIMED; 4322 if (tw->tw_direction == PID_IN) { 4323 uhci_sendup_td_message(uhcip, cr, tw); 4324 4325 if ((uhci_handle_isoc_receive(uhcip, pp, tw)) != USB_SUCCESS) { 4326 USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 4327 "uhci_handle_isoc_td: Drop message"); 4328 } 4329 4330 } else { 4331 /* update kstats only for OUT. 
sendup_td_msg() does it for IN */ 4332 uhci_do_byte_stats(uhcip, tw->tw_length, 4333 usb_pp->p_ep.bmAttributes, usb_pp->p_ep.bEndpointAddress); 4334 4335 uhci_hcdi_callback(uhcip, pp, usb_pp, tw, USB_CR_OK); 4336 } 4337 4338 for (i = 0; i < isoc_xfer_info->num_pools; i++) { 4339 td_pool_ptr = &isoc_xfer_info->td_pools[i]; 4340 rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle); 4341 ASSERT(rval == DDI_SUCCESS); 4342 ddi_dma_mem_free(&td_pool_ptr->mem_handle); 4343 ddi_dma_free_handle(&td_pool_ptr->dma_handle); 4344 } 4345 kmem_free(isoc_xfer_info->td_pools, 4346 (sizeof (uhci_bulk_isoc_td_pool_t) * 4347 isoc_xfer_info->num_pools)); 4348 uhci_deallocate_tw(uhcip, pp, tw); 4349 } 4350 4351 4352 /* 4353 * uhci_handle_isoc_receive: 4354 * - Sends the isoc data to the client 4355 * - Inserts another isoc receive request 4356 */ 4357 static int 4358 uhci_handle_isoc_receive( 4359 uhci_state_t *uhcip, 4360 uhci_pipe_private_t *pp, 4361 uhci_trans_wrapper_t *tw) 4362 { 4363 USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 4364 "uhci_handle_isoc_receive: tw = 0x%p", tw); 4365 4366 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 4367 4368 /* 4369 * -- check for pipe state being polling before 4370 * inserting a new request. Check when is TD 4371 * de-allocation being done? 
(so we can reuse the same TD) 4372 */ 4373 if (uhci_start_isoc_receive_polling(uhcip, 4374 pp->pp_pipe_handle, (usb_isoc_req_t *)tw->tw_curr_xfer_reqp, 4375 0) != USB_SUCCESS) { 4376 USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 4377 "uhci_handle_isoc_receive: receive polling failed"); 4378 4379 return (USB_FAILURE); 4380 } 4381 4382 return (USB_SUCCESS); 4383 } 4384 4385 4386 /* 4387 * uhci_delete_isoc_td: 4388 * - Delete from the outstanding command queue 4389 * - Delete from the tw queue 4390 * - Delete from the isoc queue 4391 * - Delete from the HOST CONTROLLER list 4392 */ 4393 static void 4394 uhci_delete_isoc_td(uhci_state_t *uhcip, uhci_td_t *td) 4395 { 4396 uint32_t starting_frame = td->starting_frame; 4397 4398 if ((td->isoc_next == NULL) && (td->isoc_prev == NULL)) { 4399 SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame], 4400 GetTD32(uhcip, td->link_ptr)); 4401 uhcip->uhci_isoc_q_tailp[starting_frame] = 0; 4402 } else if (td->isoc_next == NULL) { 4403 td->isoc_prev->link_ptr = td->link_ptr; 4404 td->isoc_prev->isoc_next = NULL; 4405 uhcip->uhci_isoc_q_tailp[starting_frame] = td->isoc_prev; 4406 } else if (td->isoc_prev == NULL) { 4407 td->isoc_next->isoc_prev = NULL; 4408 SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame], 4409 GetTD32(uhcip, td->link_ptr)); 4410 } else { 4411 td->isoc_prev->isoc_next = td->isoc_next; 4412 td->isoc_next->isoc_prev = td->isoc_prev; 4413 td->isoc_prev->link_ptr = td->link_ptr; 4414 } 4415 4416 uhci_delete_td(uhcip, td); 4417 } 4418 4419 4420 /* 4421 * uhci_send_isoc_receive 4422 * - Allocates usb_isoc_request 4423 * - Updates the isoc request 4424 * - Inserts the isoc td's into the HC processing list. 
4425 */ 4426 int 4427 uhci_start_isoc_receive_polling( 4428 uhci_state_t *uhcip, 4429 usba_pipe_handle_data_t *ph, 4430 usb_isoc_req_t *isoc_req, 4431 usb_flags_t usb_flags) 4432 { 4433 int ii, error; 4434 size_t max_isoc_xfer_size, length; 4435 ushort_t isoc_pkt_count; 4436 uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private; 4437 usb_isoc_pkt_descr_t *isoc_pkt_descr; 4438 4439 USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 4440 "uhci_start_isoc_receive_polling: usb_flags = %x", usb_flags); 4441 4442 ASSERT(mutex_owned(&uhcip->uhci_int_mutex)); 4443 4444 max_isoc_xfer_size = ph->p_ep.wMaxPacketSize * UHCI_MAX_ISOC_PKTS; 4445 4446 if (isoc_req) { 4447 isoc_pkt_descr = isoc_req->isoc_pkt_descr; 4448 isoc_pkt_count = isoc_req->isoc_pkts_count; 4449 } else { 4450 isoc_pkt_descr = ((usb_isoc_req_t *) 4451 pp->pp_client_periodic_in_reqp)->isoc_pkt_descr; 4452 isoc_pkt_count = ((usb_isoc_req_t *) 4453 pp->pp_client_periodic_in_reqp)->isoc_pkts_count; 4454 } 4455 4456 for (ii = 0, length = 0; ii < isoc_pkt_count; ii++) { 4457 length += isoc_pkt_descr->isoc_pkt_length; 4458 isoc_pkt_descr++; 4459 } 4460 4461 /* Check the size of isochronous request */ 4462 if (length > max_isoc_xfer_size) { 4463 USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl, 4464 "uhci_start_isoc_receive_polling: " 4465 "Max isoc request size = %lx, Given isoc req size = %lx", 4466 max_isoc_xfer_size, length); 4467 4468 return (USB_FAILURE); 4469 } 4470 4471 /* Add the TD into the Host Controller's isoc list */ 4472 error = uhci_insert_isoc_td(uhcip, ph, isoc_req, length, usb_flags); 4473 4474 return (error); 4475 } 4476 4477 4478 /* 4479 * uhci_remove_isoc_tds_tws 4480 * This routine scans the pipe and removes all the td's 4481 * and transfer wrappers and deallocates the memory 4482 * associated with those td's and tw's. 
 */
void
uhci_remove_isoc_tds_tws(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uint_t			rval, i;
	uhci_td_t		*tmp_td, *td_head;
	usb_isoc_req_t		*isoc_req;
	uhci_trans_wrapper_t	*tmp_tw, *tw_head;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_remove_isoc_tds_tws: pp = %p", (void *)pp);

	/* Walk every transfer wrapper still queued on this pipe */
	tw_head = pp->pp_tw_head;
	while (tw_head) {
		tmp_tw = tw_head;
		tw_head = tw_head->tw_next;
		td_head = tmp_tw->tw_hctd_head;

		/*
		 * Notify the client first: release the periodic IN
		 * resources for IN wrappers, or complete OUT wrappers
		 * with USB_CR_FLUSHED.
		 */
		if (tmp_tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp,
			    tmp_tw);
		} else if (tmp_tw->tw_direction == PID_OUT) {
			uhci_hcdi_callback(uhcip, pp, pp->pp_pipe_handle,
			    tmp_tw, USB_CR_FLUSHED);
		}

		/* Unlink and free every TD still owned by this wrapper */
		while (td_head) {
			tmp_td = td_head;
			td_head = td_head->tw_td_next;
			uhci_delete_isoc_td(uhcip, tmp_td);
		}

		isoc_req = (usb_isoc_req_t *)tmp_tw->tw_isoc_req;
		if (isoc_req) {
			usb_free_isoc_req(isoc_req);
		}

		ASSERT(tmp_tw->tw_hctd_head == NULL);

		/* Tear down the DMA-backed TD pools, if any were allocated */
		if (tmp_tw->tw_xfer_info.td_pools) {
			isoc_xfer_info =
			    (uhci_bulk_isoc_xfer_t *)&tmp_tw->tw_xfer_info;
			for (i = 0; i < isoc_xfer_info->num_pools; i++) {
				td_pool_ptr = &isoc_xfer_info->td_pools[i];
				rval = ddi_dma_unbind_handle(
				    td_pool_ptr->dma_handle);
				ASSERT(rval == DDI_SUCCESS);
				ddi_dma_mem_free(&td_pool_ptr->mem_handle);
				ddi_dma_free_handle(&td_pool_ptr->dma_handle);
			}
			kmem_free(isoc_xfer_info->td_pools,
			    (sizeof (uhci_bulk_isoc_td_pool_t) *
			    isoc_xfer_info->num_pools));
		}

		uhci_deallocate_tw(uhcip, pp, tmp_tw);
	}
}


/*
 * uhci_isoc_update_sw_frame_number()
 *	to avoid code duplication, call uhci_get_sw_frame_number()
 *	(the call updates uhcip->uhci_sw_frnum as a side effect; the
 *	returned value is intentionally discarded here)
 */
void
uhci_isoc_update_sw_frame_number(uhci_state_t *uhcip)
{
	(void) uhci_get_sw_frame_number(uhcip);
}


/*
 * uhci_get_sw_frame_number:
 *	Return the current 64-bit software frame number, reconciling the
 *	software counter with the 11-bit hardware FRNUM register.
 *	Hold the uhci_int_mutex before calling this routine.
 */
uint64_t
uhci_get_sw_frame_number(uhci_state_t *uhcip)
{
	uint64_t sw_frnum, hw_frnum, current_frnum;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	sw_frnum = uhcip->uhci_sw_frnum;
	hw_frnum = Get_OpReg16(FRNUM);

	/*
	 * Check bit 10 in the software counter and hardware frame counter.
	 * If both are same, then don't increment the software frame counter
	 * (Bit 10 of hw frame counter toggle for every 1024 frames)
	 * The lower 11 bits of software counter contains the hardware frame
	 * counter value. The MSB (bit 10) of software counter is incremented
	 * for every 1024 frames either here or in get frame number routine.
	 */
	if ((sw_frnum & UHCI_BIT_10_MASK) == (hw_frnum & UHCI_BIT_10_MASK)) {
		/* The MSB of hw counter did not toggle */
		current_frnum = ((sw_frnum & (SW_FRNUM_MASK)) | hw_frnum);
	} else {
		/*
		 * The hw counter wrapped around. And the interrupt handler
		 * did not get a chance to update the sw frame counter.
		 * So, update the sw frame counter and return correct frame no.
		 */
		sw_frnum >>= UHCI_SIZE_OF_HW_FRNUM - 1;
		current_frnum =
		    ((++sw_frnum << (UHCI_SIZE_OF_HW_FRNUM - 1)) | hw_frnum);
	}
	uhcip->uhci_sw_frnum = current_frnum;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_get_sw_frame_number: sw=%ld hd=%ld",
	    uhcip->uhci_sw_frnum, hw_frnum);

	return (current_frnum);
}


/*
 * uhci_cmd_timeout_hdlr:
 *	This routine will get called for every second. It checks for
 *	timed out control commands/bulk commands. Timeout any commands
 *	that exceeds the time out period specified by the pipe policy.
 */
void
uhci_cmd_timeout_hdlr(void *arg)
{
	uint_t			flag = B_FALSE;
	uhci_td_t		*head, *tmp_td;
	uhci_state_t		*uhcip = (uhci_state_t *)arg;
	uhci_pipe_private_t	*pp;

	/*
	 * Check whether any of the control xfers are timed out.
	 * If so, complete those commands with time out as reason.
	 */
	mutex_enter(&uhcip->uhci_int_mutex);
	head = uhcip->uhci_outst_tds_head;

	/* Pass 1: count down timeouts and halt any TD that expired */
	while (head) {
		/*
		 * If the timeout value is zero, this command never
		 * times out; skip it.
		 */
		if (head->tw->tw_timeout_cnt == 0) {
			head = head->outst_td_next;
			continue;
		}

		/*
		 * Decrement the wrapper's countdown only once per tick,
		 * even when several TDs share the same wrapper; the
		 * TW_TIMEOUT_FLAG marks a wrapper already counted.
		 */
		if (!(head->tw->tw_flags & TW_TIMEOUT_FLAG)) {
			head->tw->tw_flags |= TW_TIMEOUT_FLAG;
			--head->tw->tw_timeout_cnt;
		}

		/* only do it for bulk and control TDs */
		if ((head->tw->tw_timeout_cnt == 0) &&
		    (head->tw->tw_handle_td != uhci_handle_isoc_td)) {

			USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "Command timed out: td = %p", (void *)head);

			head->tw->tw_claim = UHCI_TIMEOUT_HDLR_CLAIMED;

			/*
			 * Check finally whether the command completed
			 */
			if (GetTD_status(uhcip, head) & UHCI_TD_ACTIVE) {
				/*
				 * Still active: terminate both the TD link
				 * and the QH element pointer so the HC stops
				 * processing this command.
				 */
				SetTD32(uhcip, head->link_ptr,
				    GetTD32(uhcip, head->link_ptr) |
				    HC_END_OF_LIST);
				pp = head->tw->tw_pipe_private;
				SetQH32(uhcip, pp->pp_qh->element_ptr,
				    GetQH32(uhcip, pp->pp_qh->element_ptr) |
				    HC_END_OF_LIST);
			}

			flag = B_TRUE;
		}

		head = head->outst_td_next;
	}

	/* Let the HC see the list changes before touching the TDs again */
	if (flag) {
		(void) uhci_wait_for_sof(uhcip);
	}

	/* Pass 2: clear per-tick flags and mark timed-out TDs with an error */
	head = uhcip->uhci_outst_tds_head;
	while (head) {
		if (head->tw->tw_flags & TW_TIMEOUT_FLAG) {
			head->tw->tw_flags &= ~TW_TIMEOUT_FLAG;
		}
		if (head->tw->tw_claim == UHCI_TIMEOUT_HDLR_CLAIMED) {
			head->tw->tw_claim = UHCI_NOT_CLAIMED;
			tmp_td = head->tw->tw_hctd_head;
			while (tmp_td) {
				/* Force a CRC/timeout status on each TD */
				SetTD_status(uhcip, tmp_td,
				    UHCI_TD_CRC_TIMEOUT);
				tmp_td = tmp_td->tw_td_next;
			}
		}
		head = head->outst_td_next;
	}

	/*
	 * Process the td which was completed before shifting from normal
	 * mode to polled mode
	 */
	if (uhcip->uhci_polled_flag == UHCI_POLLED_FLAG_TRUE) {
		uhci_process_submitted_td_queue(uhcip);
		uhcip->uhci_polled_flag = UHCI_POLLED_FLAG_FALSE;
	} else if (flag) {
		/* Process the completed/timed out commands */
		uhci_process_submitted_td_queue(uhcip);
	}

	/* Re-register the control/bulk/intr commands' timeout handler */
	if (uhcip->uhci_cmd_timeout_id) {
		uhcip->uhci_cmd_timeout_id = timeout(uhci_cmd_timeout_hdlr,
		    (void *)uhcip, UHCI_ONE_SECOND);
	}

	mutex_exit(&uhcip->uhci_int_mutex);
}


/*
 * uhci_wait_for_sof:
 *	Wait for the start of the next frame (implying any changes made in the
 *	lattice have now taken effect).
 *	To be sure this is the case, we wait for the completion of the current
 *	frame (which might have already been pending), then another complete
 *	frame to ensure everything has taken effect.
 */
int
uhci_wait_for_sof(uhci_state_t *uhcip)
{
	int			n;
	ushort_t		cmd_reg;
	usb_frame_number_t	before_frame_number, after_frame_number;
	clock_t			time, rval;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_wait_for_sof: uhcip = %p", uhcip);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
	before_frame_number = uhci_get_sw_frame_number(uhcip);
	for (n = 0; n < MAX_SOF_WAIT_COUNT; n++) {
		/* Arm the SOF TD's interrupt-on-complete and wait for it */
		SetTD_ioc(uhcip, uhcip->uhci_sof_td, 1);
		uhcip->uhci_cv_signal = B_TRUE;

		time = ddi_get_lbolt() + UHCI_ONE_SECOND;
		rval = cv_timedwait(&uhcip->uhci_cv_SOF,
		    &uhcip->uhci_int_mutex, time);

		after_frame_number = uhci_get_sw_frame_number(uhcip);
		/*
		 * Timed out (-1) with no frame progress: the HC appears
		 * stalled, so kick it back into the RUN state and re-enable
		 * interrupts, then re-read the frame number.
		 */
		if ((rval == -1) &&
		    (after_frame_number <= before_frame_number)) {
			cmd_reg = Get_OpReg16(USBCMD);
			Set_OpReg16(USBCMD, (cmd_reg | USBCMD_REG_HC_RUN));
			Set_OpReg16(USBINTR, ENABLE_ALL_INTRS);
			after_frame_number = uhci_get_sw_frame_number(uhcip);
		}
		before_frame_number = after_frame_number;
	}

	/* Disarm the SOF TD interrupt again */
	SetTD_ioc(uhcip, uhcip->uhci_sof_td, 0);

	/*
	 * uhci_cv_signal is cleared by the SOF interrupt handler
	 * (presumably - not visible here); if it is still set, no SOF
	 * interrupt was ever seen and the wait failed.
	 */
	return (uhcip->uhci_cv_signal ? USB_FAILURE : USB_SUCCESS);

}

/*
 * uhci_allocate_periodic_in_resource:
 *	Allocate interrupt/isochronous request structure for the
 *	interrupt/isochronous IN transfer.
 */
int
uhci_allocate_periodic_in_resource(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw,
	usb_flags_t		flags)
{
	size_t			length = 0;
	usb_opaque_t		client_periodic_in_reqp;
	usb_intr_req_t		*cur_intr_req;
	usb_isoc_req_t		*curr_isoc_reqp;
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_allocate_periodic_in_resource:\n\t"
	    "ph = 0x%p, pp = 0x%p, tw = 0x%p, flags = 0x%x", ph, pp, tw, flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Check the current periodic in request pointer; if one already
	 * exists this call is a no-op (note: deliberately USB_SUCCESS,
	 * not an error, despite the log message).
	 */
	if (tw->tw_curr_xfer_reqp) {
		USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
		    "uhci_allocate_periodic_in_resource: Interrupt "
		    "request structure already exists: "
		    "allocation failed");

		return (USB_SUCCESS);
	}

	/* Get the client periodic in request pointer */
	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;

	/*
	 * If it a periodic IN request and periodic request is NULL,
	 * allocate corresponding usb periodic IN request for the
	 * current periodic polling request and copy the information
	 * from the saved periodic request structure.
	 */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_INTR) {
		/* Get the interrupt transfer length */
		length = ((usb_intr_req_t *)client_periodic_in_reqp)->
		    intr_len;

		cur_intr_req = usba_hcdi_dup_intr_req(ph->p_dip,
		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
		if (cur_intr_req == NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Interrupt "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/*
		 * Check and save the timeout value; a timeout only applies
		 * to one-xfer interrupt requests.
		 */
		tw->tw_timeout_cnt = (cur_intr_req->intr_attributes &
		    USB_ATTRS_ONE_XFER) ? cur_intr_req->intr_timeout : 0;
		tw->tw_curr_xfer_reqp = (usb_opaque_t)cur_intr_req;
		tw->tw_length = cur_intr_req->intr_len;
	} else {
		ASSERT(client_periodic_in_reqp != NULL);

		if ((curr_isoc_reqp = usba_hcdi_dup_isoc_req(ph->p_dip,
		    (usb_isoc_req_t *)client_periodic_in_reqp, flags)) ==
		    NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Isochronous "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/*
		 * Save the client's isochronous request pointer and
		 * length of isochronous transfer in transfer wrapper.
		 * The dup'ed request is saved in pp_client_periodic_in_reqp
		 */
		tw->tw_curr_xfer_reqp =
		    (usb_opaque_t)pp->pp_client_periodic_in_reqp;
		pp->pp_client_periodic_in_reqp = (usb_opaque_t)curr_isoc_reqp;
		tw->tw_length = curr_isoc_reqp->isoc_pkts_length;
	}

	/* Account for the new outstanding request on the pipe handle */
	mutex_enter(&ph->p_mutex);
	ph->p_req_count++;
	mutex_exit(&ph->p_mutex);

	return (USB_SUCCESS);
}


/*
 * uhci_deallocate_periodic_in_resource:
 *	Deallocate interrupt/isochronous request structure for the
 *	interrupt/isochronous IN transfer.
 */
void
uhci_deallocate_periodic_in_resource(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw)
{
	usb_opaque_t		curr_xfer_reqp;
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_deallocate_periodic_in_resource: "
	    "pp = 0x%p tw = 0x%p", pp, tw);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	curr_xfer_reqp = tw->tw_curr_xfer_reqp;
	if (curr_xfer_reqp) {
		/*
		 * Reset periodic in request usb isoch
		 * packet request pointers to null.
		 */
		tw->tw_curr_xfer_reqp = NULL;
		tw->tw_isoc_req = NULL;

		/* Drop the pipe handle's outstanding-request count */
		mutex_enter(&ph->p_mutex);
		ph->p_req_count--;
		mutex_exit(&ph->p_mutex);

		/*
		 * Free pre-allocated interrupt or isochronous requests.
		 */
		switch (UHCI_XFER_TYPE(&ph->p_ep)) {
		case USB_EP_ATTR_INTR:
			usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
			break;
		case USB_EP_ATTR_ISOCH:
			usb_free_isoc_req((usb_isoc_req_t *)curr_xfer_reqp);
			break;
		}
	}
}


/*
 * uhci_hcdi_callback()
 *	convenience wrapper around usba_hcdi_callback()
 *	Consumes the request (from the tw, or failing that the pipe's
 *	periodic IN request) and completes it to the client with the
 *	given completion reason.  Note: uhci_int_mutex is dropped across
 *	the client callback and reacquired afterwards.
 */
void
uhci_hcdi_callback(uhci_state_t *uhcip, uhci_pipe_private_t *pp,
    usba_pipe_handle_data_t *ph, uhci_trans_wrapper_t *tw, usb_cr_t cr)
{
	usb_opaque_t	curr_xfer_reqp;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x", ph, tw, cr);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if (tw && tw->tw_curr_xfer_reqp) {
		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
		tw->tw_curr_xfer_reqp = NULL;
		tw->tw_isoc_req = NULL;
	} else {
		ASSERT(pp->pp_client_periodic_in_reqp != NULL);

		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
		pp->pp_client_periodic_in_reqp = NULL;
	}

	ASSERT(curr_xfer_reqp != NULL);

	/* Never call back into the client with the HC mutex held */
	mutex_exit(&uhcip->uhci_int_mutex);
	usba_hcdi_cb(ph, curr_xfer_reqp, cr);
	mutex_enter(&uhcip->uhci_int_mutex);
}


#ifdef DEBUG
/*
 * uhci_print_td:
 *	Debug-only dump of a TD's four raw DWORDs, its wrapper's byte
 *	counters, and the owning pipe's queue head.
 */
static void
uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint_t	*ptr = (uint_t *)td;

#ifndef lint
	_NOTE(NO_COMPETING_THREADS_NOW);
#endif
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tDWORD 1 0x%x\t DWORD 2 0x%x", ptr[0], ptr[1]);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tDWORD 3 0x%x\t DWORD 4 0x%x", ptr[2], ptr[3]);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tBytes xfered	 = %d", td->tw->tw_bytes_xfered);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tBytes Pending	 = %d", td->tw->tw_bytes_pending);
	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "Queue Head Details:");
	uhci_print_qh(uhcip, td->tw->tw_pipe_private->pp_qh);

#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif
}


/*
 * uhci_print_qh:
 *	Debug-only dump of a queue head's link and element pointers.
 */
static void
uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh)
{
	uint_t	*ptr = (uint_t *)qh;

	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
	    "\tLink Ptr = %x Element Ptr = %x", ptr[0], ptr[1]);
}
#endif