xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/uhci/uhciutil.c (revision 524e558aae3e99de2bdab73592f925ea489fbe07)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Universal Host Controller Driver (UHCI)
30  *
31  * The UHCI driver is a driver which interfaces to the Universal
32  * Serial Bus Driver (USBA) and the Host Controller (HC). The interface to
33  * the Host Controller is defined by the UHCI.
34  * This file contains misc functions.
35  */
36 #include <sys/usb/hcd/uhci/uhcid.h>
37 #include <sys/usb/hcd/uhci/uhciutil.h>
38 #include <sys/usb/hcd/uhci/uhcipolled.h>
39 
40 #include <sys/disp.h>
41 
42 /* Globals */
43 extern uint_t	uhci_td_pool_size;			/* Num TDs */
44 extern uint_t	uhci_qh_pool_size;			/* Num QHs */
45 extern ushort_t	uhci_tree_bottom_nodes[];
46 extern void	*uhci_statep;
47 
48 /* function prototypes */
49 static void	uhci_build_interrupt_lattice(uhci_state_t *uhcip);
50 static int	uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip);
51 
52 static uint_t	uhci_lattice_height(uint_t bandwidth);
53 static uint_t	uhci_lattice_parent(uint_t node);
54 static uint_t	uhci_leftmost_leaf(uint_t node, uint_t height);
55 static uint_t	uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
56 		    usb_port_status_t port_status);
57 
58 static int	uhci_bandwidth_adjust(uhci_state_t *uhcip,
59 		    usb_ep_descr_t *endpoint, usb_port_status_t port_status);
60 
61 static uhci_td_t *uhci_allocate_td_from_pool(uhci_state_t *uhcip);
62 static void	uhci_fill_in_td(uhci_state_t *uhcip,
63 		    uhci_td_t *td, uhci_td_t *current_dummy,
64 		    uint32_t buffer_offset, size_t length,
65 		    uhci_pipe_private_t	*pp, uchar_t PID,
66 		    usb_req_attrs_t attrs, uhci_trans_wrapper_t *tw);
67 static uint32_t	uhci_get_tw_paddr_by_offs(uhci_state_t *uhcip,
68 		    uint32_t buffer_offset, size_t length,
69 		    uhci_trans_wrapper_t *tw);
70 static uhci_trans_wrapper_t *uhci_create_transfer_wrapper(
71 		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
72 		    size_t length, usb_flags_t usb_flags);
73 static uhci_trans_wrapper_t *uhci_create_isoc_transfer_wrapper(
74 		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
75 		    usb_isoc_req_t *req, size_t length,
76 		    usb_flags_t usb_flags);
77 
78 static int	uhci_create_setup_pkt(uhci_state_t *uhcip,
79 		    uhci_pipe_private_t	*pp, uhci_trans_wrapper_t *tw);
80 static void	uhci_insert_ctrl_qh(uhci_state_t *uhcip,
81 		    uhci_pipe_private_t *pp);
82 static void	uhci_remove_ctrl_qh(uhci_state_t *uhcip,
83 		    uhci_pipe_private_t *pp);
84 static void	uhci_insert_intr_qh(uhci_state_t *uhcip,
85 		    uhci_pipe_private_t *pp);
86 static void	uhci_remove_intr_qh(uhci_state_t *uhcip,
87 		    uhci_pipe_private_t *pp);
88 static void	uhci_remove_bulk_qh(uhci_state_t *uhcip,
89 		    uhci_pipe_private_t *pp);
90 static void	uhci_insert_bulk_qh(uhci_state_t *uhcip,
91 		    uhci_pipe_private_t *pp);
92 static void	uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td);
93 static int	uhci_alloc_memory_for_tds(uhci_state_t *uhcip, uint_t num_tds,
94 		    uhci_bulk_isoc_xfer_t *info);
95 static int	uhci_alloc_bulk_isoc_tds(uhci_state_t *uhcip, uint_t num_tds,
96 		    uhci_bulk_isoc_xfer_t *info);
97 static void	uhci_get_isoc_td_by_index(uhci_state_t *uhcip,
98 		    uhci_bulk_isoc_xfer_t *info, uint_t index,
99 		    uhci_td_t **tdpp, uhci_bulk_isoc_td_pool_t **td_pool_pp);
100 static void	uhci_get_bulk_td_by_paddr(uhci_state_t *uhcip,
101 		    uhci_bulk_isoc_xfer_t *info, uint32_t paddr,
102 		    uhci_bulk_isoc_td_pool_t **td_pool_pp);
103 
104 static	int	uhci_handle_isoc_receive(uhci_state_t *uhcip,
105 		uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
106 static void	uhci_delete_isoc_td(uhci_state_t *uhcip,
107 		    uhci_td_t *td);
108 #ifdef DEBUG
109 static void	uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td);
110 static void	uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh);
111 #endif
112 
113 
114 /*
115  * uhci_build_interrupt_lattice:
116  *
117  * Construct the interrupt lattice tree using static Queue Head pointers.
118  * This interrupt lattice tree will have total of 63 queue heads and the
119  * Host Controller (HC) processes queue heads every frame.
120  */
static void
uhci_build_interrupt_lattice(uhci_state_t *uhcip)
{
	int			half_list = NUM_INTR_QH_LISTS / 2;
	uint16_t		i, j, k;
	uhci_td_t		*sof_td, *isoc_td;
	uintptr_t		addr;
	queue_head_t		*list_array = uhcip->uhci_qh_pool_addr;
	queue_head_t		*tmp_qh;
	frame_lst_table_t	*frame_lst_tablep =
					uhcip->uhci_frame_lst_tablep;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_build_interrupt_lattice:");

	/*
	 * Reserve the first 63 queue head structures in the pool as static
	 * queue heads; these are required for constructing the interrupt
	 * lattice tree.  Each is terminated (link and element pointers set
	 * to HC_END_OF_LIST) and tagged with its node number.
	 */
	for (i = 0; i < NUM_INTR_QH_LISTS; i++) {
		SetQH32(uhcip, list_array[i].link_ptr, HC_END_OF_LIST);
		SetQH32(uhcip, list_array[i].element_ptr, HC_END_OF_LIST);
		list_array[i].qh_flag		= QUEUE_HEAD_FLAG_STATIC;
		list_array[i].node		= i;
	}

	/* Build the interrupt lattice tree: link each pair of children up */
	for (i = 0; i < half_list - 1; i++) {
		/*
		 * The link pointer in the host controller queue head
		 * descriptor must contain a physical (iommu) address.
		 * QH_PADDR translates the cpu address of node i, and the
		 * two children (2i+1, 2i+2) are pointed at their parent.
		 */
		addr = QH_PADDR(&list_array[i]) | HC_QUEUE_HEAD;

		SetQH32(uhcip, list_array[2*i + 1].link_ptr, addr);
		SetQH32(uhcip, list_array[2*i + 2].link_ptr, addr);
	}

	/*
	 * Initialize the interrupt list in the Frame list Table
	 * so that it points to the bottom (leaf) level of the tree.
	 * uhci_tree_bottom_nodes[] supplies the frame-list slots that
	 * map onto each leaf.
	 */
	for (i = 0, j = 0; i < pow_2(TREE_HEIGHT); i++) {
		addr = QH_PADDR(&list_array[half_list + i - 1]);
		for (k = 0; k <  pow_2(VIRTUAL_TREE_HEIGHT); k++) {
			SetFL32(uhcip,
			    frame_lst_tablep[uhci_tree_bottom_nodes[j++]],
			    addr | HC_QUEUE_HEAD);
		}
	}

	/*
	 * Create the control and bulk transfer queue heads and chain
	 * them after the root of the interrupt lattice (node 0).
	 */
	uhcip->uhci_ctrl_xfers_q_head = uhci_alloc_queue_head(uhcip);
	tmp_qh = uhcip->uhci_ctrl_xfers_q_tail = uhcip->uhci_ctrl_xfers_q_head;

	SetQH32(uhcip, list_array[0].link_ptr,
		(QH_PADDR(tmp_qh) | HC_QUEUE_HEAD));

	uhcip->uhci_bulk_xfers_q_head = uhci_alloc_queue_head(uhcip);
	uhcip->uhci_bulk_xfers_q_tail = uhcip->uhci_bulk_xfers_q_head;
	SetQH32(uhcip, tmp_qh->link_ptr,
	    (QH_PADDR(uhcip->uhci_bulk_xfers_q_head)|HC_QUEUE_HEAD));

	SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_head->link_ptr, HC_END_OF_LIST);

	/*
	 * Add a dummy TD to the static queue head 0.  This is used
	 * to generate an interrupt at the end of a frame.
	 */
	sof_td = uhci_allocate_td_from_pool(uhcip);

	SetQH32(uhcip, list_array[0].element_ptr,
	    TD_PADDR(sof_td) | HC_TD_HEAD);
	SetTD32(uhcip, sof_td->link_ptr, HC_END_OF_LIST);
	uhcip->uhci_sof_td = sof_td;

	/*
	 * Add a dummy td that is used to generate an interrupt for
	 * every 1024 frames.  It hangs off a dedicated isoc QH placed
	 * in the last frame list entry (MAX_FRAME_NUM), chained in front
	 * of whatever that slot previously pointed to.
	 */
	isoc_td = uhci_allocate_td_from_pool(uhcip);
	SetTD32(uhcip, isoc_td->link_ptr, HC_END_OF_LIST);
	uhcip->uhci_isoc_td = isoc_td;

	uhcip->uhci_isoc_qh = uhci_alloc_queue_head(uhcip);
	SetQH32(uhcip, uhcip->uhci_isoc_qh->link_ptr,
	    GetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM]));
	SetQH32(uhcip, uhcip->uhci_isoc_qh->element_ptr, TD_PADDR(isoc_td));
	SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM],
			QH_PADDR(uhcip->uhci_isoc_qh) | HC_QUEUE_HEAD);
}
217 
218 
219 /*
220  * uhci_allocate_pools:
221  *	Allocate the system memory for the Queue Heads Descriptor and
222  *	for the Transfer Descriptor (TD) pools. Both QH and TD structures
223  *	must be aligned to a 16 byte boundary.
224  */
225 int
226 uhci_allocate_pools(uhci_state_t *uhcip)
227 {
228 	dev_info_t		*dip = uhcip->uhci_dip;
229 	size_t			real_length;
230 	int			i, result;
231 	uint_t			ccount;
232 	ddi_device_acc_attr_t	dev_attr;
233 
234 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
235 	    "uhci_allocate_pools:");
236 
237 	/* The host controller will be little endian */
238 	dev_attr.devacc_attr_version		= DDI_DEVICE_ATTR_V0;
239 	dev_attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
240 	dev_attr.devacc_attr_dataorder		= DDI_STRICTORDER_ACC;
241 
242 	/* Allocate the TD pool DMA handle */
243 	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
244 	    &uhcip->uhci_td_pool_dma_handle) != DDI_SUCCESS) {
245 
246 		return (USB_FAILURE);
247 	}
248 
249 	/* Allocate the memory for the TD pool */
250 	if (ddi_dma_mem_alloc(uhcip->uhci_td_pool_dma_handle,
251 	    uhci_td_pool_size * sizeof (uhci_td_t),
252 	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
253 	    (caddr_t *)&uhcip->uhci_td_pool_addr, &real_length,
254 	    &uhcip->uhci_td_pool_mem_handle)) {
255 
256 		return (USB_FAILURE);
257 	}
258 
259 	/* Map the TD pool into the I/O address space */
260 	result = ddi_dma_addr_bind_handle(uhcip->uhci_td_pool_dma_handle,
261 	    NULL, (caddr_t)uhcip->uhci_td_pool_addr, real_length,
262 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
263 	    NULL, &uhcip->uhci_td_pool_cookie, &ccount);
264 
265 	bzero((void *)uhcip->uhci_td_pool_addr,
266 	    uhci_td_pool_size * sizeof (uhci_td_t));
267 
268 	/* Process the result */
269 	if (result == DDI_DMA_MAPPED) {
270 		/* The cookie count should be 1 */
271 		if (ccount != 1) {
272 			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
273 			    "uhci_allocate_pools: More than 1 cookie");
274 
275 			return (USB_FAILURE);
276 		}
277 	} else {
278 		USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
279 		    "uhci_allocate_pools: Result = %d", result);
280 
281 		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);
282 
283 		return (USB_FAILURE);
284 	}
285 
286 	uhcip->uhci_dma_addr_bind_flag |= UHCI_TD_POOL_BOUND;
287 
288 	/* Initialize the TD pool */
289 	for (i = 0; i < uhci_td_pool_size; i++) {
290 		uhcip->uhci_td_pool_addr[i].flag = TD_FLAG_FREE;
291 	}
292 
293 	/* Allocate the TD pool DMA handle */
294 	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
295 	    0, &uhcip->uhci_qh_pool_dma_handle) != DDI_SUCCESS) {
296 
297 		return (USB_FAILURE);
298 	}
299 
300 	/* Allocate the memory for the QH pool */
301 	if (ddi_dma_mem_alloc(uhcip->uhci_qh_pool_dma_handle,
302 	    uhci_qh_pool_size * sizeof (queue_head_t),
303 	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
304 	    (caddr_t *)&uhcip->uhci_qh_pool_addr, &real_length,
305 	    &uhcip->uhci_qh_pool_mem_handle) != DDI_SUCCESS) {
306 
307 		return (USB_FAILURE);
308 	}
309 
310 	result = ddi_dma_addr_bind_handle(uhcip->uhci_qh_pool_dma_handle,
311 	    NULL, (caddr_t)uhcip->uhci_qh_pool_addr, real_length,
312 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
313 	    &uhcip->uhci_qh_pool_cookie, &ccount);
314 
315 	/* Process the result */
316 	if (result == DDI_DMA_MAPPED) {
317 		/* The cookie count should be 1 */
318 		if (ccount != 1) {
319 			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
320 			    "uhci_allocate_pools: More than 1 cookie");
321 
322 			return (USB_FAILURE);
323 		}
324 	} else {
325 		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);
326 
327 		return (USB_FAILURE);
328 	}
329 
330 	uhcip->uhci_dma_addr_bind_flag |= UHCI_QH_POOL_BOUND;
331 
332 	bzero((void *)uhcip->uhci_qh_pool_addr,
333 	    uhci_qh_pool_size * sizeof (queue_head_t));
334 
335 	/* Initialize the QH pool */
336 	for (i = 0; i < uhci_qh_pool_size; i ++) {
337 		uhcip->uhci_qh_pool_addr[i].qh_flag = QUEUE_HEAD_FLAG_FREE;
338 	}
339 
340 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
341 	    "uhci_allocate_pools: Completed");
342 
343 	return (USB_SUCCESS);
344 }
345 
346 
347 /*
348  * uhci_free_pools:
349  *	Cleanup on attach failure or detach
350  */
351 void
352 uhci_free_pools(uhci_state_t *uhcip)
353 {
354 	int			i, flag, rval;
355 	uhci_td_t		*td;
356 	uhci_trans_wrapper_t	*tw;
357 
358 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
359 						"uhci_free_pools:");
360 
361 	if (uhcip->uhci_td_pool_addr && uhcip->uhci_td_pool_mem_handle) {
362 		for (i = 0; i < uhci_td_pool_size; i ++) {
363 			td = &uhcip->uhci_td_pool_addr[i];
364 
365 			flag = uhcip->uhci_td_pool_addr[i].flag;
366 			if ((flag != TD_FLAG_FREE) &&
367 			    (flag != TD_FLAG_DUMMY) && (td->tw != NULL)) {
368 				tw = td->tw;
369 				uhci_free_tw(uhcip, tw);
370 			}
371 
372 		}
373 
374 		if (uhcip->uhci_dma_addr_bind_flag & UHCI_TD_POOL_BOUND) {
375 			rval = ddi_dma_unbind_handle(
376 					uhcip->uhci_td_pool_dma_handle);
377 			ASSERT(rval == DDI_SUCCESS);
378 		}
379 
380 		ddi_dma_mem_free(&uhcip->uhci_td_pool_mem_handle);
381 	}
382 
383 	/* Free the TD pool */
384 	if (uhcip->uhci_td_pool_dma_handle) {
385 		ddi_dma_free_handle(&uhcip->uhci_td_pool_dma_handle);
386 	}
387 
388 	if (uhcip->uhci_qh_pool_addr && uhcip->uhci_qh_pool_mem_handle) {
389 		if (uhcip->uhci_dma_addr_bind_flag & UHCI_QH_POOL_BOUND) {
390 			rval = ddi_dma_unbind_handle(
391 					uhcip->uhci_qh_pool_dma_handle);
392 			ASSERT(rval == DDI_SUCCESS);
393 		}
394 		ddi_dma_mem_free(&uhcip->uhci_qh_pool_mem_handle);
395 	}
396 
397 	/* Free the QH pool */
398 	if (uhcip->uhci_qh_pool_dma_handle) {
399 		ddi_dma_free_handle(&uhcip->uhci_qh_pool_dma_handle);
400 	}
401 
402 	/* Free the Frame list Table area */
403 	if (uhcip->uhci_frame_lst_tablep && uhcip->uhci_flt_mem_handle) {
404 		if (uhcip->uhci_dma_addr_bind_flag & UHCI_FLA_POOL_BOUND) {
405 			rval = ddi_dma_unbind_handle(
406 					uhcip->uhci_flt_dma_handle);
407 			ASSERT(rval == DDI_SUCCESS);
408 		}
409 		ddi_dma_mem_free(&uhcip->uhci_flt_mem_handle);
410 	}
411 
412 	if (uhcip->uhci_flt_dma_handle) {
413 		ddi_dma_free_handle(&uhcip->uhci_flt_dma_handle);
414 	}
415 }
416 
417 
418 /*
419  * uhci_decode_ddi_dma_addr_bind_handle_result:
420  *	Process the return values of ddi_dma_addr_bind_handle()
421  */
422 void
423 uhci_decode_ddi_dma_addr_bind_handle_result(uhci_state_t *uhcip, int result)
424 {
425 	char *msg;
426 
427 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
428 	    "uhci_decode_ddi_dma_addr_bind_handle_result:");
429 
430 	switch (result) {
431 	case DDI_DMA_PARTIAL_MAP:
432 		msg = "Partial transfers not allowed";
433 		break;
434 	case DDI_DMA_INUSE:
435 		msg = "Handle is in use";
436 		break;
437 	case DDI_DMA_NORESOURCES:
438 		msg = "No resources";
439 		break;
440 	case DDI_DMA_NOMAPPING:
441 		msg = "No mapping";
442 		break;
443 	case DDI_DMA_TOOBIG:
444 		msg = "Object is too big";
445 		break;
446 	default:
447 		msg = "Unknown dma error";
448 	}
449 
450 	USB_DPRINTF_L4(PRINT_MASK_ALL, uhcip->uhci_log_hdl, "%s", msg);
451 }
452 
453 
454 /*
455  * uhci_init_ctlr:
456  *	Initialize the Host Controller (HC).
457  */
/*
 * uhci_init_ctlr:
 *	Initialize the Host Controller (HC): disable legacy/SMI routing,
 *	reset the controller, set up the frame list, program the frame
 *	list base address and start the controller running.
 *
 *	Returns USB_SUCCESS once the run/config bits read back set,
 *	USB_FAILURE otherwise.  Acquires and drops uhci_int_mutex
 *	internally (it must not be held on entry).
 */
int
uhci_init_ctlr(uhci_state_t *uhcip)
{
	dev_info_t *dip = uhcip->uhci_dip;
	uint_t	cmd_reg;
	uint_t	frame_base_addr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_init_ctlr:");

	/*
	 * When USB legacy mode is enabled, the BIOS manages the USB keyboard
	 * attached to the UHCI controller. It has been observed that some
	 * times the BIOS does not clear the interrupts in the legacy mode
	 * register in the PCI configuration space. So, disable the SMI intrs
	 * and route the intrs to PIRQD here.
	 */
	pci_config_put16(uhcip->uhci_config_handle,
	    LEGACYMODE_REG_OFFSET, LEGACYMODE_REG_INIT_VALUE);

	/*
	 * Disable all the interrupts.
	 */
	Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

	mutex_enter(&uhcip->uhci_int_mutex);
	cmd_reg = Get_OpReg16(USBCMD);
	cmd_reg &= (~USBCMD_REG_HC_RUN);

	/* Stop the controller */
	Set_OpReg16(USBCMD, cmd_reg);

	/* Reset the host controller */
	Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

	/* Wait 10ms for reset to complete; drop the lock while sleeping */
	mutex_exit(&uhcip->uhci_int_mutex);
	delay(drv_usectohz(UHCI_RESET_DELAY));
	mutex_enter(&uhcip->uhci_int_mutex);

	/* Clear the reset (and every other command) bit */
	Set_OpReg16(USBCMD, 0);

	/* Set the frame number to zero */
	Set_OpReg16(FRNUM, 0);

	/* Initialize the Frame list base address area */
	if (uhci_init_frame_lst_table(dip, uhcip) != USB_SUCCESS) {
		mutex_exit(&uhcip->uhci_int_mutex);

		return (USB_FAILURE);
	}

	/* Save the contents of the Frame Interval Registers */
	uhcip->uhci_frame_interval = Get_OpReg8(SOFMOD);

	frame_base_addr = uhcip->uhci_flt_cookie.dmac_address;

	/* Set the Frame list base address (physical, from the DMA cookie) */
	Set_OpReg32(FRBASEADD, frame_base_addr);

	/*
	 * Begin sending SOFs
	 * Set the Host Controller Functional State to Operational
	 */
	cmd_reg = Get_OpReg16(USBCMD);
	cmd_reg |= (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
				USBCMD_REG_CONFIG_FLAG);

	Set_OpReg16(USBCMD, cmd_reg);
	mutex_exit(&uhcip->uhci_int_mutex);

	/*
	 * Verify the Command and interrupt enable registers,
	 * a sanity check whether actually initialized or not
	 */
	cmd_reg = Get_OpReg16(USBCMD);

	if (!(cmd_reg & (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
	    USBCMD_REG_CONFIG_FLAG))) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_init_ctlr: Controller initialization failed");

		return (USB_FAILURE);
	}

	/*
	 * Set the ioc bit of the isoc intr td. This enables
	 * the generation of an interrupt for every 1024 frames.
	 */
	SetTD_ioc(uhcip, uhcip->uhci_isoc_td, 1);

	/* Set the flag that uhci controller has been initialized. */
	uhcip->uhci_ctlr_init_flag = B_TRUE;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_init_ctlr: Completed");

	return (USB_SUCCESS);
}
556 
557 
558 /*
559  * uhci_uninit_ctlr:
560  *	uninitialize the Host Controller (HC).
561  */
/*
 * uhci_uninit_ctlr:
 *	Uninitialize the Host Controller (HC): mask its interrupts and
 *	halt it.  The caller holds uhci_int_mutex; it is dropped and
 *	reacquired around the settle delay so other threads can run.
 */
void
uhci_uninit_ctlr(uhci_state_t *uhcip)
{
	/* Only touch the hardware if the registers are still mapped */
	if (uhcip->uhci_regs_handle) {
		/* Disable all the interrupts. */
		Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);

		/* Complete the current transaction and then halt. */
		Set_OpReg16(USBCMD, 0);

		/* Wait for sometime to let the controller settle */
		mutex_exit(&uhcip->uhci_int_mutex);
		delay(drv_usectohz(UHCI_TIMEWAIT));
		mutex_enter(&uhcip->uhci_int_mutex);
	}
}
578 
579 
580 /*
581  * uhci_map_regs:
582  *	The Host Controller (HC) contains a set of on-chip operational
583  *	registers and which should be mapped into a non-cacheable
584  *	portion of the system addressable space.
585  */
/*
 * uhci_map_regs:
 *	The Host Controller (HC) contains a set of on-chip operational
 *	registers and which should be mapped into a non-cacheable
 *	portion of the system addressable space.
 *
 *	Also sets up PCI config space access and verifies that I/O
 *	space access is enabled.  Returns USB_SUCCESS/USB_FAILURE.
 */
int
uhci_map_regs(uhci_state_t *uhcip)
{
	dev_info_t		*dip = uhcip->uhci_dip;
	int			index;
	uint32_t		regs_prop_len;
	int32_t			*regs_list;
	uint16_t		command_reg;
	ddi_device_acc_attr_t	attr;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_map_regs:");

	/* The host controller will be little endian */
	attr.devacc_attr_version	= DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder	= DDI_STRICTORDER_ACC;

	/* Fetch the PCI "reg" property to locate the register set */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, uhcip->uhci_dip,
	    DDI_PROP_DONTPASS, "reg", &regs_list, &regs_prop_len) !=
	    DDI_PROP_SUCCESS) {

		return (USB_FAILURE);
	}

	/*
	 * Each "reg" entry is five integers (PCI "reg" binding); scan
	 * the first word of each entry for the address-space bits
	 * selected by UHCI_PROP_MASK.
	 */
	for (index = 0; index * 5 < regs_prop_len; index++) {
		if (regs_list[index * 5] & UHCI_PROP_MASK) {
			break;
		}
	}

	/*
	 * Deallocate the memory allocated by the ddi_prop_lookup_int_array
	 */
	ddi_prop_free(regs_list);

	/* No matching register set found */
	if (index * 5 >= regs_prop_len) {

		return (USB_FAILURE);
	}

	/* Map in operational registers (register set "index") */
	if (ddi_regs_map_setup(dip, index, (caddr_t *)&uhcip->uhci_regsp,
	    0, sizeof (hc_regs_t), &attr, &uhcip->uhci_regs_handle) !=
	    DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "ddi_regs_map_setup: failed");

		return (USB_FAILURE);
	}

	/* Set up access to PCI configuration space */
	if (pci_config_setup(dip, &uhcip->uhci_config_handle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_map_regs: Config error");

		return (USB_FAILURE);
	}

	/* Make sure Memory Access Enable and Master Enable are set */
	command_reg = pci_config_get16(uhcip->uhci_config_handle,
	    PCI_CONF_COMM);
	if (!(command_reg & (PCI_COMM_MAE | PCI_COMM_ME))) {
		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_map_regs: No MAE/ME");
	}

	command_reg |= PCI_COMM_MAE | PCI_COMM_ME;
	pci_config_put16(uhcip->uhci_config_handle, PCI_CONF_COMM, command_reg);

	/*
	 * Check whether I/O base address is configured and enabled.
	 * (Tested on the value read above, before MAE/ME were OR-ed in.)
	 */
	if (!(command_reg & PCI_COMM_IO)) {
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "I/O Base address access disabled");

		return (USB_FAILURE);
	}
	/*
	 * Get the IO base address of the controller
	 */
	uhcip->uhci_iobase = (pci_config_get16(uhcip->uhci_config_handle,
	    PCI_CONF_IOBASE) & PCI_CONF_IOBASE_MASK);

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_map_regs: Completed");

	return (USB_SUCCESS);
}
674 
675 
676 void
677 uhci_unmap_regs(uhci_state_t *uhcip)
678 {
679 	/* Unmap the UHCI registers */
680 	if (uhcip->uhci_regs_handle) {
681 		/* Reset the host controller */
682 		Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);
683 
684 		ddi_regs_map_free(&uhcip->uhci_regs_handle);
685 	}
686 
687 	if (uhcip->uhci_config_handle) {
688 		pci_config_teardown(&uhcip->uhci_config_handle);
689 	}
690 }
691 
692 
693 /*
694  * uhci_set_dma_attributes:
695  *	Set the limits in the DMA attributes structure. Most of the values used
696  *	in the	DMA limit structres are the default values as specified by  the
697  *	Writing PCI device drivers document.
698  */
699 void
700 uhci_set_dma_attributes(uhci_state_t *uhcip)
701 {
702 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
703 	    "uhci_set_dma_attributes:");
704 
705 	/* Initialize the DMA attributes */
706 	uhcip->uhci_dma_attr.dma_attr_version = DMA_ATTR_V0;
707 	uhcip->uhci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
708 	uhcip->uhci_dma_attr.dma_attr_addr_hi = 0xfffffff0ull;
709 
710 	/* 32 bit addressing */
711 	uhcip->uhci_dma_attr.dma_attr_count_max = 0xffffffull;
712 
713 	/*
714 	 * Setting the dam_att_align to 512, some times fails the
715 	 * binding handle. I dont know why ? But setting to 16 will
716 	 * be right for our case (16 byte alignment required per
717 	 * UHCI spec for TD descriptors).
718 	 */
719 
720 	/* 16 byte alignment */
721 	uhcip->uhci_dma_attr.dma_attr_align = 0x10;
722 
723 	/*
724 	 * Since PCI  specification is byte alignment, the
725 	 * burstsize field should be set to 1 for PCI devices.
726 	 */
727 	uhcip->uhci_dma_attr.dma_attr_burstsizes = 0x1;
728 
729 	uhcip->uhci_dma_attr.dma_attr_minxfer	= 0x1;
730 	uhcip->uhci_dma_attr.dma_attr_maxxfer	= 0xffffffull;
731 	uhcip->uhci_dma_attr.dma_attr_seg	= 0xffffffffull;
732 	uhcip->uhci_dma_attr.dma_attr_sgllen	= 1;
733 	uhcip->uhci_dma_attr.dma_attr_granular	= 1;
734 	uhcip->uhci_dma_attr.dma_attr_flags	= 0;
735 }
736 
737 
738 uint_t
739 pow_2(uint_t x)
740 {
741 	return ((x == 0) ? 1 : (1 << x));
742 }
743 
744 
745 uint_t
746 log_2(uint_t x)
747 {
748 	int ret_val = 0;
749 
750 	while (x != 1) {
751 		ret_val++;
752 		x = x >> 1;
753 	}
754 
755 	return (ret_val);
756 }
757 
758 
759 /*
760  * uhci_obtain_state:
761  */
762 uhci_state_t *
763 uhci_obtain_state(dev_info_t *dip)
764 {
765 	int instance = ddi_get_instance(dip);
766 	uhci_state_t *state = ddi_get_soft_state(uhci_statep, instance);
767 
768 	ASSERT(state != NULL);
769 
770 	return (state);
771 }
772 
773 
774 /*
775  * uhci_alloc_hcdi_ops:
776  *	The HCDI interfaces or entry points are the software interfaces used by
777  *	the Universal Serial Bus Driver  (USBA) to  access the services of the
778  *	Host Controller Driver (HCD).  During HCD initialization, inform  USBA
779  *	about all available HCDI interfaces or entry points.
780  */
781 usba_hcdi_ops_t *
782 uhci_alloc_hcdi_ops(uhci_state_t *uhcip)
783 {
784 	usba_hcdi_ops_t	*hcdi_ops;
785 
786 	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
787 	    "uhci_alloc_hcdi_ops:");
788 
789 	hcdi_ops = usba_alloc_hcdi_ops();
790 
791 	hcdi_ops->usba_hcdi_pipe_open = uhci_hcdi_pipe_open;
792 	hcdi_ops->usba_hcdi_pipe_close	= uhci_hcdi_pipe_close;
793 	hcdi_ops->usba_hcdi_pipe_reset = uhci_hcdi_pipe_reset;
794 
795 	hcdi_ops->usba_hcdi_pipe_ctrl_xfer = uhci_hcdi_pipe_ctrl_xfer;
796 	hcdi_ops->usba_hcdi_pipe_bulk_xfer = uhci_hcdi_pipe_bulk_xfer;
797 	hcdi_ops->usba_hcdi_pipe_intr_xfer = uhci_hcdi_pipe_intr_xfer;
798 	hcdi_ops->usba_hcdi_pipe_isoc_xfer = uhci_hcdi_pipe_isoc_xfer;
799 
800 	hcdi_ops->usba_hcdi_bulk_transfer_size = uhci_hcdi_bulk_transfer_size;
801 	hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
802 					uhci_hcdi_pipe_stop_intr_polling;
803 	hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
804 					uhci_hcdi_pipe_stop_isoc_polling;
805 
806 	hcdi_ops->usba_hcdi_get_current_frame_number =
807 					uhci_hcdi_get_current_frame_number;
808 	hcdi_ops->usba_hcdi_get_max_isoc_pkts = uhci_hcdi_get_max_isoc_pkts;
809 
810 	hcdi_ops->usba_hcdi_console_input_init = uhci_hcdi_polled_input_init;
811 	hcdi_ops->usba_hcdi_console_input_enter = uhci_hcdi_polled_input_enter;
812 	hcdi_ops->usba_hcdi_console_read = uhci_hcdi_polled_read;
813 	hcdi_ops->usba_hcdi_console_input_exit = uhci_hcdi_polled_input_exit;
814 	hcdi_ops->usba_hcdi_console_input_fini = uhci_hcdi_polled_input_fini;
815 
816 	return (hcdi_ops);
817 }
818 
819 
820 /*
821  * uhci_init_frame_lst_table :
822  *	Allocate the system memory and initialize Host Controller
823  *	Frame list table area The starting of the Frame list Table
824  *	area must be 4096 byte aligned.
825  */
/*
 * uhci_init_frame_lst_table :
 *	Allocate the system memory and initialize Host Controller
 *	Frame list table area.  The starting address of the Frame list
 *	Table area must be 4096 byte aligned (UHCI requirement).
 *
 *	Caller must hold uhci_int_mutex.  Returns USB_SUCCESS/USB_FAILURE.
 */
static int
uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip)
{
	int			result;
	uint_t			ccount;
	size_t			real_length;
	ddi_device_acc_attr_t	dev_attr;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_init_frame_lst_table:");

	/* The host controller will be little endian */
	dev_attr.devacc_attr_version		= DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder		= DDI_STRICTORDER_ACC;

	/*
	 * 4K alignment required for the frame list; temporarily bump
	 * the shared DMA attribute before allocating the handle.
	 */
	uhcip->uhci_dma_attr.dma_attr_align = 0x1000;

	/* Create space for the HCCA block */
	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
	    0, &uhcip->uhci_flt_dma_handle) != DDI_SUCCESS) {

		return (USB_FAILURE);
	}

	/* Reset to the default 16 byte alignment for later allocations */
	uhcip->uhci_dma_attr.dma_attr_align = 0x10;

	if (ddi_dma_mem_alloc(uhcip->uhci_flt_dma_handle,
	    SIZE_OF_FRAME_LST_TABLE, &dev_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, 0, (caddr_t *)&uhcip->uhci_frame_lst_tablep,
	    &real_length, &uhcip->uhci_flt_mem_handle)) {

		return (USB_FAILURE);
	}

	/* Map the whole Frame list base area into the I/O address space */
	result = ddi_dma_addr_bind_handle(uhcip->uhci_flt_dma_handle,
	    NULL, (caddr_t)uhcip->uhci_frame_lst_tablep, real_length,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &uhcip->uhci_flt_cookie, &ccount);

	if (result == DDI_DMA_MAPPED) {
		/* The cookie count should be 1 */
		if (ccount != 1) {
			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "uhci_init_frame_list_table: More than 1 cookie");

			return (USB_FAILURE);
		}
	} else {
		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);

		return (USB_FAILURE);
	}

	/* Record the bind so uhci_free_pools() knows to unbind it */
	uhcip->uhci_dma_addr_bind_flag |= UHCI_FLA_POOL_BOUND;

	bzero((void *)uhcip->uhci_frame_lst_tablep, real_length);

	/* Initialize the interrupt lists */
	uhci_build_interrupt_lattice(uhcip);

	return (USB_SUCCESS);
}
894 
895 
896 /*
897  * uhci_alloc_queue_head:
898  *	Allocate a queue head
899  */
900 queue_head_t *
901 uhci_alloc_queue_head(uhci_state_t *uhcip)
902 {
903 	int		index;
904 	uhci_td_t	*dummy_td;
905 	queue_head_t	*queue_head;
906 
907 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
908 	    "uhci_alloc_queue_head");
909 
910 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
911 
912 	/* Allocate a dummy td first. */
913 	if ((dummy_td = uhci_allocate_td_from_pool(uhcip)) == NULL) {
914 
915 		USB_DPRINTF_L2(PRINT_MASK_ALLOC,  uhcip->uhci_log_hdl,
916 		"uhci_alloc_queue_head: allocate td from pool failed");
917 
918 		return (NULL);
919 	}
920 
921 	/*
922 	 * The first 63 queue heads in the Queue Head (QH)
923 	 * buffer pool are reserved for building interrupt lattice
924 	 * tree. Search for a blank Queue head in the QH buffer pool.
925 	 */
926 	for (index = NUM_STATIC_NODES; index < uhci_qh_pool_size; index++) {
927 		if (uhcip->uhci_qh_pool_addr[index].qh_flag ==
928 		    QUEUE_HEAD_FLAG_FREE) {
929 			break;
930 		}
931 	}
932 
933 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
934 	    "uhci_alloc_queue_head: Allocated %d", index);
935 
936 	if (index == uhci_qh_pool_size) {
937 		USB_DPRINTF_L2(PRINT_MASK_ALLOC,  uhcip->uhci_log_hdl,
938 		    "uhci_alloc_queue_head: All QH exhausted");
939 
940 		/* Free the dummy td allocated for this qh. */
941 		dummy_td->flag = TD_FLAG_FREE;
942 
943 		return (NULL);
944 	}
945 
946 	queue_head = &uhcip->uhci_qh_pool_addr[index];
947 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
948 	    "uhci_alloc_queue_head: Allocated address 0x%p", queue_head);
949 
950 	bzero((void *)queue_head, sizeof (queue_head_t));
951 	SetQH32(uhcip, queue_head->link_ptr, HC_END_OF_LIST);
952 	SetQH32(uhcip, queue_head->element_ptr, HC_END_OF_LIST);
953 	queue_head->prev_qh	= NULL;
954 	queue_head->qh_flag	= QUEUE_HEAD_FLAG_BUSY;
955 
956 	bzero((char *)dummy_td, sizeof (uhci_td_t));
957 	queue_head->td_tailp	= dummy_td;
958 	SetQH32(uhcip, queue_head->element_ptr, TD_PADDR(dummy_td));
959 
960 	return (queue_head);
961 }
962 
963 
964 /*
965  * uhci_allocate_bandwidth:
966  *	Figure out whether or not this interval may be supported. Return
967  *	the index into the  lattice if it can be supported.  Return
968  *	allocation failure if it can not be supported.
969  */
int
uhci_allocate_bandwidth(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*pipe_handle,
	uint_t			*node)
{
	int		bandwidth;	/* Requested bandwidth */
	uint_t		min, min_index;
	uint_t		i;
	uint_t		height;		/* Bandwidth's height in the tree */
	uint_t		leftmost;
	uint_t		length;
	uint32_t	paddr;
	queue_head_t	*tmp_qh;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint.  The device's usb_mutex is held while
	 * reading the port (speed) status.
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);

	length = uhci_compute_total_bandwidth(endpoint,
			pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If the length in bytes plus the allocated bandwidth exceeds
	 * the maximum, return bandwidth allocation failure.
	 */
	if ((length + uhcip->uhci_bandwidth_intr_min +
		uhcip->uhci_bandwidth_isoch_sum) > (MAX_PERIODIC_BANDWIDTH)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_allocate_bandwidth: "
		    "Reached maximum bandwidth value and cannot allocate "
		    "bandwidth for a given Interrupt/Isoch endpoint");

		return (USB_NO_BANDWIDTH);
	}

	/*
	 * Isochronous endpoints are not assigned a lattice node here;
	 * their bandwidth is simply added to the running isochronous
	 * total and the call succeeds.
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum += length;

		return (USB_SUCCESS);
	}

	/*
	 * This is an interrupt endpoint.
	 * Adjust bandwidth to be a power of 2
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
			pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this bandwidth can't be supported,
	 * return allocation failure.
	 */
	if (bandwidth == USB_FAILURE) {

		return (USB_FAILURE);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The new bandwidth is %d", bandwidth);

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The leaf with minimal bandwidth %d, "
	    "The smallest bandwidth %d", min_index, min);

	/*
	 * Find the index into the lattice given the
	 * leaf with the smallest allocated bandwidth.
	 */
	height = uhci_lattice_height(bandwidth);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The height is %d", height);

	*node = uhci_tree_bottom_nodes[min_index];

	/* check if there are isocs TDs scheduled for this frame */
	if (uhcip->uhci_isoc_q_tailp[*node]) {
		paddr = (uhcip->uhci_isoc_q_tailp[*node]->link_ptr &
						FRAME_LST_PTR_MASK);
	} else {
		paddr = (uhcip->uhci_frame_lst_tablep[*node] &
						FRAME_LST_PTR_MASK);
	}

	/*
	 * Map the frame's first QH back to its lattice node, then walk
	 * up the tree 'height' levels to the node matching the adjusted
	 * polling interval.
	 */
	tmp_qh = QH_VADDR(paddr);
	*node = tmp_qh->node;
	for (i = 0; i < height; i++) {
		*node = uhci_lattice_parent(*node);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The real node is %d", *node);

	/*
	 * Find the leftmost leaf in the subtree specified by the node.
	 */
	leftmost = uhci_leftmost_leaf(*node, height);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "Leftmost %d", leftmost);

	/*
	 * Verify that every leaf under this node can absorb the added
	 * length before committing anything.
	 */
	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i ++) {

		if ((length + uhcip->uhci_bandwidth_isoch_sum +
		uhcip->uhci_bandwidth[i]) > MAX_PERIODIC_BANDWIDTH) {

			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_allocate_bandwidth: "
			    "Reached maximum bandwidth value and cannot "
			    "allocate bandwidth for Interrupt endpoint");

			return (USB_NO_BANDWIDTH);
		}
	}

	/*
	 * All the leaves for this node must be updated with the bandwidth.
	 */
	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i ++) {
		uhcip->uhci_bandwidth[i] += length;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;

	return (USB_SUCCESS);
}
1131 
1132 
1133 /*
1134  * uhci_deallocate_bandwidth:
1135  *	Deallocate bandwidth for the given node in the lattice
1136  *	and the length of transfer.
1137  */
void
uhci_deallocate_bandwidth(uhci_state_t *uhcip,
    usba_pipe_handle_data_t *pipe_handle)
{
	uint_t		bandwidth;
	uint_t		height;
	uint_t		leftmost;
	uint_t		i;
	uint_t		min;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;
	uint_t		node, length;
	uhci_pipe_private_t *pp =
			(uhci_pipe_private_t *)pipe_handle->p_hcd_private;

	/* This routine is protected by the uhci_int_mutex */
	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Recompute the per-transaction length exactly as the allocation
	 * path did, so the same amount is subtracted from the leaves.
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	length = uhci_compute_total_bandwidth(endpoint,
			pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this is an isochronous endpoint, just delete endpoint's
	 * bandwidth from the total allocated isochronous bandwidth.
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum -= length;

		return;
	}

	/* Obtain the lattice node recorded in the pipe private data */
	node = pp->pp_node;

	/* Adjust bandwidth to be a power of 2 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
			pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/* Find the height in the tree */
	height = uhci_lattice_height(bandwidth);

	/*
	 * Find the leftmost leaf in the subtree specified by the node
	 */
	leftmost = uhci_leftmost_leaf(node, height);

	/* Delete the bandwidth from every leaf covered by this node */
	for (i = leftmost; i < leftmost + (NUM_FRAME_LST_ENTRIES/bandwidth);
	    i ++) {
		uhcip->uhci_bandwidth[i] -= length;
	}

	min = uhcip->uhci_bandwidth[0];

	/* Recompute the minimum */
	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;
}
1206 
1207 
1208 /*
1209  * uhci_compute_total_bandwidth:
1210  *
1211  * Given a periodic endpoint (interrupt or isochronous) determine the total
1212  * bandwidth for one transaction. The UHCI host controller traverses the
1213  * endpoint descriptor lists on a first-come-first-serve basis. When the HC
1214  * services an endpoint, only a single transaction attempt is made. The  HC
1215  * moves to the next Endpoint Descriptor after the first transaction attempt
1216  * rather than finishing the entire Transfer Descriptor. Therefore, when  a
1217  * Transfer Descriptor is inserted into the lattice, we will only count the
1218  * number of bytes for one transaction.
1219  *
 * The following are the formulas used for calculating bandwidth in terms
 * of bytes; they apply to a single USB full speed and low speed transaction
1222  * respectively. The protocol overheads will be different for each of  type
1223  * of USB transfer and all these formulas & protocol overheads are  derived
1224  * from the 5.9.3 section of USB Specification & with the help of Bandwidth
1225  * Analysis white paper which is posted on the USB  developer forum.
1226  *
1227  * Full-Speed:
1228  *	  Protocol overhead  + ((MaxPacketSize * 7)/6 )  + Host_Delay
1229  *
1230  * Low-Speed:
1231  *		Protocol overhead  + Hub LS overhead +
1232  *		  (Low-Speed clock * ((MaxPacketSize * 7)/6 )) + Host_Delay
1233  */
1234 static uint_t
1235 uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
1236 		usb_port_status_t port_status)
1237 {
1238 	uint_t		bandwidth;
1239 	ushort_t	MaxPacketSize = endpoint->wMaxPacketSize;
1240 
1241 	/* Add Host Controller specific delay to required bandwidth */
1242 	bandwidth = HOST_CONTROLLER_DELAY;
1243 
1244 	/* Add bit-stuffing overhead */
1245 	MaxPacketSize = (ushort_t)((MaxPacketSize * 7) / 6);
1246 
1247 	/* Low Speed interrupt transaction */
1248 	if (port_status == USBA_LOW_SPEED_DEV) {
1249 		/* Low Speed interrupt transaction */
1250 		bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
1251 				HUB_LOW_SPEED_PROTO_OVERHEAD +
1252 				(LOW_SPEED_CLOCK * MaxPacketSize));
1253 	} else {
1254 		/* Full Speed transaction */
1255 		bandwidth += MaxPacketSize;
1256 
1257 		if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_INTR) {
1258 			/* Full Speed interrupt transaction */
1259 			bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
1260 		} else {
1261 			/* Isochronus and input transaction */
1262 			if (UHCI_XFER_DIR(endpoint) == USB_EP_DIR_IN) {
1263 				bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
1264 			} else {
1265 				/* Isochronus and output transaction */
1266 				bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
1267 			}
1268 		}
1269 	}
1270 
1271 	return (bandwidth);
1272 }
1273 
1274 
1275 /*
1276  * uhci_bandwidth_adjust:
1277  */
1278 static int
1279 uhci_bandwidth_adjust(
1280 	uhci_state_t		*uhcip,
1281 	usb_ep_descr_t		*endpoint,
1282 	usb_port_status_t	port_status)
1283 {
1284 	int	i = 0;
1285 	uint_t	interval;
1286 
1287 	/*
1288 	 * Get the polling interval from the endpoint descriptor
1289 	 */
1290 	interval = endpoint->bInterval;
1291 
1292 	/*
1293 	 * The bInterval value in the endpoint descriptor can range
1294 	 * from 1 to 255ms. The interrupt lattice has 32 leaf nodes,
1295 	 * and the host controller cycles through these nodes every
1296 	 * 32ms. The longest polling  interval that the  controller
1297 	 * supports is 32ms.
1298 	 */
1299 
1300 	/*
1301 	 * Return an error if the polling interval is less than 1ms
1302 	 * and greater than 255ms
1303 	 */
1304 	if ((interval < MIN_POLL_INTERVAL) || (interval > MAX_POLL_INTERVAL)) {
1305 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1306 		    "uhci_bandwidth_adjust: Endpoint's poll interval must be "
1307 		    "between %d and %d ms", MIN_POLL_INTERVAL,
1308 		    MAX_POLL_INTERVAL);
1309 
1310 		return (USB_FAILURE);
1311 	}
1312 
1313 	/*
1314 	 * According USB Specifications, a  full-speed endpoint can
1315 	 * specify a desired polling interval 1ms to 255ms and a low
1316 	 * speed  endpoints are limited to  specifying only 10ms to
1317 	 * 255ms. But some old keyboards & mice uses polling interval
1318 	 * of 8ms. For compatibility  purpose, we are using polling
1319 	 * interval between 8ms & 255ms for low speed endpoints.
1320 	 */
1321 	if ((port_status == USBA_LOW_SPEED_DEV) &&
1322 	    (interval < MIN_LOW_SPEED_POLL_INTERVAL)) {
1323 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1324 		    "uhci_bandwidth_adjust: Low speed endpoint's poll interval "
1325 		    "must be >= %d ms, adjusted",
1326 		    MIN_LOW_SPEED_POLL_INTERVAL);
1327 
1328 		interval = MIN_LOW_SPEED_POLL_INTERVAL;
1329 	}
1330 
1331 	/*
1332 	 * If polling interval is greater than 32ms,
1333 	 * adjust polling interval equal to 32ms.
1334 	 */
1335 	if (interval > 32) {
1336 		interval = 32;
1337 	}
1338 
1339 	/*
1340 	 * Find the nearest power of 2 that's less
1341 	 * than interval.
1342 	 */
1343 	while ((pow_2(i)) <= interval) {
1344 		i++;
1345 	}
1346 
1347 	return (pow_2((i - 1)));
1348 }
1349 
1350 
1351 /*
1352  * uhci_lattice_height:
1353  *	Given the requested bandwidth, find the height in the tree at
1354  *	which the nodes for this bandwidth fall.  The height is measured
1355  *	as the number of nodes from the leaf to the level specified by
1356  *	bandwidth The root of the tree is at height TREE_HEIGHT.
1357  */
1358 static uint_t
1359 uhci_lattice_height(uint_t bandwidth)
1360 {
1361 	return (TREE_HEIGHT - (log_2(bandwidth)));
1362 }
1363 
1364 
1365 static uint_t
1366 uhci_lattice_parent(uint_t node)
1367 {
1368 	return (((node % 2) == 0) ? ((node/2) - 1) : (node/2));
1369 }
1370 
1371 
1372 /*
1373  * uhci_leftmost_leaf:
1374  *	Find the leftmost leaf in the subtree specified by the node.
1375  *	Height refers to number of nodes from the bottom of the tree
1376  *	to the node,  including the node.
1377  */
1378 static uint_t
1379 uhci_leftmost_leaf(uint_t node, uint_t height)
1380 {
1381 	node = pow_2(height + VIRTUAL_TREE_HEIGHT) * (node+1) -
1382 			NUM_FRAME_LST_ENTRIES;
1383 	return (node);
1384 }
1385 
1386 
1387 /*
1388  * uhci_insert_qh:
1389  *	Add the Queue Head (QH) into the Host Controller's (HC)
1390  *	appropriate queue head list.
1391  */
1392 void
1393 uhci_insert_qh(uhci_state_t *uhcip, usba_pipe_handle_data_t *ph)
1394 {
1395 	uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
1396 
1397 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1398 	    "uhci_insert_qh:");
1399 
1400 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1401 
1402 	switch (UHCI_XFER_TYPE(&ph->p_ep)) {
1403 	case USB_EP_ATTR_CONTROL:
1404 		uhci_insert_ctrl_qh(uhcip, pp);
1405 		break;
1406 	case USB_EP_ATTR_BULK:
1407 		uhci_insert_bulk_qh(uhcip, pp);
1408 		break;
1409 	case USB_EP_ATTR_INTR:
1410 		uhci_insert_intr_qh(uhcip, pp);
1411 		break;
1412 	case USB_EP_ATTR_ISOCH:
1413 			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
1414 		    "uhci_insert_qh: Illegal request");
1415 		break;
1416 	}
1417 }
1418 
1419 
1420 /*
1421  * uhci_insert_ctrl_qh:
1422  *	Insert a control QH into the Host Controller's (HC) control QH list.
1423  */
static void
uhci_insert_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t *qh = pp->pp_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_ctrl_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * When head == tail the list has no client QHs yet; mark the
	 * head's back pointer as invalid rather than NULL.
	 */
	if (uhcip->uhci_ctrl_xfers_q_head == uhcip->uhci_ctrl_xfers_q_tail) {
		uhcip->uhci_ctrl_xfers_q_head->prev_qh	= UHCI_INVALID_PTR;
	}

	/*
	 * Point the new QH at whatever the current tail points to,
	 * then link it in after the tail.  The tail's link_ptr is
	 * written last so the host controller only sees the new QH
	 * once it is fully initialized.
	 */
	SetQH32(uhcip, qh->link_ptr,
	    GetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr));
	qh->prev_qh = uhcip->uhci_ctrl_xfers_q_tail;
	SetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr,
				QH_PADDR(qh) | HC_QUEUE_HEAD);
	uhcip->uhci_ctrl_xfers_q_tail = qh;

}
1446 
1447 
1448 /*
1449  * uhci_insert_bulk_qh:
1450  *	Insert a bulk QH into the Host Controller's (HC) bulk QH list.
1451  */
static void
uhci_insert_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t *qh = pp->pp_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_bulk_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Empty list: mark the head's back pointer invalid.  Otherwise,
	 * if head and tail already share a link_ptr value, the bulk list
	 * has been closed into a loop; copy the raw (HC-format) link so
	 * the new tail keeps that loop intact.
	 */
	if (uhcip->uhci_bulk_xfers_q_head == uhcip->uhci_bulk_xfers_q_tail) {
		uhcip->uhci_bulk_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
	} else if (uhcip->uhci_bulk_xfers_q_head->link_ptr ==
		    uhcip->uhci_bulk_xfers_q_tail->link_ptr) {

		/* If there is already a loop, we should keep the loop. */
		qh->link_ptr = uhcip->uhci_bulk_xfers_q_tail->link_ptr;
	}

	/*
	 * Link the new QH after the current tail; the tail's link_ptr
	 * write makes the QH visible to the host controller.
	 */
	qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
	SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_tail->link_ptr,
		QH_PADDR(qh) | HC_QUEUE_HEAD);
	uhcip->uhci_bulk_xfers_q_tail = qh;
}
1476 
1477 
1478 /*
1479  * uhci_insert_intr_qh:
1480  *	Insert a periodic Queue head i.e Interrupt queue head into the
1481  *	Host Controller's (HC) interrupt lattice tree.
1482  */
static void
uhci_insert_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uint_t		node = pp->pp_node;	/* The appropriate node was */
						/* found during the opening */
						/* of the pipe.  */
	queue_head_t	*qh = pp->pp_qh;
	queue_head_t	*next_lattice_qh, *lattice_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Find the lattice queue head and its current successor */
	lattice_qh = &uhcip->uhci_qh_pool_addr[node];
	next_lattice_qh =
	    QH_VADDR(GetQH32(uhcip, lattice_qh->link_ptr) & QH_LINK_PTR_MASK);

	/*
	 * Splice the pipe's QH between the lattice QH and its
	 * successor.  The new QH inherits the lattice QH's link first;
	 * the lattice QH's link_ptr is written last so the controller
	 * never follows a half-initialized link.
	 */
	next_lattice_qh->prev_qh = qh;
	qh->link_ptr	= lattice_qh->link_ptr;
	qh->prev_qh	= lattice_qh;
	SetQH32(uhcip, lattice_qh->link_ptr, QH_PADDR(qh) | HC_QUEUE_HEAD);
	/* Fresh pipe starts with DATA0 */
	pp->pp_data_toggle = 0;
}
1508 
1509 
1510 /*
1511  * uhci_insert_intr_td:
1512  *	Create a TD and a data buffer for an interrupt endpoint.
1513  */
int
uhci_insert_intr_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_intr_req_t		*req,
	usb_flags_t		flags)
{
	int			error, pipe_dir;
	uint_t			length, mps;
	uint32_t		buf_offs;
	uhci_td_t		*tmp_td;
	usb_intr_req_t		*intr_reqp;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_td: req: 0x%p", req);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Get the interrupt pipe direction */
	pipe_dir = UHCI_XFER_DIR(&ph->p_ep);

	/*
	 * Determine the transfer length: from the request if one was
	 * passed; otherwise (IN polling with no request) from the
	 * client's periodic request, falling back to wMaxPacketSize.
	 */
	if (req) {
		length = req->intr_len;
	} else {
		ASSERT(pipe_dir == USB_EP_DIR_IN);
		length = (pp->pp_client_periodic_in_reqp) ?
		    (((usb_intr_req_t *)pp->
		    pp_client_periodic_in_reqp)->intr_len) :
		    ph->p_ep.wMaxPacketSize;
	}

	/* Check the size of interrupt request */
	if (length > UHCI_MAX_TD_XFER_SIZE) {

		/* the length shouldn't exceed 8K */
		/*
		 * NOTE(review): length is uint_t but is printed with
		 * "%lx" — verify the format specifier on LP64.
		 */
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: Intr request size 0x%lx is "
		    "more than 0x%x", length, UHCI_MAX_TD_XFER_SIZE);

	    return (USB_INVALID_REQUEST);
	}

	USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_td: length: 0x%lx", length);

	/* Allocate a transaction wrapper (includes the DMA buffer) */
	if ((tw = uhci_create_transfer_wrapper(uhcip, pp, length, flags)) ==
	    NULL) {

		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/*
	 * Initialize the callback and any callback
	 * data for when the td completes.
	 */
	tw->tw_handle_td = uhci_handle_intr_td;
	tw->tw_handle_callback_value = NULL;
	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
							PID_OUT : PID_IN;
	tw->tw_curr_xfer_reqp = (usb_opaque_t)req;

	/*
	 * If it is an Interrupt IN request and interrupt request is NULL,
	 * allocate the usb interrupt request structure for the current
	 * interrupt polling request.
	 */
	if (tw->tw_direction == PID_IN) {
		if ((error = uhci_allocate_periodic_in_resource(uhcip,
		    pp, tw, flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_insert_intr_td: Interrupt request structure "
			    "allocation failed");

			/* free the transfer wrapper */
			uhci_deallocate_tw(uhcip, pp, tw);

			return (error);
		}
	}

	intr_reqp = (usb_intr_req_t *)tw->tw_curr_xfer_reqp;
	ASSERT(tw->tw_curr_xfer_reqp != NULL);

	/* Timeout is only armed for one-shot transfers */
	tw->tw_timeout_cnt = (intr_reqp->intr_attributes & USB_ATTRS_ONE_XFER) ?
	    intr_reqp->intr_timeout : 0;

	/* DATA IN */
	if (tw->tw_direction == PID_IN) {
		/* Insert the td onto the queue head */
		error = uhci_insert_hc_td(uhcip, 0,
			length, pp, tw, PID_IN, intr_reqp->intr_attributes);

		if (error != USB_SUCCESS) {

			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
			/* free the transfer wrapper */
			uhci_deallocate_tw(uhcip, pp, tw);

			return (USB_NO_RESOURCES);
		}
		tw->tw_bytes_xfered = 0;

		return (USB_SUCCESS);
	}

	/* DATA OUT */
	ASSERT(req->intr_data != NULL);

	/* Copy the data into the message */
	ddi_rep_put8(tw->tw_accesshandle, req->intr_data->b_rptr,
		(uint8_t *)tw->tw_buf, req->intr_len, DDI_DEV_AUTOINCR);

	/* set tw->tw_claim flag, so that nobody else works on this tw. */
	tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;

	mps = ph->p_ep.wMaxPacketSize;
	buf_offs = 0;

	/*
	 * Split the OUT payload into max-packet-size TDs.  TDs are
	 * built inactive here and enabled all at once below so the
	 * controller cannot start on a partially built chain.
	 */
	while (length > 0) {

		error = uhci_insert_hc_td(uhcip, buf_offs,
				(length > mps) ? mps : length,
				pp, tw, PID_OUT,
				intr_reqp->intr_attributes);

		if (error != USB_SUCCESS) {
			/* no resource. */
			break;
		}

		if (length <= mps) {
			/* inserted all data. */
			length = 0;

		} else {

			buf_offs += mps;
			length -= mps;
		}
	}

	if (error != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: allocate td failed, free resource");

		/* remove all the tds */
		while (tw->tw_hctd_head != NULL) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		tw->tw_claim = UHCI_NOT_CLAIMED;
		uhci_deallocate_tw(uhcip, pp, tw);

		return (error);
	}

	/* allow HC to xfer the tds of this tw */
	tmp_td = tw->tw_hctd_head;
	while (tmp_td != NULL) {

		SetTD_status(uhcip, tmp_td, UHCI_TD_ACTIVE);
		tmp_td = tmp_td->tw_td_next;
	}

	tw->tw_bytes_xfered = 0;
	tw->tw_claim = UHCI_NOT_CLAIMED;

	return (error);
}
1692 
1693 
1694 /*
1695  * uhci_create_transfer_wrapper:
1696  *	Create a Transaction Wrapper (TW) for non-isoc transfer types.
1697  *	This involves the allocating of DMA resources.
1698  *
1699  *	For non-isoc transfers, one DMA handle and one DMA buffer are
1700  *	allocated per transfer. The DMA buffer may contain multiple
1701  *	DMA cookies and the cookies should meet certain alignment
1702  *	requirement to be able to fit in the multiple TDs. The alignment
1703  *	needs to ensure:
1704  *	1. the size of a cookie be larger than max TD length (0x500)
1705  *	2. the size of a cookie be a multiple of wMaxPacketSize of the
1706  *	ctrl/bulk pipes
1707  *
1708  *	wMaxPacketSize for ctrl and bulk pipes may be 8, 16, 32 or 64 bytes.
1709  *	So the alignment should be a multiple of 64. wMaxPacketSize for intr
1710  *	pipes is a little different since it only specifies the max to be
1711  *	64 bytes, but as long as an intr transfer is limited to max TD length,
1712  *	any alignment can work if the cookie size is larger than max TD length.
1713  *
1714  *	Considering the above conditions, 2K alignment is used. 4K alignment
1715  *	should also be fine.
1716  */
static uhci_trans_wrapper_t *
uhci_create_transfer_wrapper(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	size_t			length,
	usb_flags_t		usb_flags)
{
	size_t			real_length;
	uhci_trans_wrapper_t	*tw;
	ddi_device_acc_attr_t	dev_attr;
	ddi_dma_attr_t		dma_attr;
	int			kmem_flag;
	int			(*dmamem_wait)(caddr_t);
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
	    length, usb_flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* isochronous pipe should not call into this function */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {

		return (NULL);
	}

	/* SLEEP flag should not be used in interrupt context */
	if (servicing_interrupt()) {
		kmem_flag = KM_NOSLEEP;
		dmamem_wait = DDI_DMA_DONTWAIT;
	} else {
		kmem_flag = KM_SLEEP;
		dmamem_wait = DDI_DMA_SLEEP;
	}

	/* Allocate space for the transfer wrapper */
	if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
	    NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS,  uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: kmem_alloc failed");

		return (NULL);
	}

	/*
	 * Allow scatter-gather lists for the transfer wrapper's DMA
	 * memory; 2K alignment (see block comment above) guarantees
	 * each cookie can be carved into whole TDs.
	 */
	bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
	dma_attr.dma_attr_sgllen = UHCI_DMA_ATTR_SGLLEN;
	dma_attr.dma_attr_align = UHCI_DMA_ATTR_ALIGN;

	/* Store the transfer length */
	tw->tw_length = length;

	/*
	 * Allocate the DMA handle.  Each failure below unwinds exactly
	 * the resources acquired so far before returning NULL.
	 */
	if (ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr, dmamem_wait,
	    0, &tw->tw_dmahandle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: Alloc handle failed");
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	dev_attr.devacc_attr_version		= DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder		= DDI_STRICTORDER_ACC;

	/* Allocate the memory */
	if (ddi_dma_mem_alloc(tw->tw_dmahandle, tw->tw_length, &dev_attr,
	    DDI_DMA_CONSISTENT, dmamem_wait, NULL, (caddr_t *)&tw->tw_buf,
	    &real_length, &tw->tw_accesshandle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: dma_mem_alloc fail");
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	ASSERT(real_length >= length);

	/* Bind the handle */
	if (ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies) !=
	    DDI_DMA_MAPPED) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: Bind handle failed");
		ddi_dma_mem_free(&tw->tw_accesshandle);
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	/* Cookie walking state used by uhci_get_tw_paddr_by_offs() */
	tw->tw_cookie_idx = 0;
	tw->tw_dma_offs = 0;

	/*
	 * Only allow one wrapper to be added at a time. Insert the
	 * new transaction wrapper into the list for this pipe.
	 */
	if (pp->pp_tw_head == NULL) {
		pp->pp_tw_head = tw;
		pp->pp_tw_tail = tw;
	} else {
		pp->pp_tw_tail->tw_next = tw;
		pp->pp_tw_tail = tw;
		ASSERT(tw->tw_next == NULL);
	}

	/* Store a back pointer to the pipe private structure */
	tw->tw_pipe_private = pp;

	/* Store the transfer type - synchronous or asynchronous */
	tw->tw_flags = usb_flags;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
	    tw, tw->tw_ncookies);

	return (tw);
}
1840 
1841 
1842 /*
1843  * uhci_insert_hc_td:
1844  *	Insert a Transfer Descriptor (TD) on an QH.
1845  */
int
uhci_insert_hc_td(
	uhci_state_t		*uhcip,
	uint32_t		buffer_offset,
	size_t			hcgtd_length,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw,
	uchar_t			PID,
	usb_req_attrs_t		attrs)
{
	uhci_td_t	*td, *current_dummy;
	queue_head_t	*qh = pp->pp_qh;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if ((td = uhci_allocate_td_from_pool(uhcip)) == NULL) {

		return (USB_NO_RESOURCES);
	}

	/*
	 * The QH always ends in a dummy TD.  The transfer data goes
	 * into the current dummy and the newly allocated TD becomes
	 * the next dummy (dummy-TD insertion scheme).
	 */
	current_dummy = qh->td_tailp;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_insert_hc_td: td %p, attrs = 0x%x", td, attrs);

	/*
	 * Fill in the current dummy td and
	 * add the new dummy to the end.
	 */
	uhci_fill_in_td(uhcip, td, current_dummy, buffer_offset,
	    hcgtd_length, pp, PID, attrs, tw);

	/*
	 * Allow HC hardware xfer the td, except interrupt out td.
	 * (Interrupt OUT TDs are activated in a batch by the caller
	 * once the whole chain is built.)
	 */
	if ((tw->tw_handle_td != uhci_handle_intr_td) || (PID != PID_OUT)) {

		SetTD_status(uhcip, current_dummy, UHCI_TD_ACTIVE);
	}

	/* Append the filled-in TD to the transfer wrapper's TD list */

	if (tw->tw_hctd_head == NULL) {
		ASSERT(tw->tw_hctd_tail == NULL);
		tw->tw_hctd_head = current_dummy;
		tw->tw_hctd_tail = current_dummy;
	} else {
		/* Add the td to the end of the list */
		tw->tw_hctd_tail->tw_td_next = current_dummy;
		tw->tw_hctd_tail = current_dummy;
	}

	/*
	 * Append the TD to the driver-wide outstanding-TD list so the
	 * interrupt handler can find it on completion.
	 */
	current_dummy->outst_td_next	 = NULL;
	current_dummy->outst_td_prev	 = uhcip->uhci_outst_tds_tail;
	if (uhcip->uhci_outst_tds_head == NULL) {
		uhcip->uhci_outst_tds_head = current_dummy;
	} else {
		uhcip->uhci_outst_tds_tail->outst_td_next = current_dummy;
	}
	uhcip->uhci_outst_tds_tail = current_dummy;
	current_dummy->tw = tw;

	return (USB_SUCCESS);
}
1914 
1915 
1916 /*
1917  * uhci_fill_in_td:
1918  *	Fill in the fields of a Transfer Descriptor (TD).
1919  */
static void
uhci_fill_in_td(
	uhci_state_t		*uhcip,
	uhci_td_t		*td,
	uhci_td_t		*current_dummy,
	uint32_t		buffer_offset,
	size_t			length,
	uhci_pipe_private_t	*pp,
	uchar_t			PID,
	usb_req_attrs_t		attrs,
	uhci_trans_wrapper_t	*tw)
{
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
	uint32_t		buf_addr;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_fill_in_td: td 0x%p buf_offs 0x%x len 0x%lx "
	    "attrs 0x%x", td, buffer_offset, length, attrs);

	/*
	 * If this is an isochronous TD, just return
	 */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {

		return;
	}

	/* The maximum transfer length of UHCI cannot exceed 0x500 bytes */
	ASSERT(length <= UHCI_MAX_TD_XFER_SIZE);

	/*
	 * 'td' becomes the new dummy tail; 'current_dummy' (the old
	 * dummy) is filled in with this transfer and linked to it.
	 */
	bzero((char *)td, sizeof (uhci_td_t));	/* Clear the TD */
	SetTD32(uhcip, current_dummy->link_ptr, TD_PADDR(td));

	/* Short packet detect for SHORT_XFER_OK requests */
	if (attrs & USB_ATTRS_SHORT_XFER_OK) {
		SetTD_spd(uhcip, current_dummy, 1);
	}

	/* Device mutex protects the port status and address reads */
	mutex_enter(&ph->p_usba_device->usb_mutex);
	if (ph->p_usba_device->usb_port_status == USBA_LOW_SPEED_DEV) {
		SetTD_ls(uhcip, current_dummy, LOW_SPEED_DEVICE);
	}

	SetTD_c_err(uhcip, current_dummy, UHCI_MAX_ERR_COUNT);
	/* UHCI encodes max length as n-1; 0x7ff means zero-length */
	SetTD_mlen(uhcip, current_dummy, (length == 0) ? 0x7ff: (length - 1));
	SetTD_dtogg(uhcip, current_dummy, pp->pp_data_toggle);

	/* Adjust the data toggle bit */
	ADJ_DATA_TOGGLE(pp);

	SetTD_devaddr(uhcip, current_dummy,  ph->p_usba_device->usb_addr);
	SetTD_endpt(uhcip, current_dummy,
		ph->p_ep.bEndpointAddress & END_POINT_ADDRESS_MASK);
	SetTD_PID(uhcip, current_dummy, PID);
	SetTD_ioc(uhcip, current_dummy, INTERRUPT_ON_COMPLETION);

	/* Resolve the buffer offset to a device (physical) address */
	buf_addr = uhci_get_tw_paddr_by_offs(uhcip, buffer_offset, length, tw);
	SetTD32(uhcip, current_dummy->buffer_address, buf_addr);

	td->qh_td_prev			= current_dummy;
	current_dummy->qh_td_prev	= NULL;
	pp->pp_qh->td_tailp		= td;
	mutex_exit(&ph->p_usba_device->usb_mutex);
}
1983 
1984 /*
1985  * uhci_get_tw_paddr_by_offs:
1986  *	Walk through the DMA cookies of a TW buffer to retrieve
1987  *	the device address used for a TD.
1988  *
1989  * buffer_offset - the starting offset into the TW buffer, where the
1990  *                 TD should transfer from. When a TW has more than
1991  *                 one TD, the TDs must be filled in increasing order.
1992  */
1993 static uint32_t
1994 uhci_get_tw_paddr_by_offs(
1995 	uhci_state_t		*uhcip,
1996 	uint32_t		buffer_offset,
1997 	size_t			length,
1998 	uhci_trans_wrapper_t	*tw)
1999 {
2000 	uint32_t		buf_addr;
2001 	int			rem_len;
2002 
2003 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2004 	    "uhci_get_tw_paddr_by_offs: buf_offs 0x%x len 0x%lx",
2005 	    buffer_offset, length);
2006 
2007 	/*
2008 	 * TDs must be filled in increasing DMA offset order.
2009 	 * tw_dma_offs is initialized to be 0 at TW creation and
2010 	 * is only increased in this function.
2011 	 */
2012 	ASSERT(length == 0 || buffer_offset >= tw->tw_dma_offs);
2013 
2014 	if (length == 0) {
2015 		buf_addr = 0;
2016 
2017 		return (buf_addr);
2018 	}
2019 
2020 	/*
2021 	 * Advance to the next DMA cookie until finding the cookie
2022 	 * that buffer_offset falls in.
2023 	 * It is very likely this loop will never repeat more than
2024 	 * once. It is here just to accommodate the case buffer_offset
2025 	 * is increased by multiple cookies during two consecutive
2026 	 * calls into this function. In that case, the interim DMA
2027 	 * buffer is allowed to be skipped.
2028 	 */
2029 	while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2030 	    buffer_offset) {
2031 		/*
2032 		 * tw_dma_offs always points to the starting offset
2033 		 * of a cookie
2034 		 */
2035 		tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2036 		ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2037 		tw->tw_cookie_idx++;
2038 		ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2039 	}
2040 
2041 	/*
2042 	 * Counting the remained buffer length to be filled in
2043 	 * the TDs for current DMA cookie
2044 	 */
2045 	rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2046 	    buffer_offset;
2047 
2048 	/* Calculate the beginning address of the buffer */
2049 	ASSERT(length <= rem_len);
2050 	buf_addr = (buffer_offset - tw->tw_dma_offs) +
2051 	    tw->tw_cookie.dmac_address;
2052 
2053 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2054 	    "uhci_get_tw_paddr_by_offs: dmac_addr 0x%p dmac_size "
2055 	    "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2056 	    tw->tw_cookie_idx);
2057 
2058 	return (buf_addr);
2059 }
2060 
2061 
2062 /*
2063  * uhci_modify_td_active_bits:
2064  *	Sets active bit in all the tds of QH to INACTIVE so that
2065  *	the HC stops processing the TD's related to the QH.
2066  */
2067 void
2068 uhci_modify_td_active_bits(
2069 	uhci_state_t		*uhcip,
2070 	uhci_pipe_private_t	*pp)
2071 {
2072 	uhci_td_t		*td_head;
2073 	usb_ep_descr_t		*ept = &pp->pp_pipe_handle->p_ep;
2074 	uhci_trans_wrapper_t	*tw_head = pp->pp_tw_head;
2075 
2076 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2077 	    "uhci_modify_td_active_bits: tw head %p", (void *)tw_head);
2078 
2079 	while (tw_head != NULL) {
2080 		tw_head->tw_claim = UHCI_MODIFY_TD_BITS_CLAIMED;
2081 		td_head = tw_head->tw_hctd_head;
2082 
2083 		while (td_head) {
2084 			if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) {
2085 				SetTD_status(uhcip, td_head,
2086 				    GetTD_status(uhcip, td_head) & TD_INACTIVE);
2087 			} else {
2088 				SetTD32(uhcip, td_head->link_ptr,
2089 				    GetTD32(uhcip, td_head->link_ptr) |
2090 				    HC_END_OF_LIST);
2091 			}
2092 
2093 			td_head = td_head->tw_td_next;
2094 		}
2095 		tw_head = tw_head->tw_next;
2096 	}
2097 }
2098 
2099 
2100 /*
2101  * uhci_insert_ctrl_td:
2102  *	Create a TD and a data buffer for a control Queue Head.
2103  */
2104 int
2105 uhci_insert_ctrl_td(
2106 	uhci_state_t		*uhcip,
2107 	usba_pipe_handle_data_t  *ph,
2108 	usb_ctrl_req_t		*ctrl_reqp,
2109 	usb_flags_t		flags)
2110 {
2111 	uhci_pipe_private_t  *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
2112 	uhci_trans_wrapper_t *tw;
2113 	size_t	ctrl_buf_size;
2114 
2115 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2116 	    "uhci_insert_ctrl_td: timeout: 0x%x", ctrl_reqp->ctrl_timeout);
2117 
2118 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2119 
2120 	/*
2121 	 * If we have a control data phase, make the data buffer start
2122 	 * on the next 64-byte boundary so as to ensure the DMA cookie
2123 	 * can fit in the multiple TDs. The buffer in the range of
2124 	 * [SETUP_SIZE, UHCI_CTRL_EPT_MAX_SIZE) is just for padding
2125 	 * and not to be transferred.
2126 	 */
2127 	if (ctrl_reqp->ctrl_wLength) {
2128 		ctrl_buf_size = UHCI_CTRL_EPT_MAX_SIZE +
2129 		    ctrl_reqp->ctrl_wLength;
2130 	} else {
2131 		ctrl_buf_size = SETUP_SIZE;
2132 	}
2133 
2134 	/* Allocate a transaction wrapper */
2135 	if ((tw = uhci_create_transfer_wrapper(uhcip, pp,
2136 	    ctrl_buf_size, flags)) == NULL) {
2137 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2138 		    "uhci_insert_ctrl_td: TW allocation failed");
2139 
2140 		return (USB_NO_RESOURCES);
2141 	}
2142 
2143 	pp->pp_data_toggle = 0;
2144 
2145 	tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
2146 	tw->tw_bytes_xfered = 0;
2147 	tw->tw_bytes_pending = ctrl_reqp->ctrl_wLength;
2148 	tw->tw_timeout_cnt = max(UHCI_CTRL_TIMEOUT, ctrl_reqp->ctrl_timeout);
2149 
2150 	/*
2151 	 * Initialize the callback and any callback
2152 	 * data for when the td completes.
2153 	 */
2154 	tw->tw_handle_td = uhci_handle_ctrl_td;
2155 	tw->tw_handle_callback_value = NULL;
2156 
2157 	if ((uhci_create_setup_pkt(uhcip, pp, tw)) != USB_SUCCESS) {
2158 		tw->tw_ctrl_state = 0;
2159 
2160 		/* free the transfer wrapper */
2161 		uhci_deallocate_tw(uhcip, pp, tw);
2162 
2163 		return (USB_NO_RESOURCES);
2164 	}
2165 
2166 	tw->tw_ctrl_state = SETUP;
2167 
2168 	return (USB_SUCCESS);
2169 }
2170 
2171 
2172 /*
2173  * uhci_create_setup_pkt:
2174  *	create a setup packet to initiate a control transfer.
2175  *
2176  *	OHCI driver has seen the case where devices fail if there is
2177  *	more than one control transfer to the device within a frame.
2178  *	So, the UHCI ensures that only one TD will be put on the control
2179  *	pipe to one device (to be consistent with OHCI driver).
2180  */
2181 static int
2182 uhci_create_setup_pkt(
2183 	uhci_state_t		*uhcip,
2184 	uhci_pipe_private_t	*pp,
2185 	uhci_trans_wrapper_t	*tw)
2186 {
2187 	int		sdata;
2188 	usb_ctrl_req_t	*req = (usb_ctrl_req_t *)tw->tw_curr_xfer_reqp;
2189 
2190 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2191 	    "uhci_create_setup_pkt: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%p",
2192 	    req->ctrl_bmRequestType, req->ctrl_bRequest, req->ctrl_wValue,
2193 	    req->ctrl_wIndex, req->ctrl_wLength, (void *)req->ctrl_data);
2194 
2195 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2196 	ASSERT(tw != NULL);
2197 
2198 	/* Create the first four bytes of the setup packet */
2199 	sdata = (req->ctrl_bmRequestType | (req->ctrl_bRequest << 8) |
2200 			(req->ctrl_wValue << 16));
2201 	ddi_put32(tw->tw_accesshandle, (uint_t *)tw->tw_buf, sdata);
2202 
2203 	/* Create the second four bytes */
2204 	sdata = (uint32_t)(req->ctrl_wIndex | (req->ctrl_wLength << 16));
2205 	ddi_put32(tw->tw_accesshandle,
2206 	    (uint_t *)(tw->tw_buf + sizeof (uint_t)), sdata);
2207 
2208 	/*
2209 	 * The TD's are placed on the QH one at a time.
2210 	 * Once this TD is placed on the done list, the
2211 	 * data or status phase TD will be enqueued.
2212 	 */
2213 	if ((uhci_insert_hc_td(uhcip, 0, SETUP_SIZE,
2214 	    pp, tw, PID_SETUP, req->ctrl_attributes)) != USB_SUCCESS) {
2215 
2216 		return (USB_NO_RESOURCES);
2217 	}
2218 
2219 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2220 	    "Create_setup: pp = 0x%p, attrs = 0x%x", pp, req->ctrl_attributes);
2221 
2222 	/*
2223 	 * If this control transfer has a data phase, record the
2224 	 * direction. If the data phase is an OUT transaction ,
2225 	 * copy the data into the buffer of the transfer wrapper.
2226 	 */
2227 	if (req->ctrl_wLength != 0) {
2228 		/* There is a data stage.  Find the direction */
2229 		if (req->ctrl_bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
2230 			tw->tw_direction = PID_IN;
2231 		} else {
2232 			tw->tw_direction = PID_OUT;
2233 
2234 			/* Copy the data into the buffer */
2235 			ddi_rep_put8(tw->tw_accesshandle,
2236 			    req->ctrl_data->b_rptr,
2237 			    (uint8_t *)(tw->tw_buf + UHCI_CTRL_EPT_MAX_SIZE),
2238 			    req->ctrl_wLength,
2239 			    DDI_DEV_AUTOINCR);
2240 		}
2241 	}
2242 
2243 	return (USB_SUCCESS);
2244 }
2245 
2246 
2247 /*
2248  * uhci_create_stats:
2249  *	Allocate and initialize the uhci kstat structures
2250  */
2251 void
2252 uhci_create_stats(uhci_state_t *uhcip)
2253 {
2254 	int			i;
2255 	char			kstatname[KSTAT_STRLEN];
2256 	char			*usbtypes[USB_N_COUNT_KSTATS] =
2257 				    {"ctrl", "isoch", "bulk", "intr"};
2258 	uint_t			instance = uhcip->uhci_instance;
2259 	const char		*dname = ddi_driver_name(uhcip->uhci_dip);
2260 	uhci_intrs_stats_t	*isp;
2261 
2262 	if (UHCI_INTRS_STATS(uhcip) == NULL) {
2263 		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
2264 		    dname, instance);
2265 		UHCI_INTRS_STATS(uhcip) = kstat_create("usba", instance,
2266 		    kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
2267 		    sizeof (uhci_intrs_stats_t) / sizeof (kstat_named_t),
2268 		    KSTAT_FLAG_PERSISTENT);
2269 
2270 		if (UHCI_INTRS_STATS(uhcip) != NULL) {
2271 			isp = UHCI_INTRS_STATS_DATA(uhcip);
2272 			kstat_named_init(&isp->uhci_intrs_hc_halted,
2273 			    "HC Halted", KSTAT_DATA_UINT64);
2274 			kstat_named_init(&isp->uhci_intrs_hc_process_err,
2275 			    "HC Process Errors", KSTAT_DATA_UINT64);
2276 			kstat_named_init(&isp->uhci_intrs_host_sys_err,
2277 			    "Host Sys Errors", KSTAT_DATA_UINT64);
2278 			kstat_named_init(&isp->uhci_intrs_resume_detected,
2279 			    "Resume Detected", KSTAT_DATA_UINT64);
2280 			kstat_named_init(&isp->uhci_intrs_usb_err_intr,
2281 			    "USB Error", KSTAT_DATA_UINT64);
2282 			kstat_named_init(&isp->uhci_intrs_usb_intr,
2283 			    "USB Interrupts", KSTAT_DATA_UINT64);
2284 			kstat_named_init(&isp->uhci_intrs_total,
2285 			    "Total Interrupts", KSTAT_DATA_UINT64);
2286 			kstat_named_init(&isp->uhci_intrs_not_claimed,
2287 			    "Not Claimed", KSTAT_DATA_UINT64);
2288 
2289 			UHCI_INTRS_STATS(uhcip)->ks_private = uhcip;
2290 			UHCI_INTRS_STATS(uhcip)->ks_update = nulldev;
2291 			kstat_install(UHCI_INTRS_STATS(uhcip));
2292 		}
2293 	}
2294 
2295 	if (UHCI_TOTAL_STATS(uhcip) == NULL) {
2296 		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
2297 		    dname, instance);
2298 		UHCI_TOTAL_STATS(uhcip) = kstat_create("usba", instance,
2299 		    kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
2300 		    KSTAT_FLAG_PERSISTENT);
2301 
2302 		if (UHCI_TOTAL_STATS(uhcip) != NULL) {
2303 			kstat_install(UHCI_TOTAL_STATS(uhcip));
2304 		}
2305 	}
2306 
2307 	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
2308 		if (uhcip->uhci_count_stats[i] == NULL) {
2309 			(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
2310 			    dname, instance, usbtypes[i]);
2311 			uhcip->uhci_count_stats[i] = kstat_create("usba",
2312 			    instance, kstatname, "usb_byte_count",
2313 			    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
2314 
2315 			if (uhcip->uhci_count_stats[i] != NULL) {
2316 				kstat_install(uhcip->uhci_count_stats[i]);
2317 			}
2318 		}
2319 	}
2320 }
2321 
2322 
2323 /*
2324  * uhci_destroy_stats:
2325  *	Clean up uhci kstat structures
2326  */
2327 void
2328 uhci_destroy_stats(uhci_state_t *uhcip)
2329 {
2330 	int i;
2331 
2332 	if (UHCI_INTRS_STATS(uhcip)) {
2333 		kstat_delete(UHCI_INTRS_STATS(uhcip));
2334 		UHCI_INTRS_STATS(uhcip) = NULL;
2335 	}
2336 
2337 	if (UHCI_TOTAL_STATS(uhcip)) {
2338 		kstat_delete(UHCI_TOTAL_STATS(uhcip));
2339 		UHCI_TOTAL_STATS(uhcip) = NULL;
2340 	}
2341 
2342 	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
2343 		if (uhcip->uhci_count_stats[i]) {
2344 			kstat_delete(uhcip->uhci_count_stats[i]);
2345 			uhcip->uhci_count_stats[i] = NULL;
2346 		}
2347 	}
2348 }
2349 
2350 
2351 void
2352 uhci_do_intrs_stats(uhci_state_t *uhcip, int val)
2353 {
2354 	if (UHCI_INTRS_STATS(uhcip) == NULL) {
2355 
2356 		return;
2357 	}
2358 
2359 	UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_total.value.ui64++;
2360 	switch (val) {
2361 	case USBSTS_REG_HC_HALTED:
2362 		UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_hc_halted.value.ui64++;
2363 		break;
2364 	case USBSTS_REG_HC_PROCESS_ERR:
2365 		UHCI_INTRS_STATS_DATA(uhcip)->
2366 			uhci_intrs_hc_process_err.value.ui64++;
2367 		break;
2368 	case USBSTS_REG_HOST_SYS_ERR:
2369 		UHCI_INTRS_STATS_DATA(uhcip)->
2370 			uhci_intrs_host_sys_err.value.ui64++;
2371 		break;
2372 	case USBSTS_REG_RESUME_DETECT:
2373 		UHCI_INTRS_STATS_DATA(uhcip)->
2374 			uhci_intrs_resume_detected.value.ui64++;
2375 		break;
2376 	case USBSTS_REG_USB_ERR_INTR:
2377 		UHCI_INTRS_STATS_DATA(uhcip)->
2378 			uhci_intrs_usb_err_intr.value.ui64++;
2379 		break;
2380 	case USBSTS_REG_USB_INTR:
2381 		UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_usb_intr.value.ui64++;
2382 		break;
2383 	default:
2384 		UHCI_INTRS_STATS_DATA(uhcip)->
2385 			uhci_intrs_not_claimed.value.ui64++;
2386 		break;
2387 	}
2388 }
2389 
2390 
2391 void
2392 uhci_do_byte_stats(uhci_state_t *uhcip, size_t len, uint8_t attr, uint8_t addr)
2393 {
2394 	uint8_t type = attr & USB_EP_ATTR_MASK;
2395 	uint8_t dir = addr & USB_EP_DIR_MASK;
2396 
2397 	switch (dir) {
2398 	case USB_EP_DIR_IN:
2399 		UHCI_TOTAL_STATS_DATA(uhcip)->reads++;
2400 		UHCI_TOTAL_STATS_DATA(uhcip)->nread += len;
2401 		switch (type) {
2402 		case USB_EP_ATTR_CONTROL:
2403 			UHCI_CTRL_STATS(uhcip)->reads++;
2404 			UHCI_CTRL_STATS(uhcip)->nread += len;
2405 			break;
2406 		case USB_EP_ATTR_BULK:
2407 			UHCI_BULK_STATS(uhcip)->reads++;
2408 			UHCI_BULK_STATS(uhcip)->nread += len;
2409 			break;
2410 		case USB_EP_ATTR_INTR:
2411 			UHCI_INTR_STATS(uhcip)->reads++;
2412 			UHCI_INTR_STATS(uhcip)->nread += len;
2413 			break;
2414 		case USB_EP_ATTR_ISOCH:
2415 			UHCI_ISOC_STATS(uhcip)->reads++;
2416 			UHCI_ISOC_STATS(uhcip)->nread += len;
2417 			break;
2418 		}
2419 		break;
2420 	case USB_EP_DIR_OUT:
2421 		UHCI_TOTAL_STATS_DATA(uhcip)->writes++;
2422 		UHCI_TOTAL_STATS_DATA(uhcip)->nwritten += len;
2423 		switch (type) {
2424 		case USB_EP_ATTR_CONTROL:
2425 			UHCI_CTRL_STATS(uhcip)->writes++;
2426 			UHCI_CTRL_STATS(uhcip)->nwritten += len;
2427 			break;
2428 		case USB_EP_ATTR_BULK:
2429 			UHCI_BULK_STATS(uhcip)->writes++;
2430 			UHCI_BULK_STATS(uhcip)->nwritten += len;
2431 			break;
2432 		case USB_EP_ATTR_INTR:
2433 			UHCI_INTR_STATS(uhcip)->writes++;
2434 			UHCI_INTR_STATS(uhcip)->nwritten += len;
2435 			break;
2436 		case USB_EP_ATTR_ISOCH:
2437 			UHCI_ISOC_STATS(uhcip)->writes++;
2438 			UHCI_ISOC_STATS(uhcip)->nwritten += len;
2439 			break;
2440 		}
2441 		break;
2442 	}
2443 }
2444 
2445 
2446 /*
2447  * uhci_free_tw:
2448  *	Free the Transfer Wrapper (TW).
2449  */
2450 void
2451 uhci_free_tw(uhci_state_t *uhcip, uhci_trans_wrapper_t *tw)
2452 {
2453 	int rval, i;
2454 
2455 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, "uhci_free_tw:");
2456 
2457 	ASSERT(tw != NULL);
2458 
2459 	if (tw->tw_isoc_strtlen > 0) {
2460 		ASSERT(tw->tw_isoc_bufs != NULL);
2461 		for (i = 0; i < tw->tw_ncookies; i++) {
2462 			rval = ddi_dma_unbind_handle(
2463 			    tw->tw_isoc_bufs[i].dma_handle);
2464 			ASSERT(rval == USB_SUCCESS);
2465 			ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
2466 			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
2467 		}
2468 		kmem_free(tw->tw_isoc_bufs, tw->tw_isoc_strtlen);
2469 	} else if (tw->tw_dmahandle != NULL) {
2470 		rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
2471 		ASSERT(rval == DDI_SUCCESS);
2472 
2473 		ddi_dma_mem_free(&tw->tw_accesshandle);
2474 		ddi_dma_free_handle(&tw->tw_dmahandle);
2475 	}
2476 
2477 	kmem_free(tw, sizeof (uhci_trans_wrapper_t));
2478 }
2479 
2480 
2481 /*
2482  * uhci_deallocate_tw:
2483  *	Deallocate of a Transaction Wrapper (TW) and this involves
2484  *	the freeing of DMA resources.
2485  */
2486 void
2487 uhci_deallocate_tw(uhci_state_t *uhcip,
2488     uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw)
2489 {
2490 	uhci_trans_wrapper_t	*head;
2491 
2492 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2493 	    "uhci_deallocate_tw:");
2494 
2495 	/*
2496 	 * If the transfer wrapper has no Host Controller (HC)
2497 	 * Transfer Descriptors (TD) associated with it,  then
2498 	 * remove the transfer wrapper. The transfers are done
2499 	 * in FIFO order, so this should be the first transfer
2500 	 * wrapper on the list.
2501 	 */
2502 	if (tw->tw_hctd_head != NULL) {
2503 		ASSERT(tw->tw_hctd_tail != NULL);
2504 
2505 		return;
2506 	}
2507 
2508 	ASSERT(tw->tw_hctd_tail == NULL);
2509 	ASSERT(pp->pp_tw_head != NULL);
2510 
2511 	/*
2512 	 * If pp->pp_tw_head is NULL, set the tail also to NULL.
2513 	 */
2514 	head = pp->pp_tw_head;
2515 
2516 	if (head == tw) {
2517 		pp->pp_tw_head = head->tw_next;
2518 		if (pp->pp_tw_head == NULL) {
2519 			pp->pp_tw_tail = NULL;
2520 		}
2521 	} else {
2522 		while (head->tw_next != tw)
2523 			head = head->tw_next;
2524 		head->tw_next = tw->tw_next;
2525 		if (tw->tw_next == NULL) {
2526 			pp->pp_tw_tail = head;
2527 		}
2528 	}
2529 	uhci_free_tw(uhcip, tw);
2530 }
2531 
2532 
2533 void
2534 uhci_delete_td(uhci_state_t *uhcip, uhci_td_t *td)
2535 {
2536 	uhci_td_t		*tmp_td;
2537 	uhci_trans_wrapper_t	*tw = td->tw;
2538 
2539 	if ((td->outst_td_next == NULL) && (td->outst_td_prev == NULL)) {
2540 		uhcip->uhci_outst_tds_head = NULL;
2541 		uhcip->uhci_outst_tds_tail = NULL;
2542 	} else if (td->outst_td_next == NULL) {
2543 		td->outst_td_prev->outst_td_next = NULL;
2544 		uhcip->uhci_outst_tds_tail = td->outst_td_prev;
2545 	} else if (td->outst_td_prev == NULL) {
2546 		td->outst_td_next->outst_td_prev = NULL;
2547 		uhcip->uhci_outst_tds_head = td->outst_td_next;
2548 	} else {
2549 		td->outst_td_prev->outst_td_next = td->outst_td_next;
2550 		td->outst_td_next->outst_td_prev = td->outst_td_prev;
2551 	}
2552 
2553 	tmp_td = tw->tw_hctd_head;
2554 
2555 	if (tmp_td != td) {
2556 		while (tmp_td->tw_td_next != td) {
2557 			tmp_td = tmp_td->tw_td_next;
2558 		}
2559 		ASSERT(tmp_td);
2560 		tmp_td->tw_td_next = td->tw_td_next;
2561 		if (td->tw_td_next == NULL) {
2562 			tw->tw_hctd_tail = tmp_td;
2563 		}
2564 	} else {
2565 		tw->tw_hctd_head = tw->tw_hctd_head->tw_td_next;
2566 		if (tw->tw_hctd_head == NULL) {
2567 			tw->tw_hctd_tail = NULL;
2568 		}
2569 	}
2570 
2571 	td->flag  = TD_FLAG_FREE;
2572 }
2573 
2574 
2575 void
2576 uhci_remove_tds_tws(
2577 	uhci_state_t		*uhcip,
2578 	usba_pipe_handle_data_t	*ph)
2579 {
2580 	usb_opaque_t		curr_reqp;
2581 	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
2582 	usb_ep_descr_t		*ept = &pp->pp_pipe_handle->p_ep;
2583 	uhci_trans_wrapper_t	*tw_tmp;
2584 	uhci_trans_wrapper_t	*tw_head = pp->pp_tw_head;
2585 
2586 	while (tw_head != NULL) {
2587 		tw_tmp = tw_head;
2588 		tw_head = tw_head->tw_next;
2589 
2590 		curr_reqp = tw_tmp->tw_curr_xfer_reqp;
2591 		if (curr_reqp) {
2592 			/* do this for control/bulk/intr */
2593 			if ((tw_tmp->tw_direction == PID_IN) &&
2594 			    (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_INTR)) {
2595 				uhci_deallocate_periodic_in_resource(uhcip,
2596 				    pp, tw_tmp);
2597 			} else {
2598 				uhci_hcdi_callback(uhcip, pp,
2599 				    pp->pp_pipe_handle, tw_tmp, USB_CR_FLUSHED);
2600 			}
2601 		} /* end of curr_reqp */
2602 
2603 		if (tw_tmp->tw_claim != UHCI_MODIFY_TD_BITS_CLAIMED) {
2604 			continue;
2605 		}
2606 
2607 		while (tw_tmp->tw_hctd_head != NULL) {
2608 			uhci_delete_td(uhcip, tw_tmp->tw_hctd_head);
2609 		}
2610 
2611 		uhci_deallocate_tw(uhcip, pp, tw_tmp);
2612 	}
2613 }
2614 
2615 
2616 /*
2617  * uhci_remove_qh:
2618  *	Remove the Queue Head from the Host Controller's
2619  *	appropriate QH list.
2620  */
2621 void
2622 uhci_remove_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
2623 {
2624 	uhci_td_t	*dummy_td;
2625 
2626 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2627 
2628 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2629 	    "uhci_remove_qh:");
2630 
2631 	dummy_td = pp->pp_qh->td_tailp;
2632 	dummy_td->flag = TD_FLAG_FREE;
2633 
2634 	switch (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep)) {
2635 	case USB_EP_ATTR_CONTROL:
2636 		uhci_remove_ctrl_qh(uhcip, pp);
2637 		break;
2638 	case USB_EP_ATTR_BULK:
2639 		uhci_remove_bulk_qh(uhcip, pp);
2640 		break;
2641 	case USB_EP_ATTR_INTR:
2642 		uhci_remove_intr_qh(uhcip, pp);
2643 		break;
2644 	}
2645 }
2646 
2647 
2648 static void
2649 uhci_remove_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
2650 {
2651 	queue_head_t   *qh = pp->pp_qh;
2652 	queue_head_t   *next_lattice_qh =
2653 		    QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);
2654 
2655 	qh->prev_qh->link_ptr	 = qh->link_ptr;
2656 	next_lattice_qh->prev_qh = qh->prev_qh;
2657 	qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
2658 
2659 }
2660 
2661 /*
2662  * uhci_remove_bulk_qh:
2663  *	Remove a bulk QH from the Host Controller's QH list. There may be a
2664  *	loop for bulk QHs, we must care about this while removing a bulk QH.
2665  */
2666 static void
2667 uhci_remove_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
2668 {
2669 	queue_head_t   *qh = pp->pp_qh;
2670 	queue_head_t   *next_lattice_qh;
2671 	uint32_t	paddr;
2672 
2673 	paddr = (GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);
2674 	next_lattice_qh = (qh == uhcip->uhci_bulk_xfers_q_tail) ?
2675 	    0 : QH_VADDR(paddr);
2676 
2677 	if ((qh == uhcip->uhci_bulk_xfers_q_tail) &&
2678 	    (qh->prev_qh == uhcip->uhci_bulk_xfers_q_head)) {
2679 		SetQH32(uhcip, qh->prev_qh->link_ptr, HC_END_OF_LIST);
2680 	} else {
2681 		qh->prev_qh->link_ptr = qh->link_ptr;
2682 	}
2683 
2684 	if (next_lattice_qh == NULL) {
2685 		uhcip->uhci_bulk_xfers_q_tail = qh->prev_qh;
2686 	} else {
2687 		next_lattice_qh->prev_qh = qh->prev_qh;
2688 	}
2689 
2690 	qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
2691 
2692 }
2693 
2694 
2695 static void
2696 uhci_remove_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
2697 {
2698 	queue_head_t   *qh = pp->pp_qh;
2699 	queue_head_t   *next_lattice_qh =
2700 	    QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);
2701 
2702 	qh->prev_qh->link_ptr = qh->link_ptr;
2703 	if (next_lattice_qh->prev_qh != NULL) {
2704 		next_lattice_qh->prev_qh = qh->prev_qh;
2705 	} else {
2706 		uhcip->uhci_ctrl_xfers_q_tail = qh->prev_qh;
2707 	}
2708 
2709 	qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
2710 }
2711 
2712 
2713 /*
2714  * uhci_allocate_td_from_pool:
2715  *	Allocate a Transfer Descriptor (TD) from the TD buffer pool.
2716  */
2717 static uhci_td_t *
2718 uhci_allocate_td_from_pool(uhci_state_t *uhcip)
2719 {
2720 	int		index;
2721 	uhci_td_t	*td;
2722 
2723 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2724 
2725 	/*
2726 	 * Search for a blank Transfer Descriptor (TD)
2727 	 * in the TD buffer pool.
2728 	 */
2729 	for (index = 0; index < uhci_td_pool_size; index ++) {
2730 		if (uhcip->uhci_td_pool_addr[index].flag == TD_FLAG_FREE) {
2731 			break;
2732 		}
2733 	}
2734 
2735 	if (index == uhci_td_pool_size) {
2736 		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2737 		    "uhci_allocate_td_from_pool: TD exhausted");
2738 
2739 		return (NULL);
2740 	}
2741 
2742 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2743 	    "uhci_allocate_td_from_pool: Allocated %d", index);
2744 
2745 	/* Create a new dummy for the end of the TD list */
2746 	td = &uhcip->uhci_td_pool_addr[index];
2747 
2748 	/* Mark the newly allocated TD as a dummy */
2749 	td->flag =  TD_FLAG_DUMMY;
2750 	td->qh_td_prev	=  NULL;
2751 
2752 	return (td);
2753 }
2754 
2755 
/*
 * uhci_insert_bulk_td:
 *	Set up a bulk transfer on the given pipe: create a transfer
 *	wrapper for the request, allocate up to MAX_NUM_BULK_TDS_PER_XFER
 *	TDs from one or more TD pools, chain them together, and hand the
 *	chain to the HC via the pipe's QH element pointer.
 *
 *	Returns USB_SUCCESS, USB_NO_RESOURCES (TW allocation failure) or
 *	USB_FAILURE (bulk xfer info / TD pool allocation failure).
 */
int
uhci_insert_bulk_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_bulk_req_t		*req,
	usb_flags_t		flags)
{
	size_t			length;
	uint_t			mps;	/* MaxPacketSize */
	uint_t			num_bulk_tds, i, j;
	uint32_t		buf_offs;
	uhci_td_t		*bulk_td_ptr;
	uhci_td_t		*current_dummy, *tmp_td;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;
	uhci_bulk_isoc_xfer_t	*bulk_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_bulk_td: req: 0x%p, flags = 0x%x", req, flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Create transfer wrapper
	 */
	if ((tw = uhci_create_transfer_wrapper(uhcip, pp, req->bulk_len,
	    flags)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_bulk_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	tw->tw_bytes_xfered		= 0;
	tw->tw_bytes_pending		= req->bulk_len;
	tw->tw_handle_td		= uhci_handle_bulk_td;
	tw->tw_handle_callback_value	= (usb_opaque_t)req->bulk_data;
	tw->tw_timeout_cnt		= req->bulk_timeout;
	tw->tw_data			= req->bulk_data;
	tw->tw_curr_xfer_reqp		= (usb_opaque_t)req;

	/* Get the bulk pipe direction */
	tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
							PID_OUT : PID_IN;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_bulk_td: direction: 0x%x", tw->tw_direction);

	/* If the DATA OUT, copy the data into transfer buffer. */
	if (tw->tw_direction == PID_OUT) {
		ASSERT(req->bulk_data != NULL);

		/* Copy the data into the message */
		ddi_rep_put8(tw->tw_accesshandle, req->bulk_data->b_rptr,
		    (uint8_t *)tw->tw_buf, req->bulk_len, DDI_DEV_AUTOINCR);
	}

	/* Get the max packet size.  */
	length = mps = pp->pp_pipe_handle->p_ep.wMaxPacketSize;

	/*
	 * Calculate number of TD's to insert in the current frame interval.
	 * Max number TD's allowed (driver implementation) is 128
	 * in one frame interval. Once all the TD's are completed
	 * then the remaining TD's will be inserted into the lattice
	 * in the uhci_handle_bulk_td().
	 */
	if ((tw->tw_bytes_pending / mps) >= MAX_NUM_BULK_TDS_PER_XFER) {
		num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
	} else {
		num_bulk_tds = (tw->tw_bytes_pending / mps);

		/* A partial final packet needs one more TD */
		if (tw->tw_bytes_pending % mps) {
			num_bulk_tds++;
			/* length becomes the short final TD's size */
			length = (tw->tw_bytes_pending % mps);
		}
	}

	/*
	 * Allocate memory for the bulk xfer information structure
	 */
	if ((bulk_xfer_info = kmem_zalloc(
	    sizeof (uhci_bulk_isoc_xfer_t), KM_NOSLEEP)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_bulk_td: kmem_zalloc failed");

		/* Free the transfer wrapper */
		uhci_deallocate_tw(uhcip, pp, tw);

		return (USB_FAILURE);
	}

	/* Allocate memory for the bulk TD's */
	if (uhci_alloc_bulk_isoc_tds(uhcip, num_bulk_tds, bulk_xfer_info) !=
	    USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_bulk_td: alloc_bulk_isoc_tds failed");

		kmem_free(bulk_xfer_info, sizeof (uhci_bulk_isoc_xfer_t));

		/* Free the transfer wrapper */
		uhci_deallocate_tw(uhcip, pp, tw);

		return (USB_FAILURE);
	}

	td_pool_ptr = &bulk_xfer_info->td_pools[0];
	bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
	bulk_td_ptr[0].qh_td_prev = NULL;
	current_dummy = pp->pp_qh->td_tailp;
	buf_offs = 0;
	pp->pp_qh->bulk_xfer_info = bulk_xfer_info;

	/*
	 * Fill up all the bulk TD's.
	 * Note: on exit from the inner loop, j indexes the last TD of
	 * the current pool; that TD is then linked either to the QH's
	 * dummy TD (last pool) or to the first TD of the next pool.
	 */
	for (i = 0; i < bulk_xfer_info->num_pools; i++) {
		/* Chain all but the last TD of this pool, mps bytes each */
		for (j = 0; j < (td_pool_ptr->num_tds - 1); j++) {
			uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
			    &bulk_td_ptr[j+1], BULKTD_PADDR(td_pool_ptr,
			    &bulk_td_ptr[j+1]), ph, buf_offs, mps, tw);
			buf_offs += mps;
		}

		/* fill in the last TD */
		if (i == (bulk_xfer_info->num_pools - 1)) {
			/* final TD of the transfer carries "length" bytes */
			uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
			    current_dummy, TD_PADDR(current_dummy),
			    ph, buf_offs, length, tw);
		} else {
			/* fill in the TD at the tail of a pool */
			tmp_td = &bulk_td_ptr[j];
			td_pool_ptr = &bulk_xfer_info->td_pools[i + 1];
			bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
			uhci_fill_in_bulk_isoc_td(uhcip, tmp_td,
			    &bulk_td_ptr[0], BULKTD_PADDR(td_pool_ptr,
			    &bulk_td_ptr[0]), ph, buf_offs, mps, tw);
			buf_offs += mps;
		}
	}

	bulk_xfer_info->num_tds	= num_bulk_tds;

	/*
	 * Point the end of the lattice tree to the start of the bulk xfers
	 * queue head. This allows the HC to execute the same Queue Head/TD
	 * in the same frame. There are some bulk devices, which NAKs after
	 * completing each TD. As a result, the performance on such devices
	 * is very bad.  This loop will  provide a chance to execute NAk'ed
	 * bulk TDs again in the same frame.
	 */
	if (uhcip->uhci_pending_bulk_cmds++ == 0) {
		uhcip->uhci_bulk_xfers_q_tail->link_ptr =
			uhcip->uhci_bulk_xfers_q_head->link_ptr;
		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_insert_bulk_td: count = %d no tds  %d",
		    uhcip->uhci_pending_bulk_cmds, num_bulk_tds);
	}

	/* Insert on the bulk queue head for the execution by HC */
	SetQH32(uhcip, pp->pp_qh->element_ptr,
	    bulk_xfer_info->td_pools[0].cookie.dmac_address);

	return (USB_SUCCESS);
}
2923 
2924 
/*
 * uhci_fill_in_bulk_isoc_td
 *     Fills the bulk/isoc TD
 *
 * current_td    - TD to initialize; it is zeroed first
 * next_td       - TD that follows current_td (for bulk, it also becomes
 *                 the new dummy tail of the QH's TD list)
 * next_td_paddr - physical address written into current_td's link pointer
 * ph            - pipe handle of the bulk/isoc pipe
 * offset - different meanings for bulk and isoc TDs:
 *          starting offset into the TW buffer for a bulk TD
 *          and the index into the isoc packet list for an isoc TD
 * length        - number of bytes this TD transfers
 * tw            - transfer wrapper this TD belongs to
 */
void
uhci_fill_in_bulk_isoc_td(uhci_state_t *uhcip, uhci_td_t *current_td,
	uhci_td_t		*next_td,
	uint32_t		next_td_paddr,
	usba_pipe_handle_data_t	*ph,
	uint_t			offset,
	uint_t			length,
	uhci_trans_wrapper_t	*tw)
{
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	usb_ep_descr_t		*ept = &pp->pp_pipe_handle->p_ep;
	uint32_t		buf_addr;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_fill_in_bulk_isoc_td: tw 0x%p offs 0x%x length 0x%x",
	    tw, offset, length);

	/* Start from a clean TD and link it depth-first to the next TD */
	bzero((char *)current_td, sizeof (uhci_td_t));
	SetTD32(uhcip, current_td->link_ptr, next_td_paddr | HC_DEPTH_FIRST);

	/*
	 * If the client allows short transfers, enable short packet
	 * detect (SPD) in the TD so the HC reports short packets.
	 */
	switch (UHCI_XFER_TYPE(ept)) {
	case USB_EP_ATTR_ISOCH:
		if (((usb_isoc_req_t *)tw->tw_curr_xfer_reqp)->isoc_attributes
		    & USB_ATTRS_SHORT_XFER_OK) {
			SetTD_spd(uhcip, current_td, 1);
		}
		break;
	case USB_EP_ATTR_BULK:
		if (((usb_bulk_req_t *)tw->tw_curr_xfer_reqp)->bulk_attributes
		    & USB_ATTRS_SHORT_XFER_OK) {
			SetTD_spd(uhcip, current_td, 1);
		}
		break;
	}

	mutex_enter(&ph->p_usba_device->usb_mutex);

	/*
	 * Fill in the TD control/token fields: error counter, active bit,
	 * interrupt-on-completion, max length (encoded as length - 1),
	 * data toggle, device address, endpoint number and direction PID.
	 */
	SetTD_c_err(uhcip, current_td, UHCI_MAX_ERR_COUNT);
	SetTD_status(uhcip, current_td, UHCI_TD_ACTIVE);
	SetTD_ioc(uhcip, current_td, INTERRUPT_ON_COMPLETION);
	SetTD_mlen(uhcip, current_td, (length - 1));
	SetTD_dtogg(uhcip, current_td, pp->pp_data_toggle);
	SetTD_devaddr(uhcip, current_td, ph->p_usba_device->usb_addr);
	SetTD_endpt(uhcip, current_td, ph->p_ep.bEndpointAddress &
							END_POINT_ADDRESS_MASK);
	SetTD_PID(uhcip, current_td, tw->tw_direction);

	/* Get the right buffer address for the current TD */
	switch (UHCI_XFER_TYPE(ept)) {
	case USB_EP_ATTR_ISOCH:
		/* "offset" indexes the per-packet isoc DMA buffers */
		buf_addr = tw->tw_isoc_bufs[offset].cookie.dmac_address;
		break;
	case USB_EP_ATTR_BULK:
		/* "offset" is a byte offset into the TW's DMA buffer */
		buf_addr = uhci_get_tw_paddr_by_offs(uhcip, offset,
		    length, tw);
		break;
	}
	SetTD32(uhcip, current_td->buffer_address, buf_addr);

	/*
	 * Adjust the data toggle.
	 * The data toggle bit must always be 0 for isoc transfers.
	 * And set the "iso" bit in the TD for isoc transfers.
	 * For bulk, next_td becomes the QH's new dummy tail TD.
	 */
	if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) {
		pp->pp_data_toggle = 0;
		SetTD_iso(uhcip, current_td, 1);
	} else {
		ADJ_DATA_TOGGLE(pp);
		next_td->qh_td_prev = current_td;
		pp->pp_qh->td_tailp = next_td;
	}

	/* Append the TD to the HCD-wide outstanding TD list */
	current_td->outst_td_next = NULL;
	current_td->outst_td_prev = uhcip->uhci_outst_tds_tail;
	if (uhcip->uhci_outst_tds_head == NULL) {
		uhcip->uhci_outst_tds_head = current_td;
	} else {
		uhcip->uhci_outst_tds_tail->outst_td_next = current_td;
	}
	uhcip->uhci_outst_tds_tail = current_td;
	current_td->tw = tw;

	/* Also link the TD onto this transfer wrapper's TD list */
	if (tw->tw_hctd_head == NULL) {
		ASSERT(tw->tw_hctd_tail == NULL);
		tw->tw_hctd_head = current_td;
		tw->tw_hctd_tail = current_td;
	} else {
		/* Add the td to the end of the list */
		tw->tw_hctd_tail->tw_td_next = current_td;
		tw->tw_hctd_tail = current_td;
	}

	mutex_exit(&ph->p_usba_device->usb_mutex);
}
3028 
3029 
3030 /*
3031  * uhci_alloc_bulk_isoc_tds:
3032  *	- Allocates the isoc/bulk TD pools. It will allocate one whole
3033  *	  pool to store all the TDs if the system allows. Only when the
3034  *	  first allocation fails, it tries to allocate several small
3035  *	  pools with each pool limited in physical page size.
3036  */
3037 static int
3038 uhci_alloc_bulk_isoc_tds(
3039 	uhci_state_t		*uhcip,
3040 	uint_t			num_tds,
3041 	uhci_bulk_isoc_xfer_t	*info)
3042 {
3043 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3044 	    "uhci_alloc_bulk_isoc_tds: num_tds: 0x%x info: 0x%p",
3045 	    num_tds, info);
3046 
3047 	info->num_pools = 1;
3048 	/* allocate as a whole pool at the first time */
3049 	if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) !=
3050 	    USB_SUCCESS) {
3051 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3052 		    "alloc_memory_for_tds failed: num_tds %d num_pools %d",
3053 		    num_tds, info->num_pools);
3054 
3055 		/* reduce the td number per pool and alloc again */
3056 		info->num_pools = num_tds / UHCI_MAX_TD_NUM_PER_POOL;
3057 		if (num_tds % UHCI_MAX_TD_NUM_PER_POOL) {
3058 			info->num_pools++;
3059 		}
3060 
3061 		if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) !=
3062 		    USB_SUCCESS) {
3063 			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3064 			    "alloc_memory_for_tds failed: num_tds %d "
3065 			    "num_pools %d", num_tds, info->num_pools);
3066 
3067 			return (USB_NO_RESOURCES);
3068 		}
3069 	}
3070 
3071 	return (USB_SUCCESS);
3072 }
3073 
3074 
3075 /*
3076  * uhci_alloc_memory_for_tds:
3077  *	- Allocates memory for the isoc/bulk td pools.
3078  */
3079 static int
3080 uhci_alloc_memory_for_tds(
3081 	uhci_state_t		*uhcip,
3082 	uint_t			num_tds,
3083 	uhci_bulk_isoc_xfer_t	*info)
3084 {
3085 	int			result, i, j, err;
3086 	size_t			real_length;
3087 	uint_t			ccount, num;
3088 	ddi_device_acc_attr_t	dev_attr;
3089 	uhci_bulk_isoc_td_pool_t *td_pool_ptr1, *td_pool_ptr2;
3090 
3091 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3092 	    "uhci_alloc_memory_for_tds: num_tds: 0x%x info: 0x%p "
3093 	    "num_pools: %u", num_tds, info, info->num_pools);
3094 
3095 	/* The host controller will be little endian */
3096 	dev_attr.devacc_attr_version		= DDI_DEVICE_ATTR_V0;
3097 	dev_attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
3098 	dev_attr.devacc_attr_dataorder		= DDI_STRICTORDER_ACC;
3099 
3100 	/* Allocate the TD pool structures */
3101 	if ((info->td_pools = kmem_zalloc(
3102 	    (sizeof (uhci_bulk_isoc_td_pool_t) * info->num_pools),
3103 	    KM_SLEEP)) == NULL) {
3104 		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3105 		    "uhci_alloc_memory_for_tds: alloc td_pools failed");
3106 
3107 		return (USB_FAILURE);
3108 	}
3109 
3110 	for (i = 0; i < info->num_pools; i++) {
3111 		if (info->num_pools == 1) {
3112 			num = num_tds;
3113 		} else if (i < (info->num_pools - 1)) {
3114 			num = UHCI_MAX_TD_NUM_PER_POOL;
3115 		} else {
3116 			num = (num_tds % UHCI_MAX_TD_NUM_PER_POOL);
3117 		}
3118 
3119 		td_pool_ptr1 = &info->td_pools[i];
3120 
3121 		/* Allocate the bulk TD pool DMA handle */
3122 		if (ddi_dma_alloc_handle(uhcip->uhci_dip,
3123 		    &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
3124 		    &td_pool_ptr1->dma_handle) != DDI_SUCCESS) {
3125 
3126 			for (j = 0; j < i; j++) {
3127 				td_pool_ptr2 = &info->td_pools[j];
3128 				result = ddi_dma_unbind_handle(
3129 				    td_pool_ptr2->dma_handle);
3130 				ASSERT(result == DDI_SUCCESS);
3131 				ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
3132 				ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
3133 			}
3134 
3135 			kmem_free(info->td_pools,
3136 			    (sizeof (uhci_bulk_isoc_td_pool_t) *
3137 			    info->num_pools));
3138 
3139 			return (USB_FAILURE);
3140 		}
3141 
3142 		/* Allocate the memory for the bulk TD pool */
3143 		if (ddi_dma_mem_alloc(td_pool_ptr1->dma_handle,
3144 		    num * sizeof (uhci_td_t), &dev_attr,
3145 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
3146 		    &td_pool_ptr1->pool_addr, &real_length,
3147 		    &td_pool_ptr1->mem_handle) != DDI_SUCCESS) {
3148 
3149 			ddi_dma_free_handle(&td_pool_ptr1->dma_handle);
3150 
3151 			for (j = 0; j < i; j++) {
3152 				td_pool_ptr2 = &info->td_pools[j];
3153 				result = ddi_dma_unbind_handle(
3154 				    td_pool_ptr2->dma_handle);
3155 				ASSERT(result == DDI_SUCCESS);
3156 				ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
3157 				ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
3158 			}
3159 
3160 			kmem_free(info->td_pools,
3161 			    (sizeof (uhci_bulk_isoc_td_pool_t) *
3162 			    info->num_pools));
3163 
3164 			return (USB_FAILURE);
3165 		}
3166 
3167 		/* Map the bulk TD pool into the I/O address space */
3168 		result = ddi_dma_addr_bind_handle(td_pool_ptr1->dma_handle,
3169 		    NULL, (caddr_t)td_pool_ptr1->pool_addr, real_length,
3170 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
3171 		    &td_pool_ptr1->cookie, &ccount);
3172 
3173 		/* Process the result */
3174 		err = USB_SUCCESS;
3175 
3176 		if (result != DDI_DMA_MAPPED) {
3177 			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3178 			    "uhci_allocate_memory_for_tds: Result = %d",
3179 			    result);
3180 			uhci_decode_ddi_dma_addr_bind_handle_result(uhcip,
3181 			    result);
3182 
3183 			err = USB_FAILURE;
3184 		}
3185 
3186 		if ((result == DDI_DMA_MAPPED) && (ccount != 1)) {
3187 			/* The cookie count should be 1 */
3188 			USB_DPRINTF_L2(PRINT_MASK_ATTA,
3189 			    uhcip->uhci_log_hdl,
3190 			    "uhci_allocate_memory_for_tds: "
3191 			    "More than 1 cookie");
3192 
3193 			result = ddi_dma_unbind_handle(
3194 			    td_pool_ptr1->dma_handle);
3195 			ASSERT(result == DDI_SUCCESS);
3196 
3197 			err = USB_FAILURE;
3198 		}
3199 
3200 		if (err == USB_FAILURE) {
3201 
3202 			ddi_dma_mem_free(&td_pool_ptr1->mem_handle);
3203 			ddi_dma_free_handle(&td_pool_ptr1->dma_handle);
3204 
3205 			for (j = 0; j < i; j++) {
3206 				td_pool_ptr2 = &info->td_pools[j];
3207 				result = ddi_dma_unbind_handle(
3208 				    td_pool_ptr2->dma_handle);
3209 				ASSERT(result == DDI_SUCCESS);
3210 				ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
3211 				ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
3212 			}
3213 
3214 			kmem_free(info->td_pools,
3215 			    (sizeof (uhci_bulk_isoc_td_pool_t) *
3216 			    info->num_pools));
3217 
3218 			return (USB_FAILURE);
3219 		}
3220 
3221 		bzero((void *)td_pool_ptr1->pool_addr,
3222 		    num * sizeof (uhci_td_t));
3223 		td_pool_ptr1->num_tds = num;
3224 	}
3225 
3226 	return (USB_SUCCESS);
3227 }
3228 
3229 
/*
 * uhci_handle_bulk_td:
 *
 *	Handles the completed bulk transfer descriptors
 */
void
uhci_handle_bulk_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint_t			num_bulk_tds, index, td_count, j;
	usb_cr_t		error;
	uint_t			length, bytes_xfered;
	ushort_t		MaxPacketSize;
	uint32_t		buf_offs, paddr;
	uhci_td_t		*bulk_td_ptr, *current_dummy, *td_head;
	uhci_td_t		*tmp_td;
	queue_head_t		*qh, *next_qh;
	uhci_trans_wrapper_t	*tw = td->tw;
	uhci_pipe_private_t	*pp = tw->tw_pipe_private;
	uhci_bulk_isoc_xfer_t	*bulk_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;
	usba_pipe_handle_data_t	*ph;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_handle_bulk_td: td = 0x%p tw = 0x%p", td, tw);

	/*
	 * Update the tw_bytes_pending, and tw_bytes_xfered
	 */
	bytes_xfered = ZERO_LENGTH;

	/*
	 * Check whether there are any errors occurred in the xfer.
	 * If so, update the data_toggle for the queue head and
	 * return error to the upper layer.
	 */
	if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
		uhci_handle_bulk_td_errors(uhcip, td);

		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_handle_bulk_td: error; data toggle: 0x%x",
		    pp->pp_data_toggle);

		return;
	}

	/*
	 * Update the tw_bytes_pending, and tw_bytes_xfered.
	 * The TD actual-length field is encoded as n - 1 (hence the + 1);
	 * ZERO_LENGTH denotes a zero-byte packet.
	 */
	bytes_xfered = GetTD_alen(uhcip, td);
	if (bytes_xfered != ZERO_LENGTH) {
		tw->tw_bytes_pending -= (bytes_xfered + 1);
		tw->tw_bytes_xfered  += (bytes_xfered + 1);
	}

	/*
	 * Get Bulk pipe information and pipe handle
	 */
	bulk_xfer_info	= pp->pp_qh->bulk_xfer_info;
	ph = tw->tw_pipe_private->pp_pipe_handle;

	/*
	 * Check whether data underrun occurred.
	 * If so, complete the transfer
	 * Update the data toggle bit
	 */
	if (bytes_xfered != GetTD_mlen(uhcip, td)) {
		/* Treat this as the last TD so the completion path runs */
		bulk_xfer_info->num_tds = 1;
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_handle_bulk_td: Data underrun occured");

		pp->pp_data_toggle = GetTD_dtogg(uhcip, td) == 0 ? 1 : 0;
	}

	/*
	 * If the TD's in the current frame are completed, then check
	 * whether we have any more bytes to xfer. If so, insert TD's.
	 * If no more bytes needs to be transferred, then do callback to the
	 * upper layer.
	 * If the TD's in the current frame are not completed, then
	 * just delete the TD from the linked lists.
	 */
	USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_handle_bulk_td: completed TD data toggle: 0x%x",
	    GetTD_dtogg(uhcip, td));

	if (--bulk_xfer_info->num_tds == 0) {
		uhci_delete_td(uhcip, td);

		/*
		 * More bytes pending and the last TD was completely
		 * filled: refill the TD pools with the next chunk.
		 */
		if ((tw->tw_bytes_pending) &&
		    (GetTD_mlen(uhcip, td) - GetTD_alen(uhcip, td) == 0)) {

			MaxPacketSize = pp->pp_pipe_handle->p_ep.wMaxPacketSize;
			length = MaxPacketSize;

			/*
			 * If this QH is not linked right after the bulk
			 * queue head, splice it out of its current spot
			 * and re-link it at the bulk queue tail.
			 */
			qh = pp->pp_qh;
			paddr = GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK;
			if (GetQH32(uhcip, qh->link_ptr) !=
			    GetQH32(uhcip,
				uhcip->uhci_bulk_xfers_q_head->link_ptr)) {
				next_qh = QH_VADDR(paddr);
				SetQH32(uhcip, qh->prev_qh->link_ptr,
				    paddr|(0x2));
				next_qh->prev_qh = qh->prev_qh;
				SetQH32(uhcip, qh->link_ptr,
				    GetQH32(uhcip,
				    uhcip->uhci_bulk_xfers_q_head->link_ptr));
				qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
				SetQH32(uhcip,
				    uhcip->uhci_bulk_xfers_q_tail->link_ptr,
				    QH_PADDR(qh) | 0x2);
				uhcip->uhci_bulk_xfers_q_tail = qh;
			}

			/*
			 * Compute how many TDs the remaining bytes need
			 * (capped at MAX_NUM_BULK_TDS_PER_XFER); a final
			 * partial packet uses "length" instead of
			 * MaxPacketSize.
			 */
			if ((tw->tw_bytes_pending / MaxPacketSize) >=
			    MAX_NUM_BULK_TDS_PER_XFER) {
				num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
			} else {
				num_bulk_tds =
					(tw->tw_bytes_pending / MaxPacketSize);
				if (tw->tw_bytes_pending % MaxPacketSize) {
					num_bulk_tds++;
					length = (tw->tw_bytes_pending %
							MaxPacketSize);
				}
			}

			current_dummy = pp->pp_qh->td_tailp;
			td_pool_ptr = &bulk_xfer_info->td_pools[0];
			bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
			buf_offs = tw->tw_bytes_xfered;
			td_count = num_bulk_tds;
			index = 0;

			/* reuse the TDs to transfer more data */
			while (td_count > 0) {
				/*
				 * Chain TDs within the current pool; the
				 * very last TD links to the dummy TD.
				 */
				for (j = 0;
				    (j < (td_pool_ptr->num_tds - 1)) &&
				    (td_count > 1); j++, td_count--) {
					uhci_fill_in_bulk_isoc_td(uhcip,
					    &bulk_td_ptr[j], &bulk_td_ptr[j+1],
					    BULKTD_PADDR(td_pool_ptr,
					    &bulk_td_ptr[j+1]), ph, buf_offs,
					    MaxPacketSize, tw);
					buf_offs += MaxPacketSize;
				}

				if (td_count == 1) {
					uhci_fill_in_bulk_isoc_td(uhcip,
					    &bulk_td_ptr[j], current_dummy,
					    TD_PADDR(current_dummy), ph,
					    buf_offs, length, tw);

					break;
				} else {
					/* cross over into the next TD pool */
					tmp_td = &bulk_td_ptr[j];
					ASSERT(index <
					    (bulk_xfer_info->num_pools - 1));
					td_pool_ptr = &bulk_xfer_info->
					    td_pools[index + 1];
					bulk_td_ptr = (uhci_td_t *)
					    td_pool_ptr->pool_addr;
					uhci_fill_in_bulk_isoc_td(uhcip,
					    tmp_td, &bulk_td_ptr[0],
					    BULKTD_PADDR(td_pool_ptr,
					    &bulk_td_ptr[0]), ph, buf_offs,
					    MaxPacketSize, tw);
					buf_offs += MaxPacketSize;
					td_count--;
					index++;
				}
			}

			/* Hand the refilled TD chain back to the HC */
			pp->pp_qh->bulk_xfer_info = bulk_xfer_info;
			bulk_xfer_info->num_tds	= num_bulk_tds;
			SetQH32(uhcip, pp->pp_qh->element_ptr,
			    bulk_xfer_info->td_pools[0].cookie.dmac_address);
		} else {
			usba_pipe_handle_data_t *usb_pp = pp->pp_pipe_handle;

			pp->pp_qh->bulk_xfer_info = NULL;

			if (tw->tw_bytes_pending) {
				/* Update the element pointer */
				SetQH32(uhcip, pp->pp_qh->element_ptr,
						TD_PADDR(pp->pp_qh->td_tailp));

				/* Remove all the tds */
				td_head = tw->tw_hctd_head;
				while (td_head != NULL) {
					uhci_delete_td(uhcip, td_head);
					td_head = tw->tw_hctd_head;
				}
			}

			if (tw->tw_direction == PID_IN) {
				usb_req_attrs_t	attrs = ((usb_bulk_req_t *)
					tw->tw_curr_xfer_reqp)->bulk_attributes;

				error = USB_CR_OK;

				/* Data underrun occurred */
				if (tw->tw_bytes_pending &&
				    (!(attrs & USB_ATTRS_SHORT_XFER_OK))) {
					error = USB_CR_DATA_UNDERRUN;
				}

				uhci_sendup_td_message(uhcip, error, tw);
			} else {
				uhci_do_byte_stats(uhcip, tw->tw_length,
				    usb_pp->p_ep.bmAttributes,
				    usb_pp->p_ep.bEndpointAddress);

				/* Data underrun occurred */
				if (tw->tw_bytes_pending) {

					tw->tw_data->b_rptr +=
						tw->tw_bytes_xfered;

					USB_DPRINTF_L2(PRINT_MASK_ATTA,
					    uhcip->uhci_log_hdl,
					    "uhci_handle_bulk_td: "
					    "data underrun occurred");

					uhci_hcdi_callback(uhcip, pp,
					    tw->tw_pipe_private->pp_pipe_handle,
					    tw, USB_CR_DATA_UNDERRUN);
				} else {
					uhci_hcdi_callback(uhcip, pp,
					    tw->tw_pipe_private->pp_pipe_handle,
					    tw, USB_CR_OK);
				}
			} /* direction */

			/* Deallocate DMA memory */
			uhci_deallocate_tw(uhcip, pp, tw);
			for (j = 0; j < bulk_xfer_info->num_pools; j++) {
				td_pool_ptr = &bulk_xfer_info->td_pools[j];
				(void) ddi_dma_unbind_handle(
				    td_pool_ptr->dma_handle);
				ddi_dma_mem_free(&td_pool_ptr->mem_handle);
				ddi_dma_free_handle(&td_pool_ptr->dma_handle);
			}
			kmem_free(bulk_xfer_info->td_pools,
			    (sizeof (uhci_bulk_isoc_td_pool_t) *
			    bulk_xfer_info->num_pools));
			kmem_free(bulk_xfer_info,
			    sizeof (uhci_bulk_isoc_xfer_t));

			/*
			 * When there are no pending bulk commands, point the
			 * end of the lattice tree to NULL. This will make sure
			 * that the HC control does not loop anymore and PCI
			 * bus is not affected.
			 */
			if (--uhcip->uhci_pending_bulk_cmds == 0) {
				uhcip->uhci_bulk_xfers_q_tail->link_ptr =
				    HC_END_OF_LIST;
				USB_DPRINTF_L3(PRINT_MASK_ATTA,
				    uhcip->uhci_log_hdl,
				    "uhci_handle_bulk_td: count = %d",
				    uhcip->uhci_pending_bulk_cmds);
			}
		}
	} else {
		/* Other TDs of this transfer are still outstanding */
		uhci_delete_td(uhcip, td);
	}
}
3497 
3498 
3499 void
3500 uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td)
3501 {
3502 	usb_cr_t		usb_err;
3503 	uint32_t		paddr_tail, element_ptr, paddr;
3504 	uhci_td_t		*next_td;
3505 	uhci_pipe_private_t	*pp;
3506 	uhci_trans_wrapper_t	*tw = td->tw;
3507 	usba_pipe_handle_data_t	*ph;
3508 	uhci_bulk_isoc_td_pool_t *td_pool_ptr = NULL;
3509 
3510 	USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3511 	    "uhci_handle_bulk_td_errors: td = %p", (void *)td);
3512 
3513 #ifdef	DEBUG
3514 	uhci_print_td(uhcip, td);
3515 #endif
3516 
3517 	tw = td->tw;
3518 	ph = tw->tw_pipe_private->pp_pipe_handle;
3519 	pp = (uhci_pipe_private_t *)ph->p_hcd_private;
3520 
3521 	/*
3522 	 * Find the type of error occurred and return the error
3523 	 * to the upper layer. And adjust the data toggle.
3524 	 */
3525 	element_ptr = GetQH32(uhcip, pp->pp_qh->element_ptr) &
3526 	    QH_ELEMENT_PTR_MASK;
3527 	paddr_tail = TD_PADDR(pp->pp_qh->td_tailp);
3528 
3529 	/*
3530 	 * If a timeout occurs before a transfer has completed,
3531 	 * the timeout handler sets the CRC/Timeout bit and clears the Active
3532 	 * bit in the link_ptr for each td in the transfer.
3533 	 * It then waits (at least) 1 ms so that any tds the controller might
3534 	 * have been executing will have completed.
3535 	 * So at this point element_ptr will point to either:
3536 	 * 1) the next td for the transfer (which has not been executed,
3537 	 * and has the CRC/Timeout status bit set and Active bit cleared),
3538 	 * 2) the dummy td for this qh.
3539 	 * So if the element_ptr does not point to the dummy td, we know
3540 	 * it points to the next td that would have been executed.
3541 	 * That td has the data toggle we want to save.
3542 	 * All outstanding tds have been marked as CRC/Timeout,
3543 	 * so it doesn't matter which td we pass to uhci_parse_td_error
3544 	 * for the error status.
3545 	 */
3546 	if (element_ptr != paddr_tail) {
3547 		paddr = (element_ptr & QH_ELEMENT_PTR_MASK);
3548 		uhci_get_bulk_td_by_paddr(uhcip, pp->pp_qh->bulk_xfer_info,
3549 		    paddr, &td_pool_ptr);
3550 		next_td = BULKTD_VADDR(td_pool_ptr, paddr);
3551 		USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3552 		    "uhci_handle_bulk_td_errors: next td = %p",
3553 		    (void *)next_td);
3554 
3555 		usb_err = uhci_parse_td_error(uhcip, pp, next_td);
3556 	} else {
3557 		usb_err = uhci_parse_td_error(uhcip, pp, td);
3558 	}
3559 
3560 	/*
3561 	 * Update the link pointer.
3562 	 */
3563 	SetQH32(uhcip, pp->pp_qh->element_ptr, TD_PADDR(pp->pp_qh->td_tailp));
3564 
3565 	/*
3566 	 * Send up number of bytes transferred before the error condition.
3567 	 */
3568 	if ((tw->tw_direction == PID_OUT) && tw->tw_data) {
3569 		tw->tw_data->b_rptr += tw->tw_bytes_xfered;
3570 	}
3571 
3572 	uhci_remove_bulk_tds_tws(uhcip, tw->tw_pipe_private, UHCI_IN_ERROR);
3573 
3574 	/*
3575 	 * When there  are no pending bulk commands, point the end of the
3576 	 * lattice tree to NULL. This will make sure that the  HC control
3577 	 * does not loop anymore and PCI bus is not affected.
3578 	 */
3579 	if (--uhcip->uhci_pending_bulk_cmds == 0) {
3580 		uhcip->uhci_bulk_xfers_q_tail->link_ptr = HC_END_OF_LIST;
3581 		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3582 		    "uhci_handle_bulk_td_errors: count = %d",
3583 		    uhcip->uhci_pending_bulk_cmds);
3584 	}
3585 
3586 	uhci_hcdi_callback(uhcip, pp, ph, tw, usb_err);
3587 	uhci_deallocate_tw(uhcip, pp, tw);
3588 }
3589 
3590 
3591 /*
3592  * uhci_get_bulk_td_by_paddr:
3593  *	Obtain the address of the TD pool the physical address falls in.
3594  *
3595  * td_pool_pp - pointer to the address of the TD pool containing the paddr
3596  */
3597 /* ARGSUSED */
3598 static void
3599 uhci_get_bulk_td_by_paddr(
3600 	uhci_state_t			*uhcip,
3601 	uhci_bulk_isoc_xfer_t		*info,
3602 	uint32_t			paddr,
3603 	uhci_bulk_isoc_td_pool_t	**td_pool_pp)
3604 {
3605 	uint_t				i = 0;
3606 
3607 	while (i < info->num_pools) {
3608 		*td_pool_pp = &info->td_pools[i];
3609 		if (((*td_pool_pp)->cookie.dmac_address <= paddr) &&
3610 		    (((*td_pool_pp)->cookie.dmac_address +
3611 		    (*td_pool_pp)->cookie.dmac_size) > paddr)) {
3612 
3613 			break;
3614 		}
3615 		i++;
3616 	}
3617 
3618 	ASSERT(i < info->num_pools);
3619 }
3620 
3621 
/*
 * uhci_remove_bulk_tds_tws:
 *	Remove the outstanding bulk TDs that belong to this pipe and free
 *	the pipe's bulk TD pool DMA resources.
 *
 * what - UHCI_IN_CLOSE / UHCI_IN_RESET: flush pending requests back to
 *	  the client (USB_CR_FLUSHED) and free the transfer wrappers here;
 *	  UHCI_IN_ERROR: the caller performs the callback/TW teardown.
 */
void
uhci_remove_bulk_tds_tws(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	int			what)
{
	uint_t			rval, i;
	uhci_td_t		*head;
	uhci_td_t		*head_next;
	usb_opaque_t		curr_reqp;
	uhci_bulk_isoc_xfer_t	*info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Nothing to do when no bulk transfer is pending on this pipe */
	if ((info = pp->pp_qh->bulk_xfer_info) == NULL) {

		return;
	}

	head = uhcip->uhci_outst_tds_head;

	/* Walk the HCD-wide outstanding TD list for TDs of this pipe */
	while (head) {
		uhci_trans_wrapper_t *tw_tmp = head->tw;
		/* save the successor first; "head" is unlinked below */
		head_next = head->outst_td_next;

		if (pp->pp_qh == tw_tmp->tw_pipe_private->pp_qh) {
			curr_reqp = tw_tmp->tw_curr_xfer_reqp;
			if (curr_reqp &&
			    ((what == UHCI_IN_CLOSE) ||
			    (what == UHCI_IN_RESET))) {
				uhci_hcdi_callback(uhcip, pp,
				    pp->pp_pipe_handle,
				    tw_tmp, USB_CR_FLUSHED);
			} /* end of curr_reqp */

			uhci_delete_td(uhcip, head);

			if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
				ASSERT(info->num_tds > 0);
				if (--info->num_tds == 0) {
					uhci_deallocate_tw(uhcip, pp, tw_tmp);

					/*
					 * This will make sure that the HC
					 * does not loop anymore when there
					 * are no pending bulk commands.
					 */
					if (--uhcip->uhci_pending_bulk_cmds
					    == 0) {
						uhcip->uhci_bulk_xfers_q_tail->
						    link_ptr = HC_END_OF_LIST;
						USB_DPRINTF_L3(PRINT_MASK_ATTA,
						    uhcip->uhci_log_hdl,
						    "uhci_remove_bulk_tds_tws:"
						    " count = %d",
						    uhcip->
						    uhci_pending_bulk_cmds);
					}
				}
			}
		}

		head = head_next;
	}

	if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
		ASSERT(info->num_tds == 0);
	}

	/* Release the DMA resources of every TD pool, then the pool array */
	for (i = 0; i < info->num_pools; i++) {
		td_pool_ptr = &info->td_pools[i];
		rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
		ASSERT(rval == DDI_SUCCESS);
		ddi_dma_mem_free(&td_pool_ptr->mem_handle);
		ddi_dma_free_handle(&td_pool_ptr->dma_handle);
	}
	kmem_free(info->td_pools, (sizeof (uhci_bulk_isoc_td_pool_t) *
	    info->num_pools));
	kmem_free(info, sizeof (uhci_bulk_isoc_xfer_t));
	pp->pp_qh->bulk_xfer_info = NULL;
}
3704 
3705 
3706 /*
3707  * uhci_save_data_toggle ()
3708  *	Save the data toggle in the usba_device structure
3709  */
3710 void
3711 uhci_save_data_toggle(uhci_pipe_private_t *pp)
3712 {
3713 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3714 
3715 	/* Save the data toggle in the usb devices structure. */
3716 	mutex_enter(&ph->p_mutex);
3717 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3718 	    pp->pp_data_toggle);
3719 	mutex_exit(&ph->p_mutex);
3720 }
3721 
3722 /*
3723  * uhci_create_isoc_transfer_wrapper:
3724  *	Create a Transaction Wrapper (TW) for isoc transfer.
3725  *	This involves the allocating of DMA resources.
3726  *
3727  *	For isoc transfers, one isoc transfer includes multiple packets
3728  *	and each packet may have a different length. So each packet is
3729  *	transfered by one TD. We only know the individual packet length
3730  *	won't exceed 1023 bytes, but we don't know exactly the lengths.
3731  *	It is hard to make one physically discontiguous DMA buffer which
3732  *	can fit in all the TDs like what can be done to the ctrl/bulk/
3733  *	intr transfers. It is also undesirable to make one physically
3734  *	contiguous DMA buffer for all the packets, since this may easily
3735  *	fail when the system is in low memory. So an individual DMA
3736  *	buffer is allocated for an individual isoc packet and each DMA
3737  *	buffer is physically contiguous. An extra structure is allocated
3738  *	to save the multiple DMA handles.
3739  */
3740 static uhci_trans_wrapper_t *
3741 uhci_create_isoc_transfer_wrapper(
3742 	uhci_state_t		*uhcip,
3743 	uhci_pipe_private_t	*pp,
3744 	usb_isoc_req_t		*req,
3745 	size_t			length,
3746 	usb_flags_t		usb_flags)
3747 {
3748 	int			result;
3749 	size_t			real_length, strtlen, xfer_size;
3750 	uhci_trans_wrapper_t	*tw;
3751 	ddi_device_acc_attr_t	dev_attr;
3752 	ddi_dma_attr_t		dma_attr;
3753 	int			kmem_flag;
3754 	int			(*dmamem_wait)(caddr_t);
3755 	uint_t			i, j, ccount;
3756 	usb_isoc_req_t		*tmp_req = req;
3757 
3758 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
3759 
3760 	if (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep) != USB_EP_ATTR_ISOCH) {
3761 
3762 		return (NULL);
3763 	}
3764 
3765 	if ((req == NULL) && (UHCI_XFER_DIR(&pp->pp_pipe_handle->p_ep) ==
3766 	    USB_EP_DIR_IN)) {
3767 		tmp_req = (usb_isoc_req_t *)pp->pp_client_periodic_in_reqp;
3768 	}
3769 
3770 	if (tmp_req == NULL) {
3771 
3772 		return (NULL);
3773 	}
3774 
3775 
3776 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3777 	    "uhci_create_isoc_transfer_wrapper: length = 0x%lx flags = 0x%x",
3778 	    length, usb_flags);
3779 
3780 	/* SLEEP flag should not be used in interrupt context */
3781 	if (servicing_interrupt()) {
3782 		kmem_flag = KM_NOSLEEP;
3783 		dmamem_wait = DDI_DMA_DONTWAIT;
3784 	} else {
3785 		kmem_flag = KM_SLEEP;
3786 		dmamem_wait = DDI_DMA_SLEEP;
3787 	}
3788 
3789 	/* Allocate space for the transfer wrapper */
3790 	if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
3791 	    NULL) {
3792 		USB_DPRINTF_L2(PRINT_MASK_LISTS,  uhcip->uhci_log_hdl,
3793 		    "uhci_create_isoc_transfer_wrapper: kmem_alloc failed");
3794 
3795 		return (NULL);
3796 	}
3797 
3798 	/* Allocate space for the isoc buffer handles */
3799 	strtlen = sizeof (uhci_isoc_buf_t) * tmp_req->isoc_pkts_count;
3800 	if ((tw->tw_isoc_bufs = kmem_zalloc(strtlen, kmem_flag)) == NULL) {
3801 		USB_DPRINTF_L2(PRINT_MASK_LISTS,  uhcip->uhci_log_hdl,
3802 		    "uhci_create_isoc_transfer_wrapper: kmem_alloc "
3803 		    "isoc buffer failed");
3804 		kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3805 
3806 		return (NULL);
3807 	}
3808 
3809 	bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
3810 	dma_attr.dma_attr_sgllen = 1;
3811 
3812 	dev_attr.devacc_attr_version		= DDI_DEVICE_ATTR_V0;
3813 	dev_attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
3814 	dev_attr.devacc_attr_dataorder		= DDI_STRICTORDER_ACC;
3815 
3816 	/* Store the transfer length */
3817 	tw->tw_length = length;
3818 
3819 	for (i = 0; i < tmp_req->isoc_pkts_count; i++) {
3820 		tw->tw_isoc_bufs[i].index = i;
3821 
3822 		/* Allocate the DMA handle */
3823 		if ((result = ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr,
3824 		    dmamem_wait, 0, &tw->tw_isoc_bufs[i].dma_handle)) !=
3825 		    DDI_SUCCESS) {
3826 			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3827 			    "uhci_create_isoc_transfer_wrapper: "
3828 			    "Alloc handle %d failed", i);
3829 
3830 			for (j = 0; j < i; j++) {
3831 				result = ddi_dma_unbind_handle(
3832 				    tw->tw_isoc_bufs[j].dma_handle);
3833 				ASSERT(result == USB_SUCCESS);
3834 				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
3835 				    mem_handle);
3836 				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
3837 				    dma_handle);
3838 			}
3839 			kmem_free(tw->tw_isoc_bufs, strtlen);
3840 			kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3841 
3842 			return (NULL);
3843 		}
3844 
3845 		/* Allocate the memory */
3846 		xfer_size = tmp_req->isoc_pkt_descr[i].isoc_pkt_length;
3847 		if ((result = ddi_dma_mem_alloc(tw->tw_isoc_bufs[i].dma_handle,
3848 		    xfer_size, &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait,
3849 		    NULL, (caddr_t *)&tw->tw_isoc_bufs[i].buf_addr,
3850 		    &real_length, &tw->tw_isoc_bufs[i].mem_handle)) !=
3851 		    DDI_SUCCESS) {
3852 			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3853 			    "uhci_create_isoc_transfer_wrapper: "
3854 			    "dma_mem_alloc %d fail", i);
3855 			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
3856 
3857 			for (j = 0; j < i; j++) {
3858 				result = ddi_dma_unbind_handle(
3859 				    tw->tw_isoc_bufs[j].dma_handle);
3860 				ASSERT(result == USB_SUCCESS);
3861 				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
3862 				    mem_handle);
3863 				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
3864 				    dma_handle);
3865 			}
3866 			kmem_free(tw->tw_isoc_bufs, strtlen);
3867 			kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3868 
3869 			return (NULL);
3870 		}
3871 
3872 		ASSERT(real_length >= xfer_size);
3873 
3874 		/* Bind the handle */
3875 		result = ddi_dma_addr_bind_handle(
3876 		    tw->tw_isoc_bufs[i].dma_handle, NULL,
3877 		    (caddr_t)tw->tw_isoc_bufs[i].buf_addr, real_length,
3878 		    DDI_DMA_RDWR|DDI_DMA_CONSISTENT, dmamem_wait, NULL,
3879 		    &tw->tw_isoc_bufs[i].cookie, &ccount);
3880 
3881 		if ((result == DDI_DMA_MAPPED) && (ccount == 1)) {
3882 			tw->tw_isoc_bufs[i].length = xfer_size;
3883 
3884 			continue;
3885 		} else {
3886 			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3887 			    "uhci_create_isoc_transfer_wrapper: "
3888 			    "Bind handle %d failed", i);
3889 			if (result == DDI_DMA_MAPPED) {
3890 				result = ddi_dma_unbind_handle(
3891 				    tw->tw_isoc_bufs[i].dma_handle);
3892 				ASSERT(result == USB_SUCCESS);
3893 			}
3894 			ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
3895 			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
3896 
3897 			for (j = 0; j < i; j++) {
3898 				result = ddi_dma_unbind_handle(
3899 				    tw->tw_isoc_bufs[j].dma_handle);
3900 				ASSERT(result == USB_SUCCESS);
3901 				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
3902 				    mem_handle);
3903 				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
3904 				    dma_handle);
3905 			}
3906 			kmem_free(tw->tw_isoc_bufs, strtlen);
3907 			kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3908 
3909 			return (NULL);
3910 		}
3911 	}
3912 
3913 	tw->tw_ncookies = tmp_req->isoc_pkts_count;
3914 	tw->tw_isoc_strtlen = strtlen;
3915 
3916 	/*
3917 	 * Only allow one wrapper to be added at a time. Insert the
3918 	 * new transaction wrapper into the list for this pipe.
3919 	 */
3920 	if (pp->pp_tw_head == NULL) {
3921 		pp->pp_tw_head = tw;
3922 		pp->pp_tw_tail = tw;
3923 	} else {
3924 		pp->pp_tw_tail->tw_next = tw;
3925 		pp->pp_tw_tail = tw;
3926 		ASSERT(tw->tw_next == NULL);
3927 	}
3928 
3929 	/* Store a back pointer to the pipe private structure */
3930 	tw->tw_pipe_private = pp;
3931 
3932 	/* Store the transfer type - synchronous or asynchronous */
3933 	tw->tw_flags = usb_flags;
3934 
3935 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3936 	    "uhci_create_isoc_transfer_wrapper: tw = 0x%p, ncookies = %u",
3937 	    tw, tw->tw_ncookies);
3938 
3939 	return (tw);
3940 }
3941 
3942 /*
3943  * uhci_insert_isoc_td:
3944  *	- Create transfer wrapper
3945  *	- Allocate memory for the isoc td's
3946  *	- Fill up all the TD's and submit to the HC
3947  *	- Update all the linked lists
3948  */
int
uhci_insert_isoc_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_isoc_req_t		*isoc_req,
	size_t			length,
	usb_flags_t		flags)
{
	int			rval = USB_SUCCESS;
	int			error;
	uint_t			ddic;
	uint32_t		i, j, index;
	uint32_t		bytes_to_xfer;
	uint32_t		expired_frames = 0;
	usb_frame_number_t	start_frame, end_frame, current_frame;
	uhci_td_t		*td_ptr;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: ph = 0x%p isoc req = %p length = %lu",
	    ph, (void *)isoc_req, length);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Allocate a transfer wrapper (includes the per-packet DMA bufs) */
	if ((tw = uhci_create_isoc_transfer_wrapper(uhcip, pp, isoc_req,
	    length, flags)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/* Save current isochronous request pointer */
	tw->tw_curr_xfer_reqp = (usb_opaque_t)isoc_req;

	/*
	 * Initialize the transfer wrapper. These values are useful
	 * for sending back the reply.
	 */
	tw->tw_handle_td		= uhci_handle_isoc_td;
	tw->tw_handle_callback_value	= NULL;
	tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
							PID_OUT : PID_IN;

	/*
	 * If the transfer isoc send, then copy the data from the request
	 * to the transfer wrapper (one DMA buffer per isoc packet).
	 */
	if ((tw->tw_direction == PID_OUT) && length) {
		uchar_t *p;

		ASSERT(isoc_req->isoc_data != NULL);
		p = isoc_req->isoc_data->b_rptr;

		/* Copy the data into the message */
		for (i = 0; i < isoc_req->isoc_pkts_count; i++) {
			ddi_rep_put8(tw->tw_isoc_bufs[i].mem_handle,
			    p, (uint8_t *)tw->tw_isoc_bufs[i].buf_addr,
			    isoc_req->isoc_pkt_descr[i].isoc_pkt_length,
			    DDI_DEV_AUTOINCR);
			p += isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		}
	}

	if (tw->tw_direction == PID_IN) {
		/*
		 * For IN polling, dup the client request; on success
		 * tw_curr_xfer_reqp points at the request the reply
		 * will be delivered on.
		 */
		if ((rval = uhci_allocate_periodic_in_resource(uhcip, pp, tw,
		    flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
			    "uhci_insert_isoc_td: isoc_req_t alloc failed");
			uhci_deallocate_tw(uhcip, pp, tw);

			return (rval);
		}

		isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;
	}

	tw->tw_isoc_req	= (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;

	/* Get the pointer to the isoc_xfer_info structure */
	isoc_xfer_info = (uhci_bulk_isoc_xfer_t *)&tw->tw_xfer_info;
	isoc_xfer_info->num_tds = isoc_req->isoc_pkts_count;

	/*
	 * Allocate memory for isoc tds (one TD per isoc packet, spread
	 * over one or more TD pools)
	 */
	if ((rval = uhci_alloc_bulk_isoc_tds(uhcip, isoc_req->isoc_pkts_count,
	    isoc_xfer_info)) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_bulk_isoc_td: Memory allocation failure");

		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}
		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	/*
	 * Get the isoc td pool address, buffer address and
	 * max packet size that the device supports.
	 */
	td_pool_ptr = &isoc_xfer_info->td_pools[0];
	td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
	index = 0;

	/*
	 * Fill up the isoc tds
	 */
	USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td : isoc pkts %d", isoc_req->isoc_pkts_count);

	/* Walk every pool; "index" is the global isoc packet index */
	for (i = 0; i < isoc_xfer_info->num_pools; i++) {
		for (j = 0; j < td_pool_ptr->num_tds; j++) {
			bytes_to_xfer =
			    isoc_req->isoc_pkt_descr[index].isoc_pkt_length;

			uhci_fill_in_bulk_isoc_td(uhcip, &td_ptr[j],
			    (uhci_td_t *)NULL, HC_END_OF_LIST, ph, index,
			    bytes_to_xfer, tw);
			td_ptr[j].isoc_pkt_index = index;
			index++;
		}

		if (i < (isoc_xfer_info->num_pools - 1)) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i + 1];
			td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
		}
	}

	/*
	 * Get the starting frame number.
	 * The client drivers sets the flag USB_ATTRS_ISOC_XFER_ASAP to inform
	 * the HCD to care of starting frame number.
	 *
	 * Following code is very time critical. So, perform atomic execution.
	 */
	ddic = ddi_enter_critical();
	current_frame = uhci_get_sw_frame_number(uhcip);

	if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_START_FRAME) {
		start_frame = isoc_req->isoc_frame_no;
		end_frame = start_frame + isoc_req->isoc_pkts_count;

		/* Check available frames */
		if ((end_frame - current_frame) < UHCI_MAX_ISOC_FRAMES) {
			if (current_frame > start_frame) {
				if ((current_frame + FRNUM_OFFSET) <
				    end_frame) {
					expired_frames = current_frame +
						FRNUM_OFFSET - start_frame;
					start_frame = current_frame +
							FRNUM_OFFSET;
				} else {
					rval = USB_INVALID_START_FRAME;
				}
			}
		} else {
			rval = USB_INVALID_START_FRAME;
		}

	} else if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_XFER_ASAP) {
		start_frame = pp->pp_frame_num;

		if (start_frame == INVALID_FRNUM) {
			start_frame = current_frame + FRNUM_OFFSET;
		} else if (current_frame > start_frame) {
			start_frame = current_frame + FRNUM_OFFSET;
		}

		end_frame = start_frame + isoc_req->isoc_pkts_count;
		isoc_req->isoc_frame_no = start_frame;

	}

	/*
	 * NOTE(review): if the request carries neither
	 * USB_ATTRS_ISOC_START_FRAME nor USB_ATTRS_ISOC_XFER_ASAP,
	 * start_frame and end_frame are used below without ever being
	 * set -- presumably USBA guarantees one of the two attributes
	 * is always present; confirm against the framework.
	 */
	if (rval != USB_SUCCESS) {

		/* Exit the critical */
		ddi_exit_critical(ddic);

		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: Invalid starting frame number");

		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}

		while (tw->tw_hctd_head) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		/* Release the DMA resources of every TD pool */
		for (i = 0; i < isoc_xfer_info->num_pools; i++) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i];
			error = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
			ASSERT(error == DDI_SUCCESS);
			ddi_dma_mem_free(&td_pool_ptr->mem_handle);
			ddi_dma_free_handle(&td_pool_ptr->dma_handle);
		}
		kmem_free(isoc_xfer_info->td_pools,
		    (sizeof (uhci_bulk_isoc_td_pool_t) *
		    isoc_xfer_info->num_pools));

		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	/*
	 * Packets whose frames have already passed are completed as
	 * USB_CR_NOT_ACCESSED and their TDs are freed without ever
	 * being handed to the HC.
	 */
	for (i = 0; i < expired_frames; i++) {
		isoc_req->isoc_pkt_descr[i].isoc_pkt_status =
							USB_CR_NOT_ACCESSED;
		isoc_req->isoc_pkt_descr[i].isoc_pkt_actual_length =
				isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		uhci_delete_td(uhcip, td_ptr);
		--isoc_xfer_info->num_tds;
	}

	/*
	 * Add the TD's to the HC list: one TD per frame-list entry,
	 * appended to that frame's isoc queue tail (or linked at the
	 * head of the frame list entry when the queue is empty).
	 * The frame list has 1024 entries, hence the & 0x3ff.
	 */
	start_frame = (start_frame & 0x3ff);
	for (; i < isoc_req->isoc_pkts_count; i++) {
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		if (uhcip->uhci_isoc_q_tailp[start_frame]) {
			td_ptr->isoc_prev =
					uhcip->uhci_isoc_q_tailp[start_frame];
			td_ptr->isoc_next = NULL;
			td_ptr->link_ptr =
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr;
			uhcip->uhci_isoc_q_tailp[start_frame]->isoc_next =
								    td_ptr;
			SetTD32(uhcip,
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr,
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
		} else {
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
			td_ptr->isoc_next = NULL;
			td_ptr->isoc_prev = NULL;
			SetTD32(uhcip, td_ptr->link_ptr,
			    GetFL32(uhcip,
				uhcip->uhci_frame_lst_tablep[start_frame]));
			SetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame],
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
		}
		td_ptr->starting_frame = start_frame;

		/* Wrap around at the end of the frame list */
		if (++start_frame == NUM_FRAME_LST_ENTRIES)
			start_frame = 0;
	}

	ddi_exit_critical(ddic);
	pp->pp_frame_num = end_frame;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: current frame number 0x%llx, pipe frame num"
	    " 0x%llx", current_frame, pp->pp_frame_num);

	return (rval);
}
4217 
4218 
4219 /*
4220  * uhci_get_isoc_td_by_index:
4221  *	Obtain the addresses of the TD pool and the TD at the index.
4222  *
4223  * tdpp - pointer to the address of the TD at the isoc packet index
4224  * td_pool_pp - pointer to the address of the TD pool containing
4225  *              the specified TD
4226  */
4227 /* ARGSUSED */
4228 static void
4229 uhci_get_isoc_td_by_index(
4230 	uhci_state_t			*uhcip,
4231 	uhci_bulk_isoc_xfer_t		*info,
4232 	uint_t				index,
4233 	uhci_td_t			**tdpp,
4234 	uhci_bulk_isoc_td_pool_t	**td_pool_pp)
4235 {
4236 	uint_t			i = 0, j = 0;
4237 	uhci_td_t		*td_ptr;
4238 
4239 	while (j < info->num_pools) {
4240 		if ((i + info->td_pools[j].num_tds) <= index) {
4241 			i += info->td_pools[j].num_tds;
4242 			j++;
4243 		} else {
4244 			i = index - i;
4245 
4246 			break;
4247 		}
4248 	}
4249 
4250 	ASSERT(j < info->num_pools);
4251 	*td_pool_pp = &info->td_pools[j];
4252 	td_ptr = (uhci_td_t *)((*td_pool_pp)->pool_addr);
4253 	*tdpp = &td_ptr[i];
4254 }
4255 
4256 
4257 /*
4258  * uhci_handle_isoc_td:
4259  *	Handles the completed isoc tds
4260  */
4261 void
4262 uhci_handle_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
4263 {
4264 	uint_t			rval, i;
4265 	uint32_t		pkt_index = td->isoc_pkt_index;
4266 	usb_cr_t		cr;
4267 	uhci_trans_wrapper_t	*tw = td->tw;
4268 	usb_isoc_req_t		*isoc_req = (usb_isoc_req_t *)tw->tw_isoc_req;
4269 	uhci_pipe_private_t	*pp = tw->tw_pipe_private;
4270 	uhci_bulk_isoc_xfer_t	*isoc_xfer_info = &tw->tw_xfer_info;
4271 	usba_pipe_handle_data_t	*usb_pp;
4272 	uhci_bulk_isoc_td_pool_t *td_pool_ptr;
4273 
4274 	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4275 	    "uhci_handle_isoc_td: td = 0x%p, pp = 0x%p, tw = 0x%p, req = 0x%p, "
4276 	    "index = %x", td, pp, tw, isoc_req, pkt_index);
4277 
4278 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4279 
4280 	usb_pp = pp->pp_pipe_handle;
4281 
4282 	/*
4283 	 * Check whether there are any errors occurred. If so, update error
4284 	 * count and return it to the upper.But never return a non zero
4285 	 * completion reason.
4286 	 */
4287 	cr = USB_CR_OK;
4288 	if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
4289 		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4290 		    "uhci_handle_isoc_td: Error Occurred: TD Status = %x",
4291 		    GetTD_status(uhcip, td));
4292 		isoc_req->isoc_error_count++;
4293 	}
4294 
4295 	if (isoc_req != NULL) {
4296 		isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_status = cr;
4297 		isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_actual_length =
4298 				(GetTD_alen(uhcip, td) == ZERO_LENGTH) ? 0 :
4299 				GetTD_alen(uhcip, td) + 1;
4300 	}
4301 
4302 	uhci_delete_isoc_td(uhcip, td);
4303 
4304 	if (--isoc_xfer_info->num_tds != 0) {
4305 		USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4306 		    "uhci_handle_isoc_td: Number of TDs %d",
4307 		    isoc_xfer_info->num_tds);
4308 
4309 		return;
4310 	}
4311 
4312 	tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;
4313 	if (tw->tw_direction == PID_IN) {
4314 		uhci_sendup_td_message(uhcip, cr, tw);
4315 
4316 		if ((uhci_handle_isoc_receive(uhcip, pp, tw)) != USB_SUCCESS) {
4317 			USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4318 			    "uhci_handle_isoc_td: Drop message");
4319 		}
4320 
4321 	} else {
4322 		/* update kstats only for OUT. sendup_td_msg() does it for IN */
4323 		uhci_do_byte_stats(uhcip, tw->tw_length,
4324 		    usb_pp->p_ep.bmAttributes, usb_pp->p_ep.bEndpointAddress);
4325 
4326 		uhci_hcdi_callback(uhcip, pp, usb_pp, tw, USB_CR_OK);
4327 	}
4328 
4329 	for (i = 0; i < isoc_xfer_info->num_pools; i++) {
4330 		td_pool_ptr = &isoc_xfer_info->td_pools[i];
4331 		rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
4332 		ASSERT(rval == DDI_SUCCESS);
4333 		ddi_dma_mem_free(&td_pool_ptr->mem_handle);
4334 		ddi_dma_free_handle(&td_pool_ptr->dma_handle);
4335 	}
4336 	kmem_free(isoc_xfer_info->td_pools,
4337 	    (sizeof (uhci_bulk_isoc_td_pool_t) *
4338 	    isoc_xfer_info->num_pools));
4339 	uhci_deallocate_tw(uhcip, pp, tw);
4340 }
4341 
4342 
4343 /*
4344  * uhci_handle_isoc_receive:
4345  *	- Sends the isoc data to the client
4346  *	- Inserts another isoc receive request
4347  */
4348 static int
4349 uhci_handle_isoc_receive(
4350 	uhci_state_t		*uhcip,
4351 	uhci_pipe_private_t	*pp,
4352 	uhci_trans_wrapper_t	*tw)
4353 {
4354 	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4355 	    "uhci_handle_isoc_receive: tw = 0x%p", tw);
4356 
4357 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4358 
4359 	/*
4360 	 * -- check for pipe state being polling before
4361 	 * inserting a new request. Check when is TD
4362 	 * de-allocation being done? (so we can reuse the same TD)
4363 	 */
4364 	if (uhci_start_isoc_receive_polling(uhcip,
4365 	    pp->pp_pipe_handle, (usb_isoc_req_t *)tw->tw_curr_xfer_reqp,
4366 	    0) != USB_SUCCESS) {
4367 		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4368 		    "uhci_handle_isoc_receive: receive polling failed");
4369 
4370 		return (USB_FAILURE);
4371 	}
4372 
4373 	return (USB_SUCCESS);
4374 }
4375 
4376 
4377 /*
4378  * uhci_delete_isoc_td:
4379  *	- Delete from the outstanding command queue
4380  *	- Delete from the tw queue
4381  *	- Delete from the isoc queue
4382  *	- Delete from the HOST CONTROLLER list
4383  */
4384 static void
4385 uhci_delete_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
4386 {
4387 	uint32_t	starting_frame = td->starting_frame;
4388 
4389 	if ((td->isoc_next == NULL) && (td->isoc_prev == NULL)) {
4390 		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
4391 			GetTD32(uhcip, td->link_ptr));
4392 		uhcip->uhci_isoc_q_tailp[starting_frame] = 0;
4393 	} else if (td->isoc_next == NULL) {
4394 		td->isoc_prev->link_ptr = td->link_ptr;
4395 		td->isoc_prev->isoc_next = NULL;
4396 		uhcip->uhci_isoc_q_tailp[starting_frame] = td->isoc_prev;
4397 	} else if (td->isoc_prev == NULL) {
4398 		td->isoc_next->isoc_prev = NULL;
4399 		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
4400 		    GetTD32(uhcip, td->link_ptr));
4401 	} else {
4402 		td->isoc_prev->isoc_next = td->isoc_next;
4403 		td->isoc_next->isoc_prev = td->isoc_prev;
4404 		td->isoc_prev->link_ptr = td->link_ptr;
4405 	}
4406 
4407 	uhci_delete_td(uhcip, td);
4408 }
4409 
4410 
4411 /*
4412  * uhci_send_isoc_receive
4413  *	- Allocates usb_isoc_request
4414  *	- Updates the isoc request
4415  *	- Inserts the isoc td's into the HC processing list.
4416  */
4417 int
4418 uhci_start_isoc_receive_polling(
4419 	uhci_state_t		*uhcip,
4420 	usba_pipe_handle_data_t	*ph,
4421 	usb_isoc_req_t		*isoc_req,
4422 	usb_flags_t		usb_flags)
4423 {
4424 	int			ii, error;
4425 	size_t			max_isoc_xfer_size, length;
4426 	ushort_t		isoc_pkt_count;
4427 	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
4428 	usb_isoc_pkt_descr_t	*isoc_pkt_descr;
4429 
4430 	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4431 	    "uhci_start_isoc_receive_polling: usb_flags = %x", usb_flags);
4432 
4433 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4434 
4435 	max_isoc_xfer_size = ph->p_ep.wMaxPacketSize * UHCI_MAX_ISOC_PKTS;
4436 
4437 	if (isoc_req) {
4438 		isoc_pkt_descr = isoc_req->isoc_pkt_descr;
4439 		isoc_pkt_count = isoc_req->isoc_pkts_count;
4440 	} else {
4441 		isoc_pkt_descr = ((usb_isoc_req_t *)
4442 			pp->pp_client_periodic_in_reqp)->isoc_pkt_descr;
4443 		isoc_pkt_count = ((usb_isoc_req_t *)
4444 			pp->pp_client_periodic_in_reqp)->isoc_pkts_count;
4445 	}
4446 
4447 	for (ii = 0, length = 0; ii < isoc_pkt_count; ii++) {
4448 		length += isoc_pkt_descr->isoc_pkt_length;
4449 		isoc_pkt_descr++;
4450 	}
4451 
4452 	/* Check the size of isochronous request */
4453 	if (length > max_isoc_xfer_size) {
4454 		USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4455 		    "uhci_start_isoc_receive_polling: "
4456 		    "Max isoc request size = %lx, Given isoc req size = %lx",
4457 		    max_isoc_xfer_size, length);
4458 
4459 		return (USB_FAILURE);
4460 	}
4461 
4462 	/* Add the TD into the Host Controller's isoc list */
4463 	error = uhci_insert_isoc_td(uhcip, ph, isoc_req, length, usb_flags);
4464 
4465 	return (error);
4466 }
4467 
4468 
4469 /*
4470  * uhci_remove_isoc_tds_tws
4471  *	This routine scans the pipe and removes all the td's
4472  *	and transfer wrappers and deallocates the memory
4473  *	associated with those td's and tw's.
4474  */
4475 void
4476 uhci_remove_isoc_tds_tws(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
4477 {
4478 	uint_t			rval, i;
4479 	uhci_td_t		*tmp_td, *td_head;
4480 	usb_isoc_req_t		*isoc_req;
4481 	uhci_trans_wrapper_t	*tmp_tw, *tw_head;
4482 	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
4483 	uhci_bulk_isoc_td_pool_t *td_pool_ptr;
4484 
4485 	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4486 	    "uhci_remove_isoc_tds_tws: pp = %p", (void *)pp);
4487 
4488 	tw_head = pp->pp_tw_head;
4489 	while (tw_head) {
4490 		tmp_tw = tw_head;
4491 		tw_head = tw_head->tw_next;
4492 		td_head = tmp_tw->tw_hctd_head;
4493 		if (tmp_tw->tw_direction == PID_IN) {
4494 			uhci_deallocate_periodic_in_resource(uhcip, pp,
4495 								tmp_tw);
4496 		} else if (tmp_tw->tw_direction == PID_OUT) {
4497 			uhci_hcdi_callback(uhcip, pp, pp->pp_pipe_handle,
4498 			    tmp_tw, USB_CR_FLUSHED);
4499 		}
4500 
4501 		while (td_head) {
4502 			tmp_td = td_head;
4503 			td_head = td_head->tw_td_next;
4504 			uhci_delete_isoc_td(uhcip, tmp_td);
4505 		}
4506 
4507 		isoc_req = (usb_isoc_req_t *)tmp_tw->tw_isoc_req;
4508 		if (isoc_req) {
4509 			usb_free_isoc_req(isoc_req);
4510 		}
4511 
4512 		ASSERT(tmp_tw->tw_hctd_head == NULL);
4513 
4514 		if (tmp_tw->tw_xfer_info.td_pools) {
4515 			isoc_xfer_info =
4516 			    (uhci_bulk_isoc_xfer_t *)&tmp_tw->tw_xfer_info;
4517 			for (i = 0; i < isoc_xfer_info->num_pools; i++) {
4518 				td_pool_ptr = &isoc_xfer_info->td_pools[i];
4519 				rval = ddi_dma_unbind_handle(
4520 				    td_pool_ptr->dma_handle);
4521 				ASSERT(rval == DDI_SUCCESS);
4522 				ddi_dma_mem_free(&td_pool_ptr->mem_handle);
4523 				ddi_dma_free_handle(&td_pool_ptr->dma_handle);
4524 			}
4525 			kmem_free(isoc_xfer_info->td_pools,
4526 			    (sizeof (uhci_bulk_isoc_td_pool_t) *
4527 			    isoc_xfer_info->num_pools));
4528 		}
4529 
4530 		uhci_deallocate_tw(uhcip, pp, tmp_tw);
4531 	}
4532 }
4533 
4534 
/*
 * uhci_isoc_update_sw_frame_number()
 *	Refresh the software frame counter so its wrap-around tracking
 *	stays current; the returned frame number is not needed here.
 *	To avoid code duplication, call uhci_get_sw_frame_number().
 */
void
uhci_isoc_update_sw_frame_number(uhci_state_t *uhcip)
{
	(void) uhci_get_sw_frame_number(uhcip);
}
4544 
4545 
/*
 * uhci_get_sw_frame_number:
 *	Return the current 64-bit software frame number, built by merging
 *	the 11-bit hardware frame counter (FRNUM register) with the
 *	software counter that tracks hardware wrap-arounds; the merged
 *	value is also stored back into uhcip->uhci_sw_frnum.
 *
 *	Hold the uhci_int_mutex before calling this routine.
 */
uint64_t
uhci_get_sw_frame_number(uhci_state_t *uhcip)
{
	uint64_t sw_frnum, hw_frnum, current_frnum;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	sw_frnum = uhcip->uhci_sw_frnum;
	hw_frnum = Get_OpReg16(FRNUM);

	/*
	 * Check bit 10 in the software counter and hardware frame counter.
	 * If both are same, then don't increment the software frame counter
	 * (Bit 10 of hw frame counter toggle for every 1024 frames)
	 * The lower 11 bits of software counter contains the hardware frame
	 * counter value. The MSB (bit 10) of software counter is incremented
	 * for every 1024 frames either here or in get frame number routine.
	 */
	if ((sw_frnum & UHCI_BIT_10_MASK) == (hw_frnum & UHCI_BIT_10_MASK)) {
		/* The MSB of hw counter did not toggle */
		current_frnum = ((sw_frnum & (SW_FRNUM_MASK)) | hw_frnum);
	} else {
		/*
		 * The hw counter wrapped around. And the interrupt handler
		 * did not get a chance to update the sw frame counter.
		 * So, update the sw frame counter and return correct frame no.
		 */
		sw_frnum >>= UHCI_SIZE_OF_HW_FRNUM - 1;
		current_frnum =
		    ((++sw_frnum << (UHCI_SIZE_OF_HW_FRNUM - 1)) | hw_frnum);
	}
	uhcip->uhci_sw_frnum = current_frnum;

	/*
	 * NOTE(review): %ld does not match the uint64_t arguments on
	 * 32-bit kernels -- %llx/%llu would be correct; confirm against
	 * the driver's other DPRINTFs (debug-only output).
	 */
	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_get_sw_frame_number: sw=%ld hd=%ld",
	    uhcip->uhci_sw_frnum, hw_frnum);

	return (current_frnum);
}
4589 
4590 
/*
 * uhci_cmd_timeout_hdlr:
 *	This routine will get called for every second. It checks for
 *	timed out control commands/bulk commands. Timeout any commands
 *	that exceeds the time out period specified by the pipe policy.
 *
 *	Two passes over the outstanding TD list are made:
 *	1. decrement each wrapper's timeout count once per tick and, on
 *	   expiry, terminate the TD/QH chains so the HC stops using them;
 *	2. after waiting a frame boundary (so the HC is guaranteed to
 *	   have let go), mark the timed-out TDs with a CRC/timeout status
 *	   for the submitted-td processing code to complete.
 */
void
uhci_cmd_timeout_hdlr(void *arg)
{
	uint_t			flag = B_FALSE;
	uhci_td_t		*head, *tmp_td;
	uhci_state_t		*uhcip = (uhci_state_t *)arg;
	uhci_pipe_private_t	*pp;

	/*
	 * Check whether any of the control xfers are timed out.
	 * If so, complete those commands with time out as reason.
	 */
	mutex_enter(&uhcip->uhci_int_mutex);
	head = uhcip->uhci_outst_tds_head;

	while (head) {
		/*
		 * If timeout out is zero, then dont timeout command.
		 */
		if (head->tw->tw_timeout_cnt == 0)  {
			head = head->outst_td_next;
			continue;
		}

		/*
		 * TW_TIMEOUT_FLAG ensures a wrapper's count is only
		 * decremented once per tick even though several of its
		 * TDs appear on the outstanding list.
		 */
		if (!(head->tw->tw_flags & TW_TIMEOUT_FLAG)) {
			head->tw->tw_flags |= TW_TIMEOUT_FLAG;
			--head->tw->tw_timeout_cnt;
		}

		/* only do it for bulk and control TDs */
		if ((head->tw->tw_timeout_cnt == 0) &&
		    (head->tw->tw_handle_td != uhci_handle_isoc_td)) {

			USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "Command timed out: td = %p", (void *)head);

			head->tw->tw_claim = UHCI_TIMEOUT_HDLR_CLAIMED;

			/*
			 * Check finally whether the command completed
			 */
			if (GetTD_status(uhcip, head) & UHCI_TD_ACTIVE) {
				/*
				 * Still active: terminate both the TD
				 * link and the queue head element so the
				 * HC abandons this transfer.
				 */
				SetTD32(uhcip, head->link_ptr,
				    GetTD32(uhcip, head->link_ptr) |
				    HC_END_OF_LIST);
				pp = head->tw->tw_pipe_private;
				SetQH32(uhcip, pp->pp_qh->element_ptr,
				    GetQH32(uhcip, pp->pp_qh->element_ptr) |
				    HC_END_OF_LIST);
			}

			flag = B_TRUE;
		}

		head = head->outst_td_next;
	}

	/*
	 * Wait for a frame boundary so the terminations above are seen
	 * by the HC before the TDs are touched again.
	 */
	if (flag) {
		(void) uhci_wait_for_sof(uhcip);
	}

	head = uhcip->uhci_outst_tds_head;
	while (head) {
		if (head->tw->tw_flags & TW_TIMEOUT_FLAG) {
			head->tw->tw_flags &= ~TW_TIMEOUT_FLAG;
		}
		if (head->tw->tw_claim == UHCI_TIMEOUT_HDLR_CLAIMED) {
			head->tw->tw_claim = UHCI_NOT_CLAIMED;
			tmp_td = head->tw->tw_hctd_head;
			/* Flag every TD of the timed-out wrapper */
			while (tmp_td) {
				SetTD_status(uhcip, tmp_td,
						UHCI_TD_CRC_TIMEOUT);
				tmp_td = tmp_td->tw_td_next;
			}
		}
		head = head->outst_td_next;
	}

	/*
	 * Process the td which was completed before shifting from normal
	 * mode to polled mode
	 */
	if (uhcip->uhci_polled_flag == UHCI_POLLED_FLAG_TRUE) {
		uhci_process_submitted_td_queue(uhcip);
		uhcip->uhci_polled_flag = UHCI_POLLED_FLAG_FALSE;
	} else if (flag) {
		/* Process the completed/timed out commands */
		uhci_process_submitted_td_queue(uhcip);
	}

	/* Re-register the control/bulk/intr commands' timeout handler */
	if (uhcip->uhci_cmd_timeout_id) {
		uhcip->uhci_cmd_timeout_id = timeout(uhci_cmd_timeout_hdlr,
		    (void *)uhcip, UHCI_ONE_SECOND);
	}

	mutex_exit(&uhcip->uhci_int_mutex);
}
4695 
4696 
/*
 * uhci_wait_for_sof:
 *	Wait for the start of the next frame (implying any changes made in the
 *	lattice have now taken effect).
 *	To be sure this is the case, we wait for the completion of the current
 *	frame (which might have already been pending), then another complete
 *	frame to ensure everything has taken effect.
 *
 *	Returns USB_SUCCESS when an SOF interrupt was observed, else
 *	USB_FAILURE.  Must be called with uhci_int_mutex held.
 */
int
uhci_wait_for_sof(uhci_state_t *uhcip)
{
	int	n;
	ushort_t    cmd_reg;
	usb_frame_number_t	before_frame_number, after_frame_number;
	clock_t	time, rval;
	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_wait_for_sof: uhcip = %p", uhcip);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
	before_frame_number =  uhci_get_sw_frame_number(uhcip);
	for (n = 0; n < MAX_SOF_WAIT_COUNT; n++) {
		/* Arm the SOF TD to interrupt at the next frame boundary */
		SetTD_ioc(uhcip, uhcip->uhci_sof_td, 1);
		uhcip->uhci_cv_signal = B_TRUE;

		time = ddi_get_lbolt() + UHCI_ONE_SECOND;
		rval = cv_timedwait(&uhcip->uhci_cv_SOF,
			&uhcip->uhci_int_mutex, time);

		after_frame_number = uhci_get_sw_frame_number(uhcip);
		if ((rval == -1) &&
		    (after_frame_number <= before_frame_number)) {
			/*
			 * Timed out with no frame progress: the HC looks
			 * stopped -- set it running again and re-enable
			 * interrupts before rechecking the frame number.
			 */
			cmd_reg = Get_OpReg16(USBCMD);
			Set_OpReg16(USBCMD, (cmd_reg | USBCMD_REG_HC_RUN));
			Set_OpReg16(USBINTR, ENABLE_ALL_INTRS);
			after_frame_number = uhci_get_sw_frame_number(uhcip);
		}
		before_frame_number = after_frame_number;
	}

	SetTD_ioc(uhcip, uhcip->uhci_sof_td, 0);

	/*
	 * uhci_cv_signal is cleared by the SOF interrupt handler; if it
	 * is still set, no SOF was ever seen.
	 */
	return (uhcip->uhci_cv_signal ? USB_FAILURE : USB_SUCCESS);

}
4741 
/*
 * uhci_allocate_periodic_in_resource:
 *	Allocate interrupt/isochronous request structure for the
 *	interrupt/isochronous IN transfer.
 *
 *	Returns USB_SUCCESS (also when a request is already attached to
 *	the wrapper) or USB_NO_RESOURCES if the dup fails.  On success
 *	ph->p_req_count is bumped; the matching decrement happens in
 *	uhci_deallocate_periodic_in_resource().
 */
int
uhci_allocate_periodic_in_resource(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw,
	usb_flags_t		flags)
{
	size_t			length = 0;
	usb_opaque_t		client_periodic_in_reqp;
	usb_intr_req_t		*cur_intr_req;
	usb_isoc_req_t		*curr_isoc_reqp;
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_allocate_periodic_in_resource:\n\t"
	    "ph = 0x%p, pp = 0x%p, tw = 0x%p, flags = 0x%x", ph, pp, tw, flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Check the current periodic in request pointer; a request
	 * already in place is treated as success (nothing to allocate).
	 */
	if (tw->tw_curr_xfer_reqp) {
		USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
		    "uhci_allocate_periodic_in_resource: Interrupt "
		    "request structure already exists: "
		    "allocation failed");

		return (USB_SUCCESS);
	}

	/* Get the client periodic in request pointer */
	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;

	/*
	 * If it a periodic IN request and periodic request is NULL,
	 * allocate corresponding usb periodic IN request for the
	 * current periodic polling request and copy the information
	 * from the saved periodic request structure.
	 */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_INTR) {
		/*
		 * Get the interrupt transfer length.
		 * NOTE(review): unlike the isoc branch below, this path
		 * dereferences client_periodic_in_reqp without a NULL
		 * check -- presumably callers guarantee it is set for
		 * interrupt IN polling; confirm.
		 */
		length = ((usb_intr_req_t *)client_periodic_in_reqp)->
				intr_len;

		cur_intr_req = usba_hcdi_dup_intr_req(ph->p_dip,
		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
		if (cur_intr_req == NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Interrupt "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/* Check and save the timeout value */
		tw->tw_timeout_cnt = (cur_intr_req->intr_attributes &
		    USB_ATTRS_ONE_XFER) ? cur_intr_req->intr_timeout: 0;
		tw->tw_curr_xfer_reqp = (usb_opaque_t)cur_intr_req;
		tw->tw_length = cur_intr_req->intr_len;
	} else {
		ASSERT(client_periodic_in_reqp != NULL);

		if ((curr_isoc_reqp = usba_hcdi_dup_isoc_req(ph->p_dip,
		    (usb_isoc_req_t *)client_periodic_in_reqp, flags)) ==
		    NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Isochronous "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/*
		 * Save the client's isochronous request pointer and
		 * length of isochronous transfer in transfer wrapper.
		 * The dup'ed request is saved in pp_client_periodic_in_reqp
		 */
		tw->tw_curr_xfer_reqp =
				(usb_opaque_t)pp->pp_client_periodic_in_reqp;
		pp->pp_client_periodic_in_reqp = (usb_opaque_t)curr_isoc_reqp;
		tw->tw_length = curr_isoc_reqp->isoc_pkts_length;
	}

	mutex_enter(&ph->p_mutex);
	ph->p_req_count++;
	mutex_exit(&ph->p_mutex);

	return (USB_SUCCESS);
}
4835 
4836 
4837 /*
4838  * uhci_deallocate_periodic_in_resource:
4839  *	Deallocate interrupt/isochronous request structure for the
4840  *	interrupt/isochronous IN transfer.
4841  */
4842 void
4843 uhci_deallocate_periodic_in_resource(
4844 	uhci_state_t		*uhcip,
4845 	uhci_pipe_private_t	*pp,
4846 	uhci_trans_wrapper_t	*tw)
4847 {
4848 	usb_opaque_t		curr_xfer_reqp;
4849 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
4850 
4851 	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
4852 	    "uhci_deallocate_periodic_in_resource: "
4853 	    "pp = 0x%p tw = 0x%p", pp, tw);
4854 
4855 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4856 
4857 	curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4858 	if (curr_xfer_reqp) {
4859 		/*
4860 		 * Reset periodic in request usb isoch
4861 		 * packet request pointers to null.
4862 		 */
4863 		tw->tw_curr_xfer_reqp = NULL;
4864 		tw->tw_isoc_req = NULL;
4865 
4866 		mutex_enter(&ph->p_mutex);
4867 		ph->p_req_count--;
4868 		mutex_exit(&ph->p_mutex);
4869 
4870 		/*
4871 		 * Free pre-allocated interrupt or isochronous requests.
4872 		 */
4873 		switch (UHCI_XFER_TYPE(&ph->p_ep)) {
4874 		case USB_EP_ATTR_INTR:
4875 			usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
4876 			break;
4877 		case USB_EP_ATTR_ISOCH:
4878 			usb_free_isoc_req((usb_isoc_req_t *)curr_xfer_reqp);
4879 			break;
4880 		}
4881 	}
4882 }
4883 
4884 
4885 /*
4886  * uhci_hcdi_callback()
4887  *	convenience wrapper around usba_hcdi_callback()
4888  */
4889 void
4890 uhci_hcdi_callback(uhci_state_t *uhcip, uhci_pipe_private_t *pp,
4891     usba_pipe_handle_data_t *ph, uhci_trans_wrapper_t *tw, usb_cr_t cr)
4892 {
4893 	usb_opaque_t	curr_xfer_reqp;
4894 
4895 	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
4896 	    "uhci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x", ph, tw, cr);
4897 
4898 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4899 
4900 	if (tw && tw->tw_curr_xfer_reqp) {
4901 		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4902 		tw->tw_curr_xfer_reqp = NULL;
4903 		tw->tw_isoc_req = NULL;
4904 	} else {
4905 		ASSERT(pp->pp_client_periodic_in_reqp != NULL);
4906 
4907 		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
4908 		pp->pp_client_periodic_in_reqp = NULL;
4909 	}
4910 
4911 	ASSERT(curr_xfer_reqp != NULL);
4912 
4913 	mutex_exit(&uhcip->uhci_int_mutex);
4914 	usba_hcdi_cb(ph, curr_xfer_reqp, cr);
4915 	mutex_enter(&uhcip->uhci_int_mutex);
4916 }
4917 
4918 
4919 #ifdef DEBUG
4920 static void
4921 uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td)
4922 {
4923 	uint_t	*ptr = (uint_t *)td;
4924 
4925 #ifndef lint
4926 	_NOTE(NO_COMPETING_THREADS_NOW);
4927 #endif
4928 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
4929 	    "\tDWORD 1 0x%x\t DWORD 2 0x%x", ptr[0], ptr[1]);
4930 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
4931 	    "\tDWORD 3 0x%x\t DWORD 4 0x%x", ptr[2], ptr[3]);
4932 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
4933 	    "\tBytes xfered    = %d", td->tw->tw_bytes_xfered);
4934 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
4935 	    "\tBytes Pending   = %d", td->tw->tw_bytes_pending);
4936 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
4937 	    "Queue Head Details:");
4938 	uhci_print_qh(uhcip, td->tw->tw_pipe_private->pp_qh);
4939 
4940 #ifndef lint
4941 	_NOTE(COMPETING_THREADS_NOW);
4942 #endif
4943 }
4944 
4945 
4946 static void
4947 uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh)
4948 {
4949 	uint_t	*ptr = (uint_t *)qh;
4950 
4951 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
4952 	    "\tLink Ptr = %x Element Ptr = %x", ptr[0], ptr[1]);
4953 }
4954 #endif
4955