xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/uhci/uhciutil.c (revision c211fc479225fa54805cf480633bf6689ca9a2db)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 
27 /*
28  * Universal Host Controller Driver (UHCI)
29  *
30  * The UHCI driver is a driver which interfaces to the Universal
31  * Serial Bus Driver (USBA) and the Host Controller (HC). The interface to
32  * the Host Controller is defined by the UHCI.
33  * This file contains misc functions.
34  */
35 #include <sys/usb/hcd/uhci/uhcid.h>
36 #include <sys/usb/hcd/uhci/uhciutil.h>
37 #include <sys/usb/hcd/uhci/uhcipolled.h>
38 
39 #include <sys/disp.h>
40 
41 /* Globals */
42 extern uint_t	uhci_td_pool_size;			/* Num TDs */
43 extern uint_t	uhci_qh_pool_size;			/* Num QHs */
44 extern ushort_t	uhci_tree_bottom_nodes[];
45 extern void	*uhci_statep;
46 
47 /* function prototypes */
48 static void	uhci_build_interrupt_lattice(uhci_state_t *uhcip);
49 static int	uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip);
50 
51 static uint_t	uhci_lattice_height(uint_t bandwidth);
52 static uint_t	uhci_lattice_parent(uint_t node);
53 static uint_t	uhci_leftmost_leaf(uint_t node, uint_t height);
54 static uint_t	uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
55 		    usb_port_status_t port_status);
56 
57 static int	uhci_bandwidth_adjust(uhci_state_t *uhcip,
58 		    usb_ep_descr_t *endpoint, usb_port_status_t port_status);
59 
60 static uhci_td_t *uhci_allocate_td_from_pool(uhci_state_t *uhcip);
61 static void	uhci_fill_in_td(uhci_state_t *uhcip,
62 		    uhci_td_t *td, uhci_td_t *current_dummy,
63 		    uint32_t buffer_offset, size_t length,
64 		    uhci_pipe_private_t	*pp, uchar_t PID,
65 		    usb_req_attrs_t attrs, uhci_trans_wrapper_t *tw);
66 static uint32_t	uhci_get_tw_paddr_by_offs(uhci_state_t *uhcip,
67 		    uint32_t buffer_offset, size_t length,
68 		    uhci_trans_wrapper_t *tw);
69 static uhci_trans_wrapper_t *uhci_create_transfer_wrapper(
70 		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
71 		    size_t length, usb_flags_t usb_flags);
72 static uhci_trans_wrapper_t *uhci_create_isoc_transfer_wrapper(
73 		    uhci_state_t *uhcip, uhci_pipe_private_t *pp,
74 		    usb_isoc_req_t *req, size_t length,
75 		    usb_flags_t usb_flags);
76 
77 static int	uhci_create_setup_pkt(uhci_state_t *uhcip,
78 		    uhci_pipe_private_t	*pp, uhci_trans_wrapper_t *tw);
79 static void	uhci_insert_ctrl_qh(uhci_state_t *uhcip,
80 		    uhci_pipe_private_t *pp);
81 static void	uhci_remove_ctrl_qh(uhci_state_t *uhcip,
82 		    uhci_pipe_private_t *pp);
83 static void	uhci_insert_intr_qh(uhci_state_t *uhcip,
84 		    uhci_pipe_private_t *pp);
85 static void	uhci_remove_intr_qh(uhci_state_t *uhcip,
86 		    uhci_pipe_private_t *pp);
87 static void	uhci_remove_bulk_qh(uhci_state_t *uhcip,
88 		    uhci_pipe_private_t *pp);
89 static void	uhci_insert_bulk_qh(uhci_state_t *uhcip,
90 		    uhci_pipe_private_t *pp);
91 static void	uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td);
92 static int	uhci_alloc_memory_for_tds(uhci_state_t *uhcip, uint_t num_tds,
93 		    uhci_bulk_isoc_xfer_t *info);
94 static int	uhci_alloc_bulk_isoc_tds(uhci_state_t *uhcip, uint_t num_tds,
95 		    uhci_bulk_isoc_xfer_t *info);
96 static void	uhci_get_isoc_td_by_index(uhci_state_t *uhcip,
97 		    uhci_bulk_isoc_xfer_t *info, uint_t index,
98 		    uhci_td_t **tdpp, uhci_bulk_isoc_td_pool_t **td_pool_pp);
99 static void	uhci_get_bulk_td_by_paddr(uhci_state_t *uhcip,
100 		    uhci_bulk_isoc_xfer_t *info, uint32_t paddr,
101 		    uhci_bulk_isoc_td_pool_t **td_pool_pp);
102 
103 static	int	uhci_handle_isoc_receive(uhci_state_t *uhcip,
104 		uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw);
105 static void	uhci_delete_isoc_td(uhci_state_t *uhcip,
106 		    uhci_td_t *td);
107 #ifdef DEBUG
108 static void	uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td);
109 static void	uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh);
110 #endif
111 
112 
113 /*
114  * uhci_build_interrupt_lattice:
115  *
116  * Construct the interrupt lattice tree using static Queue Head pointers.
117  * This interrupt lattice tree will have total of 63 queue heads and the
118  * Host Controller (HC) processes queue heads every frame.
119  */
120 static void
121 uhci_build_interrupt_lattice(uhci_state_t *uhcip)
122 {
123 	int			half_list = NUM_INTR_QH_LISTS / 2;
124 	uint16_t		i, j, k;
125 	uhci_td_t		*sof_td, *isoc_td;
126 	uintptr_t		addr;
127 	queue_head_t		*list_array = uhcip->uhci_qh_pool_addr;
128 	queue_head_t		*tmp_qh;
129 	frame_lst_table_t	*frame_lst_tablep =
130 	    uhcip->uhci_frame_lst_tablep;
131 
132 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
133 	    "uhci_build_interrupt_lattice:");
134 
135 	/*
136 	 * Reserve the first 63 queue head structures in the pool as static
137 	 * queue heads & these are required for constructing interrupt
138 	 * lattice tree.
139 	 */
140 	for (i = 0; i < NUM_INTR_QH_LISTS; i++) {
141 		SetQH32(uhcip, list_array[i].link_ptr, HC_END_OF_LIST);
142 		SetQH32(uhcip, list_array[i].element_ptr, HC_END_OF_LIST);
143 		list_array[i].qh_flag		= QUEUE_HEAD_FLAG_STATIC;
144 		list_array[i].node		= i;
145 	}
146 
147 	/* Build the interrupt lattice tree */
148 	for (i = 0; i < half_list - 1; i++) {
149 		/*
150 		 * The next  pointer in the host controller  queue head
151 		 * descriptor must contain an iommu address. Calculate
152 		 * the offset into the cpu address and add this to the
153 		 * starting iommu address.
154 		 */
155 		addr = QH_PADDR(&list_array[i]) | HC_QUEUE_HEAD;
156 
157 		SetQH32(uhcip, list_array[2*i + 1].link_ptr, addr);
158 		SetQH32(uhcip, list_array[2*i + 2].link_ptr, addr);
159 	}
160 
161 	/*
162 	 * Initialize the interrupt list in the Frame list Table
163 	 * so that it points to the bottom of the tree.
164 	 */
165 	for (i = 0, j = 0; i < pow_2(TREE_HEIGHT); i++) {
166 		addr = QH_PADDR(&list_array[half_list + i - 1]);
167 		for (k = 0; k <  pow_2(VIRTUAL_TREE_HEIGHT); k++) {
168 			SetFL32(uhcip,
169 			    frame_lst_tablep[uhci_tree_bottom_nodes[j++]],
170 			    addr | HC_QUEUE_HEAD);
171 		}
172 	}
173 
174 	/*
175 	 *  Create a controller and bulk Queue heads
176 	 */
177 	uhcip->uhci_ctrl_xfers_q_head = uhci_alloc_queue_head(uhcip);
178 	tmp_qh = uhcip->uhci_ctrl_xfers_q_tail = uhcip->uhci_ctrl_xfers_q_head;
179 
180 	SetQH32(uhcip, list_array[0].link_ptr,
181 	    (QH_PADDR(tmp_qh) | HC_QUEUE_HEAD));
182 
183 	uhcip->uhci_bulk_xfers_q_head = uhci_alloc_queue_head(uhcip);
184 	uhcip->uhci_bulk_xfers_q_tail = uhcip->uhci_bulk_xfers_q_head;
185 	SetQH32(uhcip, tmp_qh->link_ptr,
186 	    (QH_PADDR(uhcip->uhci_bulk_xfers_q_head)|HC_QUEUE_HEAD));
187 
188 	SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_head->link_ptr, HC_END_OF_LIST);
189 
190 	/*
191 	 * Add a dummy TD to the static queue head 0. THis is used
192 	 * to generate an at the end of frame.
193 	 */
194 	sof_td = uhci_allocate_td_from_pool(uhcip);
195 
196 	SetQH32(uhcip, list_array[0].element_ptr,
197 	    TD_PADDR(sof_td) | HC_TD_HEAD);
198 	SetTD32(uhcip, sof_td->link_ptr, HC_END_OF_LIST);
199 	uhcip->uhci_sof_td = sof_td;
200 
201 	/*
202 	 * Add a dummy td that is used to generate an interrupt for
203 	 * every 1024 frames.
204 	 */
205 	isoc_td = uhci_allocate_td_from_pool(uhcip);
206 	SetTD32(uhcip, isoc_td->link_ptr, HC_END_OF_LIST);
207 	uhcip->uhci_isoc_td = isoc_td;
208 
209 	uhcip->uhci_isoc_qh = uhci_alloc_queue_head(uhcip);
210 	SetQH32(uhcip, uhcip->uhci_isoc_qh->link_ptr,
211 	    GetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM]));
212 	SetQH32(uhcip, uhcip->uhci_isoc_qh->element_ptr, TD_PADDR(isoc_td));
213 	SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[MAX_FRAME_NUM],
214 	    QH_PADDR(uhcip->uhci_isoc_qh) | HC_QUEUE_HEAD);
215 }
216 
217 
218 /*
219  * uhci_allocate_pools:
220  *	Allocate the system memory for the Queue Heads Descriptor and
221  *	for the Transfer Descriptor (TD) pools. Both QH and TD structures
222  *	must be aligned to a 16 byte boundary.
223  */
224 int
225 uhci_allocate_pools(uhci_state_t *uhcip)
226 {
227 	dev_info_t		*dip = uhcip->uhci_dip;
228 	size_t			real_length;
229 	int			i, result;
230 	uint_t			ccount;
231 	ddi_device_acc_attr_t	dev_attr;
232 
233 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
234 	    "uhci_allocate_pools:");
235 
236 	/* The host controller will be little endian */
237 	dev_attr.devacc_attr_version		= DDI_DEVICE_ATTR_V0;
238 	dev_attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
239 	dev_attr.devacc_attr_dataorder		= DDI_STRICTORDER_ACC;
240 
241 	/* Allocate the TD pool DMA handle */
242 	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
243 	    &uhcip->uhci_td_pool_dma_handle) != DDI_SUCCESS) {
244 
245 		return (USB_FAILURE);
246 	}
247 
248 	/* Allocate the memory for the TD pool */
249 	if (ddi_dma_mem_alloc(uhcip->uhci_td_pool_dma_handle,
250 	    uhci_td_pool_size * sizeof (uhci_td_t),
251 	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
252 	    (caddr_t *)&uhcip->uhci_td_pool_addr, &real_length,
253 	    &uhcip->uhci_td_pool_mem_handle)) {
254 
255 		return (USB_FAILURE);
256 	}
257 
258 	/* Map the TD pool into the I/O address space */
259 	result = ddi_dma_addr_bind_handle(uhcip->uhci_td_pool_dma_handle,
260 	    NULL, (caddr_t)uhcip->uhci_td_pool_addr, real_length,
261 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
262 	    NULL, &uhcip->uhci_td_pool_cookie, &ccount);
263 
264 	bzero((void *)uhcip->uhci_td_pool_addr,
265 	    uhci_td_pool_size * sizeof (uhci_td_t));
266 
267 	/* Process the result */
268 	if (result == DDI_DMA_MAPPED) {
269 		/* The cookie count should be 1 */
270 		if (ccount != 1) {
271 			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
272 			    "uhci_allocate_pools: More than 1 cookie");
273 
274 			return (USB_FAILURE);
275 		}
276 	} else {
277 		USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
278 		    "uhci_allocate_pools: Result = %d", result);
279 
280 		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);
281 
282 		return (USB_FAILURE);
283 	}
284 
285 	uhcip->uhci_dma_addr_bind_flag |= UHCI_TD_POOL_BOUND;
286 
287 	/* Initialize the TD pool */
288 	for (i = 0; i < uhci_td_pool_size; i++) {
289 		uhcip->uhci_td_pool_addr[i].flag = TD_FLAG_FREE;
290 	}
291 
292 	/* Allocate the TD pool DMA handle */
293 	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
294 	    0, &uhcip->uhci_qh_pool_dma_handle) != DDI_SUCCESS) {
295 
296 		return (USB_FAILURE);
297 	}
298 
299 	/* Allocate the memory for the QH pool */
300 	if (ddi_dma_mem_alloc(uhcip->uhci_qh_pool_dma_handle,
301 	    uhci_qh_pool_size * sizeof (queue_head_t),
302 	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
303 	    (caddr_t *)&uhcip->uhci_qh_pool_addr, &real_length,
304 	    &uhcip->uhci_qh_pool_mem_handle) != DDI_SUCCESS) {
305 
306 		return (USB_FAILURE);
307 	}
308 
309 	result = ddi_dma_addr_bind_handle(uhcip->uhci_qh_pool_dma_handle,
310 	    NULL, (caddr_t)uhcip->uhci_qh_pool_addr, real_length,
311 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
312 	    &uhcip->uhci_qh_pool_cookie, &ccount);
313 
314 	/* Process the result */
315 	if (result == DDI_DMA_MAPPED) {
316 		/* The cookie count should be 1 */
317 		if (ccount != 1) {
318 			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
319 			    "uhci_allocate_pools: More than 1 cookie");
320 
321 			return (USB_FAILURE);
322 		}
323 	} else {
324 		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);
325 
326 		return (USB_FAILURE);
327 	}
328 
329 	uhcip->uhci_dma_addr_bind_flag |= UHCI_QH_POOL_BOUND;
330 
331 	bzero((void *)uhcip->uhci_qh_pool_addr,
332 	    uhci_qh_pool_size * sizeof (queue_head_t));
333 
334 	/* Initialize the QH pool */
335 	for (i = 0; i < uhci_qh_pool_size; i ++) {
336 		uhcip->uhci_qh_pool_addr[i].qh_flag = QUEUE_HEAD_FLAG_FREE;
337 	}
338 
339 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
340 	    "uhci_allocate_pools: Completed");
341 
342 	return (USB_SUCCESS);
343 }
344 
345 
346 /*
347  * uhci_free_pools:
348  *	Cleanup on attach failure or detach
349  */
350 void
351 uhci_free_pools(uhci_state_t *uhcip)
352 {
353 	int			i, flag, rval;
354 	uhci_td_t		*td;
355 	uhci_trans_wrapper_t	*tw;
356 
357 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
358 	    "uhci_free_pools:");
359 
360 	if (uhcip->uhci_td_pool_addr && uhcip->uhci_td_pool_mem_handle) {
361 		for (i = 0; i < uhci_td_pool_size; i ++) {
362 			td = &uhcip->uhci_td_pool_addr[i];
363 
364 			flag = uhcip->uhci_td_pool_addr[i].flag;
365 			if ((flag != TD_FLAG_FREE) &&
366 			    (flag != TD_FLAG_DUMMY) && (td->tw != NULL)) {
367 				tw = td->tw;
368 				uhci_free_tw(uhcip, tw);
369 			}
370 
371 		}
372 
373 		if (uhcip->uhci_dma_addr_bind_flag & UHCI_TD_POOL_BOUND) {
374 			rval = ddi_dma_unbind_handle(
375 			    uhcip->uhci_td_pool_dma_handle);
376 			ASSERT(rval == DDI_SUCCESS);
377 		}
378 
379 		ddi_dma_mem_free(&uhcip->uhci_td_pool_mem_handle);
380 	}
381 
382 	/* Free the TD pool */
383 	if (uhcip->uhci_td_pool_dma_handle) {
384 		ddi_dma_free_handle(&uhcip->uhci_td_pool_dma_handle);
385 	}
386 
387 	if (uhcip->uhci_qh_pool_addr && uhcip->uhci_qh_pool_mem_handle) {
388 		if (uhcip->uhci_dma_addr_bind_flag & UHCI_QH_POOL_BOUND) {
389 			rval = ddi_dma_unbind_handle(
390 			    uhcip->uhci_qh_pool_dma_handle);
391 			ASSERT(rval == DDI_SUCCESS);
392 		}
393 		ddi_dma_mem_free(&uhcip->uhci_qh_pool_mem_handle);
394 	}
395 
396 	/* Free the QH pool */
397 	if (uhcip->uhci_qh_pool_dma_handle) {
398 		ddi_dma_free_handle(&uhcip->uhci_qh_pool_dma_handle);
399 	}
400 
401 	/* Free the Frame list Table area */
402 	if (uhcip->uhci_frame_lst_tablep && uhcip->uhci_flt_mem_handle) {
403 		if (uhcip->uhci_dma_addr_bind_flag & UHCI_FLA_POOL_BOUND) {
404 			rval = ddi_dma_unbind_handle(
405 			    uhcip->uhci_flt_dma_handle);
406 			ASSERT(rval == DDI_SUCCESS);
407 		}
408 		ddi_dma_mem_free(&uhcip->uhci_flt_mem_handle);
409 	}
410 
411 	if (uhcip->uhci_flt_dma_handle) {
412 		ddi_dma_free_handle(&uhcip->uhci_flt_dma_handle);
413 	}
414 }
415 
416 
417 /*
418  * uhci_decode_ddi_dma_addr_bind_handle_result:
419  *	Process the return values of ddi_dma_addr_bind_handle()
420  */
421 void
422 uhci_decode_ddi_dma_addr_bind_handle_result(uhci_state_t *uhcip, int result)
423 {
424 	char *msg;
425 
426 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
427 	    "uhci_decode_ddi_dma_addr_bind_handle_result:");
428 
429 	switch (result) {
430 	case DDI_DMA_PARTIAL_MAP:
431 		msg = "Partial transfers not allowed";
432 		break;
433 	case DDI_DMA_INUSE:
434 		msg = "Handle is in use";
435 		break;
436 	case DDI_DMA_NORESOURCES:
437 		msg = "No resources";
438 		break;
439 	case DDI_DMA_NOMAPPING:
440 		msg = "No mapping";
441 		break;
442 	case DDI_DMA_TOOBIG:
443 		msg = "Object is too big";
444 		break;
445 	default:
446 		msg = "Unknown dma error";
447 	}
448 
449 	USB_DPRINTF_L4(PRINT_MASK_ALL, uhcip->uhci_log_hdl, "%s", msg);
450 }
451 
452 
453 /*
454  * uhci_init_ctlr:
455  *	Initialize the Host Controller (HC).
456  */
457 int
458 uhci_init_ctlr(uhci_state_t *uhcip)
459 {
460 	dev_info_t *dip = uhcip->uhci_dip;
461 	uint_t	cmd_reg;
462 	uint_t	frame_base_addr;
463 
464 	mutex_enter(&uhcip->uhci_int_mutex);
465 
466 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_init_ctlr:");
467 
468 	/*
469 	 * When USB legacy mode is enabled, the BIOS manages the USB keyboard
470 	 * attached to the UHCI controller. It has been observed that some
471 	 * times the BIOS does not clear the interrupts in the legacy mode
472 	 * register in the PCI configuration space. So, disable the SMI intrs
473 	 * and route the intrs to PIRQD here.
474 	 */
475 	pci_config_put16(uhcip->uhci_config_handle,
476 	    LEGACYMODE_REG_OFFSET, LEGACYMODE_REG_INIT_VALUE);
477 
478 	/*
479 	 * Disable all the interrupts.
480 	 */
481 	Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);
482 
483 	cmd_reg = Get_OpReg16(USBCMD);
484 	cmd_reg &= (~USBCMD_REG_HC_RUN);
485 
486 	/* Stop the controller */
487 	Set_OpReg16(USBCMD, cmd_reg);
488 
489 	/* Reset the host controller */
490 	Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);
491 
492 	/* Wait 10ms for reset to complete */
493 	mutex_exit(&uhcip->uhci_int_mutex);
494 	delay(drv_usectohz(UHCI_RESET_DELAY));
495 	mutex_enter(&uhcip->uhci_int_mutex);
496 
497 	Set_OpReg16(USBCMD, 0);
498 
499 	/* Set the frame number to zero */
500 	Set_OpReg16(FRNUM, 0);
501 
502 	if (uhcip->uhci_hc_soft_state == UHCI_CTLR_INIT_STATE) {
503 		/* Initialize the Frame list base address area */
504 		if (uhci_init_frame_lst_table(dip, uhcip) != USB_SUCCESS) {
505 			mutex_exit(&uhcip->uhci_int_mutex);
506 
507 			return (USB_FAILURE);
508 		}
509 	}
510 
511 	/* Save the contents of the Frame Interval Registers */
512 	uhcip->uhci_frame_interval = Get_OpReg8(SOFMOD);
513 
514 	frame_base_addr = uhcip->uhci_flt_cookie.dmac_address;
515 
516 	/* Set the Frame list base address */
517 	Set_OpReg32(FRBASEADD, frame_base_addr);
518 
519 	/*
520 	 * Begin sending SOFs
521 	 * Set the Host Controller Functional State to Operational
522 	 */
523 	cmd_reg = Get_OpReg16(USBCMD);
524 	cmd_reg |= (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
525 	    USBCMD_REG_CONFIG_FLAG);
526 
527 	Set_OpReg16(USBCMD, cmd_reg);
528 
529 	/*
530 	 * Verify the Command and interrupt enable registers,
531 	 * a sanity check whether actually initialized or not
532 	 */
533 	cmd_reg = Get_OpReg16(USBCMD);
534 
535 	if (!(cmd_reg & (USBCMD_REG_HC_RUN | USBCMD_REG_MAXPKT_64 |
536 	    USBCMD_REG_CONFIG_FLAG))) {
537 		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
538 		    "uhci_init_ctlr: Controller initialization failed");
539 		mutex_exit(&uhcip->uhci_int_mutex);
540 
541 		return (USB_FAILURE);
542 	}
543 
544 	/*
545 	 * Set the ioc bit of the isoc intr td. This enables
546 	 * the generation of an interrupt for every 1024 frames.
547 	 */
548 	SetTD_ioc(uhcip, uhcip->uhci_isoc_td, 1);
549 
550 	/* Set host controller soft state to operational */
551 	uhcip->uhci_hc_soft_state = UHCI_CTLR_OPERATIONAL_STATE;
552 	mutex_exit(&uhcip->uhci_int_mutex);
553 
554 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
555 	    "uhci_init_ctlr: Completed");
556 
557 	return (USB_SUCCESS);
558 }
559 
560 
561 /*
562  * uhci_uninit_ctlr:
563  *	uninitialize the Host Controller (HC).
564  */
565 void
566 uhci_uninit_ctlr(uhci_state_t *uhcip)
567 {
568 	if (uhcip->uhci_regs_handle) {
569 		/* Disable all the interrupts. */
570 		Set_OpReg16(USBINTR, DISABLE_ALL_INTRS);
571 
572 		/* Complete the current transaction and then halt. */
573 		Set_OpReg16(USBCMD, 0);
574 
575 		/* Wait for sometime */
576 		mutex_exit(&uhcip->uhci_int_mutex);
577 		delay(drv_usectohz(UHCI_TIMEWAIT));
578 		mutex_enter(&uhcip->uhci_int_mutex);
579 	}
580 }
581 
582 
583 /*
584  * uhci_map_regs:
585  *	The Host Controller (HC) contains a set of on-chip operational
586  *	registers and which should be mapped into a non-cacheable
587  *	portion of the system addressable space.
588  */
589 int
590 uhci_map_regs(uhci_state_t *uhcip)
591 {
592 	dev_info_t		*dip = uhcip->uhci_dip;
593 	int			index;
594 	uint32_t		regs_prop_len;
595 	int32_t			*regs_list;
596 	uint16_t		command_reg;
597 	ddi_device_acc_attr_t	attr;
598 
599 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl, "uhci_map_regs:");
600 
601 	/* The host controller will be little endian */
602 	attr.devacc_attr_version	= DDI_DEVICE_ATTR_V0;
603 	attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
604 	attr.devacc_attr_dataorder	= DDI_STRICTORDER_ACC;
605 
606 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, uhcip->uhci_dip,
607 	    DDI_PROP_DONTPASS, "reg", &regs_list, &regs_prop_len) !=
608 	    DDI_PROP_SUCCESS) {
609 
610 		return (USB_FAILURE);
611 	}
612 
613 	for (index = 0; index * 5 < regs_prop_len; index++) {
614 		if (regs_list[index * 5] & UHCI_PROP_MASK) {
615 			break;
616 		}
617 	}
618 
619 	/*
620 	 * Deallocate the memory allocated by the ddi_prop_lookup_int_array
621 	 */
622 	ddi_prop_free(regs_list);
623 
624 	if (index * 5 >= regs_prop_len) {
625 
626 		return (USB_FAILURE);
627 	}
628 
629 	/* Map in operational registers */
630 	if (ddi_regs_map_setup(dip, index, (caddr_t *)&uhcip->uhci_regsp,
631 	    0, sizeof (hc_regs_t), &attr, &uhcip->uhci_regs_handle) !=
632 	    DDI_SUCCESS) {
633 		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
634 		    "ddi_regs_map_setup: failed");
635 
636 		return (USB_FAILURE);
637 	}
638 
639 	if (pci_config_setup(dip, &uhcip->uhci_config_handle) != DDI_SUCCESS) {
640 		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
641 		    "uhci_map_regs: Config error");
642 
643 		return (USB_FAILURE);
644 	}
645 
646 	/* Make sure Memory Access Enable and Master Enable are set */
647 	command_reg = pci_config_get16(uhcip->uhci_config_handle,
648 	    PCI_CONF_COMM);
649 	if (!(command_reg & (PCI_COMM_MAE | PCI_COMM_ME))) {
650 		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
651 		    "uhci_map_regs: No MAE/ME");
652 	}
653 
654 	command_reg |= PCI_COMM_MAE | PCI_COMM_ME;
655 	pci_config_put16(uhcip->uhci_config_handle, PCI_CONF_COMM, command_reg);
656 
657 	/*
658 	 * Check whether I/O base address is configured and enabled.
659 	 */
660 	if (!(command_reg & PCI_COMM_IO)) {
661 		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
662 		    "I/O Base address access disabled");
663 
664 		return (USB_FAILURE);
665 	}
666 	/*
667 	 * Get the IO base address of the controller
668 	 */
669 	uhcip->uhci_iobase = (pci_config_get16(uhcip->uhci_config_handle,
670 	    PCI_CONF_IOBASE) & PCI_CONF_IOBASE_MASK);
671 
672 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
673 	    "uhci_map_regs: Completed");
674 
675 	return (USB_SUCCESS);
676 }
677 
678 
/*
 * uhci_unmap_regs:
 *	Undo uhci_map_regs(): reset the controller (while the register
 *	mapping is still valid), then release the register mapping and
 *	the PCI config access handle.  Safe to call with either handle
 *	unset.
 */
void
uhci_unmap_regs(uhci_state_t *uhcip)
{
	/* Unmap the UHCI registers */
	if (uhcip->uhci_regs_handle) {
		/* Reset the host controller before tearing down access */
		Set_OpReg16(USBCMD, USBCMD_REG_GBL_RESET);

		ddi_regs_map_free(&uhcip->uhci_regs_handle);
	}

	if (uhcip->uhci_config_handle) {
		pci_config_teardown(&uhcip->uhci_config_handle);
	}
}
694 
695 
696 /*
697  * uhci_set_dma_attributes:
698  *	Set the limits in the DMA attributes structure. Most of the values used
699  *	in the	DMA limit structres are the default values as specified by  the
700  *	Writing PCI device drivers document.
701  */
702 void
703 uhci_set_dma_attributes(uhci_state_t *uhcip)
704 {
705 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
706 	    "uhci_set_dma_attributes:");
707 
708 	/* Initialize the DMA attributes */
709 	uhcip->uhci_dma_attr.dma_attr_version = DMA_ATTR_V0;
710 	uhcip->uhci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
711 	uhcip->uhci_dma_attr.dma_attr_addr_hi = 0xfffffff0ull;
712 
713 	/* 32 bit addressing */
714 	uhcip->uhci_dma_attr.dma_attr_count_max = 0xffffffull;
715 
716 	/*
717 	 * Setting the dam_att_align to 512, some times fails the
718 	 * binding handle. I dont know why ? But setting to 16 will
719 	 * be right for our case (16 byte alignment required per
720 	 * UHCI spec for TD descriptors).
721 	 */
722 
723 	/* 16 byte alignment */
724 	uhcip->uhci_dma_attr.dma_attr_align = 0x10;
725 
726 	/*
727 	 * Since PCI  specification is byte alignment, the
728 	 * burstsize field should be set to 1 for PCI devices.
729 	 */
730 	uhcip->uhci_dma_attr.dma_attr_burstsizes = 0x1;
731 
732 	uhcip->uhci_dma_attr.dma_attr_minxfer	= 0x1;
733 	uhcip->uhci_dma_attr.dma_attr_maxxfer	= 0xffffffull;
734 	uhcip->uhci_dma_attr.dma_attr_seg	= 0xffffffffull;
735 	uhcip->uhci_dma_attr.dma_attr_sgllen	= 1;
736 	uhcip->uhci_dma_attr.dma_attr_granular	= 1;
737 	uhcip->uhci_dma_attr.dma_attr_flags	= 0;
738 }
739 
740 
741 uint_t
742 pow_2(uint_t x)
743 {
744 	return ((x == 0) ? 1 : (1 << x));
745 }
746 
747 
748 uint_t
749 log_2(uint_t x)
750 {
751 	int ret_val = 0;
752 
753 	while (x != 1) {
754 		ret_val++;
755 		x = x >> 1;
756 	}
757 
758 	return (ret_val);
759 }
760 
761 
762 /*
763  * uhci_obtain_state:
764  */
765 uhci_state_t *
766 uhci_obtain_state(dev_info_t *dip)
767 {
768 	int instance = ddi_get_instance(dip);
769 	uhci_state_t *state = ddi_get_soft_state(uhci_statep, instance);
770 
771 	ASSERT(state != NULL);
772 
773 	return (state);
774 }
775 
776 
777 /*
778  * uhci_alloc_hcdi_ops:
779  *	The HCDI interfaces or entry points are the software interfaces used by
780  *	the Universal Serial Bus Driver  (USBA) to  access the services of the
781  *	Host Controller Driver (HCD).  During HCD initialization, inform  USBA
782  *	about all available HCDI interfaces or entry points.
783  */
784 usba_hcdi_ops_t *
785 uhci_alloc_hcdi_ops(uhci_state_t *uhcip)
786 {
787 	usba_hcdi_ops_t	*hcdi_ops;
788 
789 	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
790 	    "uhci_alloc_hcdi_ops:");
791 
792 	hcdi_ops = usba_alloc_hcdi_ops();
793 
794 	hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION_1;
795 
796 	hcdi_ops->usba_hcdi_pipe_open = uhci_hcdi_pipe_open;
797 	hcdi_ops->usba_hcdi_pipe_close	= uhci_hcdi_pipe_close;
798 	hcdi_ops->usba_hcdi_pipe_reset = uhci_hcdi_pipe_reset;
799 	hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
800 	    uhci_hcdi_pipe_reset_data_toggle;
801 
802 	hcdi_ops->usba_hcdi_pipe_ctrl_xfer = uhci_hcdi_pipe_ctrl_xfer;
803 	hcdi_ops->usba_hcdi_pipe_bulk_xfer = uhci_hcdi_pipe_bulk_xfer;
804 	hcdi_ops->usba_hcdi_pipe_intr_xfer = uhci_hcdi_pipe_intr_xfer;
805 	hcdi_ops->usba_hcdi_pipe_isoc_xfer = uhci_hcdi_pipe_isoc_xfer;
806 
807 	hcdi_ops->usba_hcdi_bulk_transfer_size = uhci_hcdi_bulk_transfer_size;
808 	hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
809 	    uhci_hcdi_pipe_stop_intr_polling;
810 	hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
811 	    uhci_hcdi_pipe_stop_isoc_polling;
812 
813 	hcdi_ops->usba_hcdi_get_current_frame_number =
814 	    uhci_hcdi_get_current_frame_number;
815 	hcdi_ops->usba_hcdi_get_max_isoc_pkts = uhci_hcdi_get_max_isoc_pkts;
816 
817 	hcdi_ops->usba_hcdi_console_input_init = uhci_hcdi_polled_input_init;
818 	hcdi_ops->usba_hcdi_console_input_enter = uhci_hcdi_polled_input_enter;
819 	hcdi_ops->usba_hcdi_console_read = uhci_hcdi_polled_read;
820 	hcdi_ops->usba_hcdi_console_input_exit = uhci_hcdi_polled_input_exit;
821 	hcdi_ops->usba_hcdi_console_input_fini = uhci_hcdi_polled_input_fini;
822 
823 	hcdi_ops->usba_hcdi_console_output_init = uhci_hcdi_polled_output_init;
824 	hcdi_ops->usba_hcdi_console_output_enter =
825 	    uhci_hcdi_polled_output_enter;
826 	hcdi_ops->usba_hcdi_console_write = uhci_hcdi_polled_write;
827 	hcdi_ops->usba_hcdi_console_output_exit = uhci_hcdi_polled_output_exit;
828 	hcdi_ops->usba_hcdi_console_output_fini = uhci_hcdi_polled_output_fini;
829 
830 	return (hcdi_ops);
831 }
832 
833 
834 /*
835  * uhci_init_frame_lst_table :
836  *	Allocate the system memory and initialize Host Controller
837  *	Frame list table area The starting of the Frame list Table
838  *	area must be 4096 byte aligned.
839  */
840 static int
841 uhci_init_frame_lst_table(dev_info_t *dip, uhci_state_t *uhcip)
842 {
843 	int			result;
844 	uint_t			ccount;
845 	size_t			real_length;
846 	ddi_device_acc_attr_t	dev_attr;
847 
848 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
849 
850 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
851 	    "uhci_init_frame_lst_table:");
852 
853 	/* The host controller will be little endian */
854 	dev_attr.devacc_attr_version		= DDI_DEVICE_ATTR_V0;
855 	dev_attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
856 	dev_attr.devacc_attr_dataorder		= DDI_STRICTORDER_ACC;
857 
858 	/* 4K alignment required */
859 	uhcip->uhci_dma_attr.dma_attr_align = 0x1000;
860 
861 	/* Create space for the HCCA block */
862 	if (ddi_dma_alloc_handle(dip, &uhcip->uhci_dma_attr, DDI_DMA_SLEEP,
863 	    0, &uhcip->uhci_flt_dma_handle) != DDI_SUCCESS) {
864 
865 		return (USB_FAILURE);
866 	}
867 
868 	/* Reset to default 16 bytes */
869 	uhcip->uhci_dma_attr.dma_attr_align = 0x10;
870 
871 	if (ddi_dma_mem_alloc(uhcip->uhci_flt_dma_handle,
872 	    SIZE_OF_FRAME_LST_TABLE, &dev_attr, DDI_DMA_CONSISTENT,
873 	    DDI_DMA_SLEEP, 0, (caddr_t *)&uhcip->uhci_frame_lst_tablep,
874 	    &real_length, &uhcip->uhci_flt_mem_handle)) {
875 
876 		return (USB_FAILURE);
877 	}
878 
879 	/* Map the whole Frame list base area into the I/O address space */
880 	result = ddi_dma_addr_bind_handle(uhcip->uhci_flt_dma_handle,
881 	    NULL, (caddr_t)uhcip->uhci_frame_lst_tablep, real_length,
882 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
883 	    &uhcip->uhci_flt_cookie, &ccount);
884 
885 	if (result == DDI_DMA_MAPPED) {
886 		/* The cookie count should be 1 */
887 		if (ccount != 1) {
888 			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
889 			    "uhci_init_frame_list_table: More than 1 cookie");
890 
891 			return (USB_FAILURE);
892 		}
893 	} else {
894 		uhci_decode_ddi_dma_addr_bind_handle_result(uhcip, result);
895 
896 		return (USB_FAILURE);
897 	}
898 
899 	uhcip->uhci_dma_addr_bind_flag |= UHCI_FLA_POOL_BOUND;
900 
901 	bzero((void *)uhcip->uhci_frame_lst_tablep, real_length);
902 
903 	/* Initialize the interrupt lists */
904 	uhci_build_interrupt_lattice(uhcip);
905 
906 	return (USB_SUCCESS);
907 }
908 
909 
910 /*
911  * uhci_alloc_queue_head:
912  *	Allocate a queue head
913  */
914 queue_head_t *
915 uhci_alloc_queue_head(uhci_state_t *uhcip)
916 {
917 	int		index;
918 	uhci_td_t	*dummy_td;
919 	queue_head_t	*queue_head;
920 
921 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
922 	    "uhci_alloc_queue_head");
923 
924 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
925 
926 	/* Allocate a dummy td first. */
927 	if ((dummy_td = uhci_allocate_td_from_pool(uhcip)) == NULL) {
928 
929 		USB_DPRINTF_L2(PRINT_MASK_ALLOC,  uhcip->uhci_log_hdl,
930 		    "uhci_alloc_queue_head: allocate td from pool failed");
931 
932 		return (NULL);
933 	}
934 
935 	/*
936 	 * The first 63 queue heads in the Queue Head (QH)
937 	 * buffer pool are reserved for building interrupt lattice
938 	 * tree. Search for a blank Queue head in the QH buffer pool.
939 	 */
940 	for (index = NUM_STATIC_NODES; index < uhci_qh_pool_size; index++) {
941 		if (uhcip->uhci_qh_pool_addr[index].qh_flag ==
942 		    QUEUE_HEAD_FLAG_FREE) {
943 			break;
944 		}
945 	}
946 
947 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
948 	    "uhci_alloc_queue_head: Allocated %d", index);
949 
950 	if (index == uhci_qh_pool_size) {
951 		USB_DPRINTF_L2(PRINT_MASK_ALLOC,  uhcip->uhci_log_hdl,
952 		    "uhci_alloc_queue_head: All QH exhausted");
953 
954 		/* Free the dummy td allocated for this qh. */
955 		dummy_td->flag = TD_FLAG_FREE;
956 
957 		return (NULL);
958 	}
959 
960 	queue_head = &uhcip->uhci_qh_pool_addr[index];
961 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
962 	    "uhci_alloc_queue_head: Allocated address 0x%p",
963 	    (void *)queue_head);
964 
965 	bzero((void *)queue_head, sizeof (queue_head_t));
966 	SetQH32(uhcip, queue_head->link_ptr, HC_END_OF_LIST);
967 	SetQH32(uhcip, queue_head->element_ptr, HC_END_OF_LIST);
968 	queue_head->prev_qh	= NULL;
969 	queue_head->qh_flag	= QUEUE_HEAD_FLAG_BUSY;
970 
971 	bzero((char *)dummy_td, sizeof (uhci_td_t));
972 	queue_head->td_tailp	= dummy_td;
973 	SetQH32(uhcip, queue_head->element_ptr, TD_PADDR(dummy_td));
974 
975 	return (queue_head);
976 }
977 
978 
979 /*
980  * uhci_allocate_bandwidth:
981  *	Figure out whether or not this interval may be supported. Return
982  *	the index into the  lattice if it can be supported.  Return
983  *	allocation failure if it can not be supported.
984  */
int
uhci_allocate_bandwidth(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*pipe_handle,
	uint_t			*node)
{
	int		bandwidth;	/* Requested bandwidth */
	uint_t		min, min_index;
	uint_t		i;
	uint_t		height;		/* Bandwidth's height in the tree */
	uint_t		leftmost;
	uint_t		length;
	uint32_t	paddr;
	queue_head_t	*tmp_qh;
	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Calculate the length in bytes of a transaction on this
	 * periodic endpoint.  usb_mutex protects usb_port_status.
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);

	length = uhci_compute_total_bandwidth(endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * Quick feasibility check: if the new transaction plus the
	 * busiest-case existing load (smallest interrupt leaf plus the
	 * isoch running sum) exceeds the frame budget, fail early.
	 */
	if ((length + uhcip->uhci_bandwidth_intr_min +
	    uhcip->uhci_bandwidth_isoch_sum) > (MAX_PERIODIC_BANDWIDTH)) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_allocate_bandwidth: "
		    "Reached maximum bandwidth value and cannot allocate "
		    "bandwidth for a given Interrupt/Isoch endpoint");

		return (USB_NO_BANDWIDTH);
	}

	/*
	 * Isochronous endpoints are not placed in the lattice; they
	 * only contribute to the running isoch byte sum, which applies
	 * to every frame.
	 */
	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
		uhcip->uhci_bandwidth_isoch_sum += length;

		return (USB_SUCCESS);
	}

	/*
	 * This is an interrupt endpoint.
	 * Adjust bandwidth to be a power of 2 (the polling interval
	 * rounded down to a power of two, capped at 32 ms).
	 */
	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
	    pipe_handle->p_usba_device->usb_port_status);
	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);

	/*
	 * If this bandwidth can't be supported,
	 * return allocation failure.
	 */
	if (bandwidth == USB_FAILURE) {

		return (USB_FAILURE);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The new bandwidth is %d", bandwidth);

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The leaf with minimal bandwidth %d, "
	    "The smallest bandwidth %d", min_index, min);

	/*
	 * Find the index into the lattice given the
	 * leaf with the smallest allocated bandwidth.
	 */
	height = uhci_lattice_height(bandwidth);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The height is %d", height);

	*node = uhci_tree_bottom_nodes[min_index];

	/*
	 * Map the frame-list entry back to its lattice QH.  If isoc
	 * TDs are scheduled for this frame, the lattice QH hangs off
	 * the last isoc TD's link pointer; otherwise the frame list
	 * entry points at it directly.
	 */
	if (uhcip->uhci_isoc_q_tailp[*node]) {
		paddr = (uhcip->uhci_isoc_q_tailp[*node]->link_ptr &
		    FRAME_LST_PTR_MASK);
	} else {
		paddr = (uhcip->uhci_frame_lst_tablep[*node] &
		    FRAME_LST_PTR_MASK);
	}

	/*
	 * Convert the physical address back to the QH's virtual
	 * address, then walk up the tree to the level matching the
	 * requested polling interval.
	 */
	tmp_qh = QH_VADDR(paddr);
	*node = tmp_qh->node;
	for (i = 0; i < height; i++) {
		*node = uhci_lattice_parent(*node);
	}

	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "The real node is %d", *node);

	/*
	 * Find the leftmost leaf in the subtree specified by the node.
	 */
	leftmost = uhci_leftmost_leaf(*node, height);
	USB_DPRINTF_L3(PRINT_MASK_BW, uhcip->uhci_log_hdl,
	    "Leftmost %d", leftmost);

	/*
	 * Verify that every leaf served by this node can absorb the
	 * additional bytes without exceeding the frame budget.
	 */
	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i ++) {

		if ((length + uhcip->uhci_bandwidth_isoch_sum +
		    uhcip->uhci_bandwidth[i]) > MAX_PERIODIC_BANDWIDTH) {

			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_allocate_bandwidth: "
			    "Reached maximum bandwidth value and cannot "
			    "allocate bandwidth for Interrupt endpoint");

			return (USB_NO_BANDWIDTH);
		}
	}

	/*
	 * All the leaves for this node must be updated with the bandwidth.
	 */
	for (i = leftmost; i < leftmost +
	    (NUM_FRAME_LST_ENTRIES/bandwidth); i ++) {
		uhcip->uhci_bandwidth[i] += length;
	}

	/* Find the leaf with the smallest allocated bandwidth */
	min_index = 0;
	min = uhcip->uhci_bandwidth[0];

	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
		if (uhcip->uhci_bandwidth[i] < min) {
			min_index = i;
			min = uhcip->uhci_bandwidth[i];
		}
	}

	/* Save the minimum for later use */
	uhcip->uhci_bandwidth_intr_min = min;

	return (USB_SUCCESS);
}
1146 
1147 
1148 /*
1149  * uhci_deallocate_bandwidth:
1150  *	Deallocate bandwidth for the given node in the lattice
1151  *	and the length of transfer.
1152  */
1153 void
1154 uhci_deallocate_bandwidth(uhci_state_t *uhcip,
1155     usba_pipe_handle_data_t *pipe_handle)
1156 {
1157 	uint_t		bandwidth;
1158 	uint_t		height;
1159 	uint_t		leftmost;
1160 	uint_t		i;
1161 	uint_t		min;
1162 	usb_ep_descr_t	*endpoint = &pipe_handle->p_ep;
1163 	uint_t		node, length;
1164 	uhci_pipe_private_t *pp =
1165 	    (uhci_pipe_private_t *)pipe_handle->p_hcd_private;
1166 
1167 	/* This routine is protected by the uhci_int_mutex */
1168 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1169 
1170 	/* Obtain the length */
1171 	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
1172 	length = uhci_compute_total_bandwidth(endpoint,
1173 	    pipe_handle->p_usba_device->usb_port_status);
1174 	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);
1175 
1176 	/*
1177 	 * If this is an isochronous endpoint, just delete endpoint's
1178 	 * bandwidth from the total allocated isochronous bandwidth.
1179 	 */
1180 	if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_ISOCH) {
1181 		uhcip->uhci_bandwidth_isoch_sum -= length;
1182 
1183 		return;
1184 	}
1185 
1186 	/* Obtain the node */
1187 	node = pp->pp_node;
1188 
1189 	/* Adjust bandwidth to be a power of 2 */
1190 	mutex_enter(&pipe_handle->p_usba_device->usb_mutex);
1191 	bandwidth = uhci_bandwidth_adjust(uhcip, endpoint,
1192 	    pipe_handle->p_usba_device->usb_port_status);
1193 	mutex_exit(&pipe_handle->p_usba_device->usb_mutex);
1194 
1195 	/* Find the height in the tree */
1196 	height = uhci_lattice_height(bandwidth);
1197 
1198 	/*
1199 	 * Find the leftmost leaf in the subtree specified by the node
1200 	 */
1201 	leftmost = uhci_leftmost_leaf(node, height);
1202 
1203 	/* Delete the bandwith from the appropriate lists */
1204 	for (i = leftmost; i < leftmost + (NUM_FRAME_LST_ENTRIES/bandwidth);
1205 	    i ++) {
1206 		uhcip->uhci_bandwidth[i] -= length;
1207 	}
1208 
1209 	min = uhcip->uhci_bandwidth[0];
1210 
1211 	/* Recompute the minimum */
1212 	for (i = 1; i < NUM_FRAME_LST_ENTRIES; i++) {
1213 		if (uhcip->uhci_bandwidth[i] < min) {
1214 			min = uhcip->uhci_bandwidth[i];
1215 		}
1216 	}
1217 
1218 	/* Save the minimum for later use */
1219 	uhcip->uhci_bandwidth_intr_min = min;
1220 }
1221 
1222 
1223 /*
1224  * uhci_compute_total_bandwidth:
1225  *
1226  * Given a periodic endpoint (interrupt or isochronous) determine the total
1227  * bandwidth for one transaction. The UHCI host controller traverses the
1228  * endpoint descriptor lists on a first-come-first-serve basis. When the HC
1229  * services an endpoint, only a single transaction attempt is made. The  HC
1230  * moves to the next Endpoint Descriptor after the first transaction attempt
1231  * rather than finishing the entire Transfer Descriptor. Therefore, when  a
1232  * Transfer Descriptor is inserted into the lattice, we will only count the
1233  * number of bytes for one transaction.
1234  *
1235  * The following are the formulas used for calculating bandwidth in terms
1236  * bytes and it is for the single USB full speed and low speed	transaction
1237  * respectively. The protocol overheads will be different for each of  type
1238  * of USB transfer and all these formulas & protocol overheads are  derived
1239  * from the 5.9.3 section of USB Specification & with the help of Bandwidth
1240  * Analysis white paper which is posted on the USB  developer forum.
1241  *
1242  * Full-Speed:
1243  *	  Protocol overhead  + ((MaxPacketSize * 7)/6 )  + Host_Delay
1244  *
1245  * Low-Speed:
1246  *		Protocol overhead  + Hub LS overhead +
1247  *		  (Low-Speed clock * ((MaxPacketSize * 7)/6 )) + Host_Delay
1248  */
1249 static uint_t
1250 uhci_compute_total_bandwidth(usb_ep_descr_t *endpoint,
1251 		usb_port_status_t port_status)
1252 {
1253 	uint_t		bandwidth;
1254 	ushort_t	MaxPacketSize = endpoint->wMaxPacketSize;
1255 
1256 	/* Add Host Controller specific delay to required bandwidth */
1257 	bandwidth = HOST_CONTROLLER_DELAY;
1258 
1259 	/* Add bit-stuffing overhead */
1260 	MaxPacketSize = (ushort_t)((MaxPacketSize * 7) / 6);
1261 
1262 	/* Low Speed interrupt transaction */
1263 	if (port_status == USBA_LOW_SPEED_DEV) {
1264 		/* Low Speed interrupt transaction */
1265 		bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
1266 		    HUB_LOW_SPEED_PROTO_OVERHEAD +
1267 		    (LOW_SPEED_CLOCK * MaxPacketSize));
1268 	} else {
1269 		/* Full Speed transaction */
1270 		bandwidth += MaxPacketSize;
1271 
1272 		if (UHCI_XFER_TYPE(endpoint) == USB_EP_ATTR_INTR) {
1273 			/* Full Speed interrupt transaction */
1274 			bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
1275 		} else {
1276 			/* Isochronus and input transaction */
1277 			if (UHCI_XFER_DIR(endpoint) == USB_EP_DIR_IN) {
1278 				bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
1279 			} else {
1280 				/* Isochronus and output transaction */
1281 				bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
1282 			}
1283 		}
1284 	}
1285 
1286 	return (bandwidth);
1287 }
1288 
1289 
1290 /*
1291  * uhci_bandwidth_adjust:
1292  */
1293 static int
1294 uhci_bandwidth_adjust(
1295 	uhci_state_t		*uhcip,
1296 	usb_ep_descr_t		*endpoint,
1297 	usb_port_status_t	port_status)
1298 {
1299 	int	i = 0;
1300 	uint_t	interval;
1301 
1302 	/*
1303 	 * Get the polling interval from the endpoint descriptor
1304 	 */
1305 	interval = endpoint->bInterval;
1306 
1307 	/*
1308 	 * The bInterval value in the endpoint descriptor can range
1309 	 * from 1 to 255ms. The interrupt lattice has 32 leaf nodes,
1310 	 * and the host controller cycles through these nodes every
1311 	 * 32ms. The longest polling  interval that the  controller
1312 	 * supports is 32ms.
1313 	 */
1314 
1315 	/*
1316 	 * Return an error if the polling interval is less than 1ms
1317 	 * and greater than 255ms
1318 	 */
1319 	if ((interval < MIN_POLL_INTERVAL) || (interval > MAX_POLL_INTERVAL)) {
1320 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1321 		    "uhci_bandwidth_adjust: Endpoint's poll interval must be "
1322 		    "between %d and %d ms", MIN_POLL_INTERVAL,
1323 		    MAX_POLL_INTERVAL);
1324 
1325 		return (USB_FAILURE);
1326 	}
1327 
1328 	/*
1329 	 * According USB Specifications, a  full-speed endpoint can
1330 	 * specify a desired polling interval 1ms to 255ms and a low
1331 	 * speed  endpoints are limited to  specifying only 10ms to
1332 	 * 255ms. But some old keyboards & mice uses polling interval
1333 	 * of 8ms. For compatibility  purpose, we are using polling
1334 	 * interval between 8ms & 255ms for low speed endpoints.
1335 	 */
1336 	if ((port_status == USBA_LOW_SPEED_DEV) &&
1337 	    (interval < MIN_LOW_SPEED_POLL_INTERVAL)) {
1338 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1339 		    "uhci_bandwidth_adjust: Low speed endpoint's poll interval "
1340 		    "must be >= %d ms, adjusted",
1341 		    MIN_LOW_SPEED_POLL_INTERVAL);
1342 
1343 		interval = MIN_LOW_SPEED_POLL_INTERVAL;
1344 	}
1345 
1346 	/*
1347 	 * If polling interval is greater than 32ms,
1348 	 * adjust polling interval equal to 32ms.
1349 	 */
1350 	if (interval > 32) {
1351 		interval = 32;
1352 	}
1353 
1354 	/*
1355 	 * Find the nearest power of 2 that's less
1356 	 * than interval.
1357 	 */
1358 	while ((pow_2(i)) <= interval) {
1359 		i++;
1360 	}
1361 
1362 	return (pow_2((i - 1)));
1363 }
1364 
1365 
1366 /*
1367  * uhci_lattice_height:
1368  *	Given the requested bandwidth, find the height in the tree at
1369  *	which the nodes for this bandwidth fall.  The height is measured
1370  *	as the number of nodes from the leaf to the level specified by
1371  *	bandwidth The root of the tree is at height TREE_HEIGHT.
1372  */
1373 static uint_t
1374 uhci_lattice_height(uint_t bandwidth)
1375 {
1376 	return (TREE_HEIGHT - (log_2(bandwidth)));
1377 }
1378 
1379 
1380 static uint_t
1381 uhci_lattice_parent(uint_t node)
1382 {
1383 	return (((node % 2) == 0) ? ((node/2) - 1) : (node/2));
1384 }
1385 
1386 
1387 /*
1388  * uhci_leftmost_leaf:
1389  *	Find the leftmost leaf in the subtree specified by the node.
1390  *	Height refers to number of nodes from the bottom of the tree
1391  *	to the node,  including the node.
1392  */
1393 static uint_t
1394 uhci_leftmost_leaf(uint_t node, uint_t height)
1395 {
1396 	node = pow_2(height + VIRTUAL_TREE_HEIGHT) * (node+1) -
1397 	    NUM_FRAME_LST_ENTRIES;
1398 	return (node);
1399 }
1400 
1401 
1402 /*
1403  * uhci_insert_qh:
1404  *	Add the Queue Head (QH) into the Host Controller's (HC)
1405  *	appropriate queue head list.
1406  */
1407 void
1408 uhci_insert_qh(uhci_state_t *uhcip, usba_pipe_handle_data_t *ph)
1409 {
1410 	uhci_pipe_private_t *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
1411 
1412 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
1413 	    "uhci_insert_qh:");
1414 
1415 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
1416 
1417 	switch (UHCI_XFER_TYPE(&ph->p_ep)) {
1418 	case USB_EP_ATTR_CONTROL:
1419 		uhci_insert_ctrl_qh(uhcip, pp);
1420 		break;
1421 	case USB_EP_ATTR_BULK:
1422 		uhci_insert_bulk_qh(uhcip, pp);
1423 		break;
1424 	case USB_EP_ATTR_INTR:
1425 		uhci_insert_intr_qh(uhcip, pp);
1426 		break;
1427 	case USB_EP_ATTR_ISOCH:
1428 			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
1429 			    "uhci_insert_qh: Illegal request");
1430 		break;
1431 	}
1432 }
1433 
1434 
1435 /*
1436  * uhci_insert_ctrl_qh:
1437  *	Insert a control QH into the Host Controller's (HC) control QH list.
1438  */
static void
uhci_insert_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t *qh = pp->pp_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_ctrl_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Empty list (head == tail): head has no predecessor. */
	if (uhcip->uhci_ctrl_xfers_q_head == uhcip->uhci_ctrl_xfers_q_tail) {
		uhcip->uhci_ctrl_xfers_q_head->prev_qh	= UHCI_INVALID_PTR;
	}

	/*
	 * Splice the new QH in after the current tail.  The new QH
	 * first inherits the tail's hardware link pointer, so the
	 * chain is never broken from the controller's point of view;
	 * only then is the tail's link pointer redirected to the new
	 * QH.
	 */
	SetQH32(uhcip, qh->link_ptr,
	    GetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr));
	qh->prev_qh = uhcip->uhci_ctrl_xfers_q_tail;
	SetQH32(uhcip, uhcip->uhci_ctrl_xfers_q_tail->link_ptr,
	    QH_PADDR(qh) | HC_QUEUE_HEAD);
	uhcip->uhci_ctrl_xfers_q_tail = qh;

}
1461 
1462 
1463 /*
1464  * uhci_insert_bulk_qh:
1465  *	Insert a bulk QH into the Host Controller's (HC) bulk QH list.
1466  */
static void
uhci_insert_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t *qh = pp->pp_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_bulk_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Empty list (head == tail): head has no predecessor. */
	if (uhcip->uhci_bulk_xfers_q_head == uhcip->uhci_bulk_xfers_q_tail) {
		uhcip->uhci_bulk_xfers_q_head->prev_qh = UHCI_INVALID_PTR;
	} else if (uhcip->uhci_bulk_xfers_q_head->link_ptr ==
	    uhcip->uhci_bulk_xfers_q_tail->link_ptr) {

		/*
		 * If there is already a loop, we should keep the loop:
		 * the new QH inherits the tail's link pointer so it
		 * points back into the circular list.
		 * NOTE(review): this copies link_ptr with a raw
		 * assignment rather than Set/GetQH32 as the other
		 * paths do - presumably OK since both sides hold the
		 * same raw little-endian value; confirm.
		 */
		qh->link_ptr = uhcip->uhci_bulk_xfers_q_tail->link_ptr;
	}

	/*
	 * Append after the current tail; the tail's hardware link
	 * pointer is written last so the controller never walks a
	 * half-built chain.
	 */
	qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
	SetQH32(uhcip, uhcip->uhci_bulk_xfers_q_tail->link_ptr,
	    QH_PADDR(qh) | HC_QUEUE_HEAD);
	uhcip->uhci_bulk_xfers_q_tail = qh;
}
1491 
1492 
1493 /*
1494  * uhci_insert_intr_qh:
1495  *	Insert a periodic Queue head i.e Interrupt queue head into the
1496  *	Host Controller's (HC) interrupt lattice tree.
1497  */
static void
uhci_insert_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uint_t		node = pp->pp_node;	/* The appropriate node was */
						/* found during the opening */
						/* of the pipe.  */
	queue_head_t	*qh = pp->pp_qh;
	queue_head_t	*next_lattice_qh, *lattice_qh;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_qh:");

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Find the lattice queue head and its current successor. */
	lattice_qh = &uhcip->uhci_qh_pool_addr[node];
	next_lattice_qh =
	    QH_VADDR(GetQH32(uhcip, lattice_qh->link_ptr) & QH_LINK_PTR_MASK);

	/*
	 * Splice the new QH between the lattice QH and its successor.
	 * The new QH's forward link is set before the lattice QH is
	 * redirected, so the hardware never sees a broken chain.
	 */
	next_lattice_qh->prev_qh = qh;
	qh->link_ptr	= lattice_qh->link_ptr;
	qh->prev_qh	= lattice_qh;
	SetQH32(uhcip, lattice_qh->link_ptr, QH_PADDR(qh) | HC_QUEUE_HEAD);
	/* A freshly inserted interrupt pipe starts with DATA0. */
	pp->pp_data_toggle = 0;
}
1523 
1524 
1525 /*
1526  * uhci_insert_intr_td:
1527  *	Create a TD and a data buffer for an interrupt endpoint.
1528  */
int
uhci_insert_intr_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_intr_req_t		*req,
	usb_flags_t		flags)
{
	int			error, pipe_dir;
	uint_t			length, mps;
	uint32_t		buf_offs;
	uhci_td_t		*tmp_td;
	usb_intr_req_t		*intr_reqp;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_td: req: 0x%p", (void *)req);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Get the interrupt pipe direction */
	pipe_dir = UHCI_XFER_DIR(&ph->p_ep);

	/*
	 * Get the current interrupt request pointer.  When req is NULL
	 * (restarting IN polling), size the transfer from the client's
	 * original periodic request, falling back to wMaxPacketSize.
	 */
	if (req) {
		length = req->intr_len;
	} else {
		ASSERT(pipe_dir == USB_EP_DIR_IN);
		length = (pp->pp_client_periodic_in_reqp) ?
		    (((usb_intr_req_t *)pp->
		    pp_client_periodic_in_reqp)->intr_len) :
		    ph->p_ep.wMaxPacketSize;
	}

	/* Check the size of interrupt request */
	if (length > UHCI_MAX_TD_XFER_SIZE) {

		/* the length shouldn't exceed 8K */
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: Intr request size 0x%x is "
		    "more than 0x%x", length, UHCI_MAX_TD_XFER_SIZE);

		return (USB_INVALID_REQUEST);
	}

	USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_insert_intr_td: length: 0x%x", length);

	/* Allocate a transaction wrapper (includes the DMA buffer) */
	if ((tw = uhci_create_transfer_wrapper(uhcip, pp, length, flags)) ==
	    NULL) {

		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/*
	 * Initialize the callback and any callback
	 * data for when the td completes.
	 */
	tw->tw_handle_td = uhci_handle_intr_td;
	tw->tw_handle_callback_value = NULL;
	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
	    PID_OUT : PID_IN;
	tw->tw_curr_xfer_reqp = (usb_opaque_t)req;

	/*
	 * If it is an Interrupt IN request and interrupt request is NULL,
	 * allocate the usb interrupt request structure for the current
	 * interrupt polling request.
	 */
	if (tw->tw_direction == PID_IN) {
		if ((error = uhci_allocate_periodic_in_resource(uhcip,
		    pp, tw, flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
			    "uhci_insert_intr_td: Interrupt request structure "
			    "allocation failed");

			/* free the transfer wrapper */
			uhci_deallocate_tw(uhcip, pp, tw);

			return (error);
		}
	}

	intr_reqp = (usb_intr_req_t *)tw->tw_curr_xfer_reqp;
	ASSERT(tw->tw_curr_xfer_reqp != NULL);

	/* Only one-xfer requests carry a client timeout. */
	tw->tw_timeout_cnt = (intr_reqp->intr_attributes & USB_ATTRS_ONE_XFER) ?
	    intr_reqp->intr_timeout : 0;

	/* DATA IN */
	if (tw->tw_direction == PID_IN) {
		/* Insert the td onto the queue head */
		error = uhci_insert_hc_td(uhcip, 0,
		    length, pp, tw, PID_IN, intr_reqp->intr_attributes);

		if (error != USB_SUCCESS) {

			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
			/* free the transfer wrapper */
			uhci_deallocate_tw(uhcip, pp, tw);

			return (USB_NO_RESOURCES);
		}
		tw->tw_bytes_xfered = 0;

		return (USB_SUCCESS);
	}

	if (req->intr_len) {
		/* DATA OUT */
		ASSERT(req->intr_data != NULL);

		/* Copy the data into the DMA message buffer */
		ddi_rep_put8(tw->tw_accesshandle, req->intr_data->b_rptr,
		    (uint8_t *)tw->tw_buf, req->intr_len, DDI_DEV_AUTOINCR);
	}

	/* set tw->tw_claim flag, so that nobody else works on this tw. */
	tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;

	mps = ph->p_ep.wMaxPacketSize;
	buf_offs = 0;

	/*
	 * Insert tds onto the queue head, splitting the OUT data
	 * into max-packet-size chunks.  The TDs are created inactive
	 * (see uhci_insert_hc_td) so the HC can't start a partially
	 * built queue.
	 */
	while (length > 0) {

		error = uhci_insert_hc_td(uhcip, buf_offs,
		    (length > mps) ? mps : length,
		    pp, tw, PID_OUT,
		    intr_reqp->intr_attributes);

		if (error != USB_SUCCESS) {
			/* no resource. */
			break;
		}

		if (length <= mps) {
			/* inserted all data. */
			length = 0;

		} else {

			buf_offs += mps;
			length -= mps;
		}
	}

	if (error != USB_SUCCESS) {

		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
		    "uhci_insert_intr_td: allocate td failed, free resource");

		/* remove all the tds built so far */
		while (tw->tw_hctd_head != NULL) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		tw->tw_claim = UHCI_NOT_CLAIMED;
		uhci_deallocate_tw(uhcip, pp, tw);

		return (error);
	}

	/*
	 * The whole chain is built; now activate every td so the
	 * HC may start transferring them.
	 */
	tmp_td = tw->tw_hctd_head;
	while (tmp_td != NULL) {

		SetTD_status(uhcip, tmp_td, UHCI_TD_ACTIVE);
		tmp_td = tmp_td->tw_td_next;
	}

	tw->tw_bytes_xfered = 0;
	tw->tw_claim = UHCI_NOT_CLAIMED;

	return (error);
}
1709 
1710 
1711 /*
1712  * uhci_create_transfer_wrapper:
1713  *	Create a Transaction Wrapper (TW) for non-isoc transfer types.
1714  *	This involves the allocating of DMA resources.
1715  *
1716  *	For non-isoc transfers, one DMA handle and one DMA buffer are
1717  *	allocated per transfer. The DMA buffer may contain multiple
1718  *	DMA cookies and the cookies should meet certain alignment
1719  *	requirement to be able to fit in the multiple TDs. The alignment
1720  *	needs to ensure:
1721  *	1. the size of a cookie be larger than max TD length (0x500)
1722  *	2. the size of a cookie be a multiple of wMaxPacketSize of the
1723  *	ctrl/bulk pipes
1724  *
1725  *	wMaxPacketSize for ctrl and bulk pipes may be 8, 16, 32 or 64 bytes.
1726  *	So the alignment should be a multiple of 64. wMaxPacketSize for intr
1727  *	pipes is a little different since it only specifies the max to be
1728  *	64 bytes, but as long as an intr transfer is limited to max TD length,
1729  *	any alignment can work if the cookie size is larger than max TD length.
1730  *
1731  *	Considering the above conditions, 2K alignment is used. 4K alignment
1732  *	should also be fine.
1733  */
static uhci_trans_wrapper_t *
uhci_create_transfer_wrapper(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	size_t			length,
	usb_flags_t		usb_flags)
{
	size_t			real_length;
	uhci_trans_wrapper_t	*tw;
	ddi_device_acc_attr_t	dev_attr;
	ddi_dma_attr_t		dma_attr;
	int			kmem_flag;
	int			(*dmamem_wait)(caddr_t);
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
	    length, usb_flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* isochronous pipe should not call into this function */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {

		return (NULL);
	}

	/*
	 * SLEEP flag should not be used in interrupt context; pick
	 * the non-blocking variants when servicing an interrupt.
	 */
	if (servicing_interrupt()) {
		kmem_flag = KM_NOSLEEP;
		dmamem_wait = DDI_DMA_DONTWAIT;
	} else {
		kmem_flag = KM_SLEEP;
		dmamem_wait = DDI_DMA_SLEEP;
	}

	/* Allocate space for the transfer wrapper */
	if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
	    NULL) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS,  uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: kmem_alloc failed");

		return (NULL);
	}

	/* zero-length packet doesn't need to allocate dma memory */
	if (length == 0) {

		goto dmadone;
	}

	/*
	 * Allow sg lists for transfer wrapper dma memory, with the
	 * 2K alignment required to split cookies across TDs (see the
	 * block comment above).
	 */
	bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
	dma_attr.dma_attr_sgllen = UHCI_DMA_ATTR_SGLLEN;
	dma_attr.dma_attr_align = UHCI_DMA_ATTR_ALIGN;

	/* Store the transfer length */
	tw->tw_length = length;

	/* Allocate the DMA handle */
	if (ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr, dmamem_wait,
	    0, &tw->tw_dmahandle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: Alloc handle failed");
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	/* Little-endian, strictly ordered access for the HC's view */
	dev_attr.devacc_attr_version		= DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
	dev_attr.devacc_attr_dataorder		= DDI_STRICTORDER_ACC;

	/* Allocate the memory; real_length may exceed the request */
	if (ddi_dma_mem_alloc(tw->tw_dmahandle, tw->tw_length, &dev_attr,
	    DDI_DMA_CONSISTENT, dmamem_wait, NULL, (caddr_t *)&tw->tw_buf,
	    &real_length, &tw->tw_accesshandle) != DDI_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: dma_mem_alloc fail");
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	ASSERT(real_length >= length);

	/* Bind the handle to obtain the DMA cookies */
	if (ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies) !=
	    DDI_DMA_MAPPED) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_create_transfer_wrapper: Bind handle failed");
		ddi_dma_mem_free(&tw->tw_accesshandle);
		ddi_dma_free_handle(&tw->tw_dmahandle);
		kmem_free(tw, sizeof (uhci_trans_wrapper_t));

		return (NULL);
	}

	/* Start consuming cookies from the beginning of the buffer */
	tw->tw_cookie_idx = 0;
	tw->tw_dma_offs = 0;

dmadone:
	/*
	 * Only allow one wrapper to be added at a time. Insert the
	 * new transaction wrapper into the list for this pipe.
	 */
	if (pp->pp_tw_head == NULL) {
		pp->pp_tw_head = tw;
		pp->pp_tw_tail = tw;
	} else {
		pp->pp_tw_tail->tw_next = tw;
		pp->pp_tw_tail = tw;
		ASSERT(tw->tw_next == NULL);
	}

	/* Store a back pointer to the pipe private structure */
	tw->tw_pipe_private = pp;

	/* Store the transfer type - synchronous or asynchronous */
	tw->tw_flags = usb_flags;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
	    (void *)tw, tw->tw_ncookies);

	return (tw);
}
1864 
1865 
1866 /*
1867  * uhci_insert_hc_td:
1868  *	Insert a Transfer Descriptor (TD) on an QH.
1869  */
int
uhci_insert_hc_td(
	uhci_state_t		*uhcip,
	uint32_t		buffer_offset,
	size_t			hcgtd_length,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw,
	uchar_t			PID,
	usb_req_attrs_t		attrs)
{
	uhci_td_t	*td, *current_dummy;
	queue_head_t	*qh = pp->pp_qh;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	if ((td = uhci_allocate_td_from_pool(uhcip)) == NULL) {

		return (USB_NO_RESOURCES);
	}

	current_dummy = qh->td_tailp;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_insert_hc_td: td %p, attrs = 0x%x", (void *)td, attrs);

	/*
	 * Dummy-TD scheme: the queue's current tail is a dummy.  Fill
	 * the transfer into the current dummy and append the newly
	 * allocated td as the queue's next dummy tail.
	 */
	uhci_fill_in_td(uhcip, td, current_dummy, buffer_offset,
	    hcgtd_length, pp, PID, attrs, tw);

	/*
	 * Allow HC hardware xfer the td, except interrupt out td,
	 * which is activated later, after the full chain is built
	 * (see uhci_insert_intr_td).
	 */
	if ((tw->tw_handle_td != uhci_handle_intr_td) || (PID != PID_OUT)) {

		SetTD_status(uhcip, current_dummy, UHCI_TD_ACTIVE);
	}

	/* Insert this td onto the tw's td list (append at tail) */

	if (tw->tw_hctd_head == NULL) {
		ASSERT(tw->tw_hctd_tail == NULL);
		tw->tw_hctd_head = current_dummy;
		tw->tw_hctd_tail = current_dummy;
	} else {
		/* Add the td to the end of the list */
		tw->tw_hctd_tail->tw_td_next = current_dummy;
		tw->tw_hctd_tail = current_dummy;
	}

	/*
	 * Insert the TD on to the QH. When this occurs,
	 * the Host Controller will see the newly filled in TD.
	 * Also append it to the driver's global outstanding-TD list.
	 */
	current_dummy->outst_td_next	 = NULL;
	current_dummy->outst_td_prev	 = uhcip->uhci_outst_tds_tail;
	if (uhcip->uhci_outst_tds_head == NULL) {
		uhcip->uhci_outst_tds_head = current_dummy;
	} else {
		uhcip->uhci_outst_tds_tail->outst_td_next = current_dummy;
	}
	uhcip->uhci_outst_tds_tail = current_dummy;
	current_dummy->tw = tw;

	return (USB_SUCCESS);
}
1938 
1939 
1940 /*
1941  * uhci_fill_in_td:
1942  *	Fill in the fields of a Transfer Descriptor (TD).
1943  */
static void
uhci_fill_in_td(
	uhci_state_t		*uhcip,
	uhci_td_t		*td,
	uhci_td_t		*current_dummy,
	uint32_t		buffer_offset,
	size_t			length,
	uhci_pipe_private_t	*pp,
	uchar_t			PID,
	usb_req_attrs_t		attrs,
	uhci_trans_wrapper_t	*tw)
{
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
	uint32_t		buf_addr;

	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
	    "uhci_fill_in_td: td 0x%p buf_offs 0x%x len 0x%lx "
	    "attrs 0x%x", (void *)td, buffer_offset, length, attrs);

	/*
	 * If this is an isochronous TD, just return
	 */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_ISOCH) {

		return;
	}

	/* The maximum transfer length of UHCI cannot exceed 0x500 bytes */
	ASSERT(length <= UHCI_MAX_TD_XFER_SIZE);

	bzero((char *)td, sizeof (uhci_td_t));	/* Clear the new dummy TD */
	/* Link the current dummy to the new dummy TD */
	SetTD32(uhcip, current_dummy->link_ptr, TD_PADDR(td));

	/* Short packet detect: honor the client's SHORT_XFER_OK attr */
	if (attrs & USB_ATTRS_SHORT_XFER_OK) {
		SetTD_spd(uhcip, current_dummy, 1);
	}

	/* usb_mutex protects usb_port_status and usb_addr below */
	mutex_enter(&ph->p_usba_device->usb_mutex);
	if (ph->p_usba_device->usb_port_status == USBA_LOW_SPEED_DEV) {
		SetTD_ls(uhcip, current_dummy, LOW_SPEED_DEVICE);
	}

	/*
	 * Program the token/control fields: error counter, max length
	 * (hardware encodes length - 1; ZERO_LENGTH for 0 bytes), and
	 * the current data toggle.
	 */
	SetTD_c_err(uhcip, current_dummy, UHCI_MAX_ERR_COUNT);
	SetTD_mlen(uhcip, current_dummy,
	    (length == 0) ? ZERO_LENGTH : (length - 1));
	SetTD_dtogg(uhcip, current_dummy, pp->pp_data_toggle);

	/* Adjust the data toggle bit for the next TD on this pipe */
	ADJ_DATA_TOGGLE(pp);

	SetTD_devaddr(uhcip, current_dummy,  ph->p_usba_device->usb_addr);
	SetTD_endpt(uhcip, current_dummy,
	    ph->p_ep.bEndpointAddress & END_POINT_ADDRESS_MASK);
	SetTD_PID(uhcip, current_dummy, PID);
	/* Interrupt on completion so the ISR can reap this TD */
	SetTD_ioc(uhcip, current_dummy, INTERRUPT_ON_COMPLETION);

	/* Resolve the TW buffer offset to a DMA cookie device address */
	buf_addr = uhci_get_tw_paddr_by_offs(uhcip, buffer_offset, length, tw);
	SetTD32(uhcip, current_dummy->buffer_address, buf_addr);

	/* The new TD becomes the queue's dummy tail */
	td->qh_td_prev			= current_dummy;
	current_dummy->qh_td_prev	= NULL;
	pp->pp_qh->td_tailp		= td;
	mutex_exit(&ph->p_usba_device->usb_mutex);
}
2008 
2009 /*
2010  * uhci_get_tw_paddr_by_offs:
2011  *	Walk through the DMA cookies of a TW buffer to retrieve
2012  *	the device address used for a TD.
2013  *
2014  * buffer_offset - the starting offset into the TW buffer, where the
2015  *		   TD should transfer from. When a TW has more than
2016  *		   one TD, the TDs must be filled in increasing order.
2017  */
2018 static uint32_t
2019 uhci_get_tw_paddr_by_offs(
2020 	uhci_state_t		*uhcip,
2021 	uint32_t		buffer_offset,
2022 	size_t			length,
2023 	uhci_trans_wrapper_t	*tw)
2024 {
2025 	uint32_t		buf_addr;
2026 	int			rem_len;
2027 
2028 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2029 	    "uhci_get_tw_paddr_by_offs: buf_offs 0x%x len 0x%lx",
2030 	    buffer_offset, length);
2031 
2032 	/*
2033 	 * TDs must be filled in increasing DMA offset order.
2034 	 * tw_dma_offs is initialized to be 0 at TW creation and
2035 	 * is only increased in this function.
2036 	 */
2037 	ASSERT(length == 0 || buffer_offset >= tw->tw_dma_offs);
2038 
2039 	if (length == 0) {
2040 		buf_addr = 0;
2041 
2042 		return (buf_addr);
2043 	}
2044 
2045 	/*
2046 	 * Advance to the next DMA cookie until finding the cookie
2047 	 * that buffer_offset falls in.
2048 	 * It is very likely this loop will never repeat more than
2049 	 * once. It is here just to accommodate the case buffer_offset
2050 	 * is increased by multiple cookies during two consecutive
2051 	 * calls into this function. In that case, the interim DMA
2052 	 * buffer is allowed to be skipped.
2053 	 */
2054 	while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2055 	    buffer_offset) {
2056 		/*
2057 		 * tw_dma_offs always points to the starting offset
2058 		 * of a cookie
2059 		 */
2060 		tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2061 		ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2062 		tw->tw_cookie_idx++;
2063 		ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2064 	}
2065 
2066 	/*
2067 	 * Counting the remained buffer length to be filled in
2068 	 * the TDs for current DMA cookie
2069 	 */
2070 	rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2071 	    buffer_offset;
2072 
2073 	/* Calculate the beginning address of the buffer */
2074 	ASSERT(length <= rem_len);
2075 	buf_addr = (buffer_offset - tw->tw_dma_offs) +
2076 	    tw->tw_cookie.dmac_address;
2077 
2078 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2079 	    "uhci_get_tw_paddr_by_offs: dmac_addr 0x%x dmac_size "
2080 	    "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2081 	    tw->tw_cookie_idx);
2082 
2083 	return (buf_addr);
2084 }
2085 
2086 
2087 /*
2088  * uhci_modify_td_active_bits:
2089  *	Sets active bit in all the tds of QH to INACTIVE so that
2090  *	the HC stops processing the TD's related to the QH.
2091  */
2092 void
2093 uhci_modify_td_active_bits(
2094 	uhci_state_t		*uhcip,
2095 	uhci_pipe_private_t	*pp)
2096 {
2097 	uhci_td_t		*td_head;
2098 	usb_ep_descr_t		*ept = &pp->pp_pipe_handle->p_ep;
2099 	uhci_trans_wrapper_t	*tw_head = pp->pp_tw_head;
2100 
2101 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2102 	    "uhci_modify_td_active_bits: tw head %p", (void *)tw_head);
2103 
2104 	while (tw_head != NULL) {
2105 		tw_head->tw_claim = UHCI_MODIFY_TD_BITS_CLAIMED;
2106 		td_head = tw_head->tw_hctd_head;
2107 
2108 		while (td_head) {
2109 			if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) {
2110 				SetTD_status(uhcip, td_head,
2111 				    GetTD_status(uhcip, td_head) & TD_INACTIVE);
2112 			} else {
2113 				SetTD32(uhcip, td_head->link_ptr,
2114 				    GetTD32(uhcip, td_head->link_ptr) |
2115 				    HC_END_OF_LIST);
2116 			}
2117 
2118 			td_head = td_head->tw_td_next;
2119 		}
2120 		tw_head = tw_head->tw_next;
2121 	}
2122 }
2123 
2124 
2125 /*
2126  * uhci_insert_ctrl_td:
2127  *	Create a TD and a data buffer for a control Queue Head.
2128  */
2129 int
2130 uhci_insert_ctrl_td(
2131 	uhci_state_t		*uhcip,
2132 	usba_pipe_handle_data_t  *ph,
2133 	usb_ctrl_req_t		*ctrl_reqp,
2134 	usb_flags_t		flags)
2135 {
2136 	uhci_pipe_private_t  *pp = (uhci_pipe_private_t *)ph->p_hcd_private;
2137 	uhci_trans_wrapper_t *tw;
2138 	size_t	ctrl_buf_size;
2139 
2140 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2141 	    "uhci_insert_ctrl_td: timeout: 0x%x", ctrl_reqp->ctrl_timeout);
2142 
2143 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2144 
2145 	/*
2146 	 * If we have a control data phase, make the data buffer start
2147 	 * on the next 64-byte boundary so as to ensure the DMA cookie
2148 	 * can fit in the multiple TDs. The buffer in the range of
2149 	 * [SETUP_SIZE, UHCI_CTRL_EPT_MAX_SIZE) is just for padding
2150 	 * and not to be transferred.
2151 	 */
2152 	if (ctrl_reqp->ctrl_wLength) {
2153 		ctrl_buf_size = UHCI_CTRL_EPT_MAX_SIZE +
2154 		    ctrl_reqp->ctrl_wLength;
2155 	} else {
2156 		ctrl_buf_size = SETUP_SIZE;
2157 	}
2158 
2159 	/* Allocate a transaction wrapper */
2160 	if ((tw = uhci_create_transfer_wrapper(uhcip, pp,
2161 	    ctrl_buf_size, flags)) == NULL) {
2162 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2163 		    "uhci_insert_ctrl_td: TW allocation failed");
2164 
2165 		return (USB_NO_RESOURCES);
2166 	}
2167 
2168 	pp->pp_data_toggle = 0;
2169 
2170 	tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
2171 	tw->tw_bytes_xfered = 0;
2172 	tw->tw_bytes_pending = ctrl_reqp->ctrl_wLength;
2173 	tw->tw_timeout_cnt = max(UHCI_CTRL_TIMEOUT, ctrl_reqp->ctrl_timeout);
2174 
2175 	/*
2176 	 * Initialize the callback and any callback
2177 	 * data for when the td completes.
2178 	 */
2179 	tw->tw_handle_td = uhci_handle_ctrl_td;
2180 	tw->tw_handle_callback_value = NULL;
2181 
2182 	if ((uhci_create_setup_pkt(uhcip, pp, tw)) != USB_SUCCESS) {
2183 		tw->tw_ctrl_state = 0;
2184 
2185 		/* free the transfer wrapper */
2186 		uhci_deallocate_tw(uhcip, pp, tw);
2187 
2188 		return (USB_NO_RESOURCES);
2189 	}
2190 
2191 	tw->tw_ctrl_state = SETUP;
2192 
2193 	return (USB_SUCCESS);
2194 }
2195 
2196 
2197 /*
2198  * uhci_create_setup_pkt:
2199  *	create a setup packet to initiate a control transfer.
2200  *
2201  *	OHCI driver has seen the case where devices fail if there is
2202  *	more than one control transfer to the device within a frame.
2203  *	So, the UHCI ensures that only one TD will be put on the control
2204  *	pipe to one device (to be consistent with OHCI driver).
2205  */
2206 static int
2207 uhci_create_setup_pkt(
2208 	uhci_state_t		*uhcip,
2209 	uhci_pipe_private_t	*pp,
2210 	uhci_trans_wrapper_t	*tw)
2211 {
2212 	int		sdata;
2213 	usb_ctrl_req_t	*req = (usb_ctrl_req_t *)tw->tw_curr_xfer_reqp;
2214 
2215 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2216 	    "uhci_create_setup_pkt: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%p",
2217 	    req->ctrl_bmRequestType, req->ctrl_bRequest, req->ctrl_wValue,
2218 	    req->ctrl_wIndex, req->ctrl_wLength, (void *)req->ctrl_data);
2219 
2220 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2221 	ASSERT(tw != NULL);
2222 
2223 	/* Create the first four bytes of the setup packet */
2224 	sdata = (req->ctrl_bmRequestType | (req->ctrl_bRequest << 8) |
2225 	    (req->ctrl_wValue << 16));
2226 	ddi_put32(tw->tw_accesshandle, (uint_t *)tw->tw_buf, sdata);
2227 
2228 	/* Create the second four bytes */
2229 	sdata = (uint32_t)(req->ctrl_wIndex | (req->ctrl_wLength << 16));
2230 	ddi_put32(tw->tw_accesshandle,
2231 	    (uint_t *)(tw->tw_buf + sizeof (uint_t)), sdata);
2232 
2233 	/*
2234 	 * The TD's are placed on the QH one at a time.
2235 	 * Once this TD is placed on the done list, the
2236 	 * data or status phase TD will be enqueued.
2237 	 */
2238 	if ((uhci_insert_hc_td(uhcip, 0, SETUP_SIZE,
2239 	    pp, tw, PID_SETUP, req->ctrl_attributes)) != USB_SUCCESS) {
2240 
2241 		return (USB_NO_RESOURCES);
2242 	}
2243 
2244 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2245 	    "Create_setup: pp = 0x%p, attrs = 0x%x", (void *)pp,
2246 	    req->ctrl_attributes);
2247 
2248 	/*
2249 	 * If this control transfer has a data phase, record the
2250 	 * direction. If the data phase is an OUT transaction ,
2251 	 * copy the data into the buffer of the transfer wrapper.
2252 	 */
2253 	if (req->ctrl_wLength != 0) {
2254 		/* There is a data stage.  Find the direction */
2255 		if (req->ctrl_bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
2256 			tw->tw_direction = PID_IN;
2257 		} else {
2258 			tw->tw_direction = PID_OUT;
2259 
2260 			/* Copy the data into the buffer */
2261 			ddi_rep_put8(tw->tw_accesshandle,
2262 			    req->ctrl_data->b_rptr,
2263 			    (uint8_t *)(tw->tw_buf + UHCI_CTRL_EPT_MAX_SIZE),
2264 			    req->ctrl_wLength,
2265 			    DDI_DEV_AUTOINCR);
2266 		}
2267 	}
2268 
2269 	return (USB_SUCCESS);
2270 }
2271 
2272 
2273 /*
2274  * uhci_create_stats:
2275  *	Allocate and initialize the uhci kstat structures
2276  */
2277 void
2278 uhci_create_stats(uhci_state_t *uhcip)
2279 {
2280 	int			i;
2281 	char			kstatname[KSTAT_STRLEN];
2282 	char			*usbtypes[USB_N_COUNT_KSTATS] =
2283 	    {"ctrl", "isoch", "bulk", "intr"};
2284 	uint_t			instance = uhcip->uhci_instance;
2285 	const char		*dname = ddi_driver_name(uhcip->uhci_dip);
2286 	uhci_intrs_stats_t	*isp;
2287 
2288 	if (UHCI_INTRS_STATS(uhcip) == NULL) {
2289 		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
2290 		    dname, instance);
2291 		UHCI_INTRS_STATS(uhcip) = kstat_create("usba", instance,
2292 		    kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
2293 		    sizeof (uhci_intrs_stats_t) / sizeof (kstat_named_t),
2294 		    KSTAT_FLAG_PERSISTENT);
2295 
2296 		if (UHCI_INTRS_STATS(uhcip) != NULL) {
2297 			isp = UHCI_INTRS_STATS_DATA(uhcip);
2298 			kstat_named_init(&isp->uhci_intrs_hc_halted,
2299 			    "HC Halted", KSTAT_DATA_UINT64);
2300 			kstat_named_init(&isp->uhci_intrs_hc_process_err,
2301 			    "HC Process Errors", KSTAT_DATA_UINT64);
2302 			kstat_named_init(&isp->uhci_intrs_host_sys_err,
2303 			    "Host Sys Errors", KSTAT_DATA_UINT64);
2304 			kstat_named_init(&isp->uhci_intrs_resume_detected,
2305 			    "Resume Detected", KSTAT_DATA_UINT64);
2306 			kstat_named_init(&isp->uhci_intrs_usb_err_intr,
2307 			    "USB Error", KSTAT_DATA_UINT64);
2308 			kstat_named_init(&isp->uhci_intrs_usb_intr,
2309 			    "USB Interrupts", KSTAT_DATA_UINT64);
2310 			kstat_named_init(&isp->uhci_intrs_total,
2311 			    "Total Interrupts", KSTAT_DATA_UINT64);
2312 			kstat_named_init(&isp->uhci_intrs_not_claimed,
2313 			    "Not Claimed", KSTAT_DATA_UINT64);
2314 
2315 			UHCI_INTRS_STATS(uhcip)->ks_private = uhcip;
2316 			UHCI_INTRS_STATS(uhcip)->ks_update = nulldev;
2317 			kstat_install(UHCI_INTRS_STATS(uhcip));
2318 		}
2319 	}
2320 
2321 	if (UHCI_TOTAL_STATS(uhcip) == NULL) {
2322 		(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
2323 		    dname, instance);
2324 		UHCI_TOTAL_STATS(uhcip) = kstat_create("usba", instance,
2325 		    kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
2326 		    KSTAT_FLAG_PERSISTENT);
2327 
2328 		if (UHCI_TOTAL_STATS(uhcip) != NULL) {
2329 			kstat_install(UHCI_TOTAL_STATS(uhcip));
2330 		}
2331 	}
2332 
2333 	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
2334 		if (uhcip->uhci_count_stats[i] == NULL) {
2335 			(void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
2336 			    dname, instance, usbtypes[i]);
2337 			uhcip->uhci_count_stats[i] = kstat_create("usba",
2338 			    instance, kstatname, "usb_byte_count",
2339 			    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
2340 
2341 			if (uhcip->uhci_count_stats[i] != NULL) {
2342 				kstat_install(uhcip->uhci_count_stats[i]);
2343 			}
2344 		}
2345 	}
2346 }
2347 
2348 
2349 /*
2350  * uhci_destroy_stats:
2351  *	Clean up uhci kstat structures
2352  */
2353 void
2354 uhci_destroy_stats(uhci_state_t *uhcip)
2355 {
2356 	int i;
2357 
2358 	if (UHCI_INTRS_STATS(uhcip)) {
2359 		kstat_delete(UHCI_INTRS_STATS(uhcip));
2360 		UHCI_INTRS_STATS(uhcip) = NULL;
2361 	}
2362 
2363 	if (UHCI_TOTAL_STATS(uhcip)) {
2364 		kstat_delete(UHCI_TOTAL_STATS(uhcip));
2365 		UHCI_TOTAL_STATS(uhcip) = NULL;
2366 	}
2367 
2368 	for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
2369 		if (uhcip->uhci_count_stats[i]) {
2370 			kstat_delete(uhcip->uhci_count_stats[i]);
2371 			uhcip->uhci_count_stats[i] = NULL;
2372 		}
2373 	}
2374 }
2375 
2376 
2377 void
2378 uhci_do_intrs_stats(uhci_state_t *uhcip, int val)
2379 {
2380 	if (UHCI_INTRS_STATS(uhcip) == NULL) {
2381 
2382 		return;
2383 	}
2384 
2385 	UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_total.value.ui64++;
2386 	switch (val) {
2387 	case USBSTS_REG_HC_HALTED:
2388 		UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_hc_halted.value.ui64++;
2389 		break;
2390 	case USBSTS_REG_HC_PROCESS_ERR:
2391 		UHCI_INTRS_STATS_DATA(uhcip)->
2392 		    uhci_intrs_hc_process_err.value.ui64++;
2393 		break;
2394 	case USBSTS_REG_HOST_SYS_ERR:
2395 		UHCI_INTRS_STATS_DATA(uhcip)->
2396 		    uhci_intrs_host_sys_err.value.ui64++;
2397 		break;
2398 	case USBSTS_REG_RESUME_DETECT:
2399 		UHCI_INTRS_STATS_DATA(uhcip)->
2400 		    uhci_intrs_resume_detected.value.ui64++;
2401 		break;
2402 	case USBSTS_REG_USB_ERR_INTR:
2403 		UHCI_INTRS_STATS_DATA(uhcip)->
2404 		    uhci_intrs_usb_err_intr.value.ui64++;
2405 		break;
2406 	case USBSTS_REG_USB_INTR:
2407 		UHCI_INTRS_STATS_DATA(uhcip)->uhci_intrs_usb_intr.value.ui64++;
2408 		break;
2409 	default:
2410 		UHCI_INTRS_STATS_DATA(uhcip)->
2411 		    uhci_intrs_not_claimed.value.ui64++;
2412 		break;
2413 	}
2414 }
2415 
2416 
2417 void
2418 uhci_do_byte_stats(uhci_state_t *uhcip, size_t len, uint8_t attr, uint8_t addr)
2419 {
2420 	uint8_t type = attr & USB_EP_ATTR_MASK;
2421 	uint8_t dir = addr & USB_EP_DIR_MASK;
2422 
2423 	switch (dir) {
2424 	case USB_EP_DIR_IN:
2425 		UHCI_TOTAL_STATS_DATA(uhcip)->reads++;
2426 		UHCI_TOTAL_STATS_DATA(uhcip)->nread += len;
2427 		switch (type) {
2428 		case USB_EP_ATTR_CONTROL:
2429 			UHCI_CTRL_STATS(uhcip)->reads++;
2430 			UHCI_CTRL_STATS(uhcip)->nread += len;
2431 			break;
2432 		case USB_EP_ATTR_BULK:
2433 			UHCI_BULK_STATS(uhcip)->reads++;
2434 			UHCI_BULK_STATS(uhcip)->nread += len;
2435 			break;
2436 		case USB_EP_ATTR_INTR:
2437 			UHCI_INTR_STATS(uhcip)->reads++;
2438 			UHCI_INTR_STATS(uhcip)->nread += len;
2439 			break;
2440 		case USB_EP_ATTR_ISOCH:
2441 			UHCI_ISOC_STATS(uhcip)->reads++;
2442 			UHCI_ISOC_STATS(uhcip)->nread += len;
2443 			break;
2444 		}
2445 		break;
2446 	case USB_EP_DIR_OUT:
2447 		UHCI_TOTAL_STATS_DATA(uhcip)->writes++;
2448 		UHCI_TOTAL_STATS_DATA(uhcip)->nwritten += len;
2449 		switch (type) {
2450 		case USB_EP_ATTR_CONTROL:
2451 			UHCI_CTRL_STATS(uhcip)->writes++;
2452 			UHCI_CTRL_STATS(uhcip)->nwritten += len;
2453 			break;
2454 		case USB_EP_ATTR_BULK:
2455 			UHCI_BULK_STATS(uhcip)->writes++;
2456 			UHCI_BULK_STATS(uhcip)->nwritten += len;
2457 			break;
2458 		case USB_EP_ATTR_INTR:
2459 			UHCI_INTR_STATS(uhcip)->writes++;
2460 			UHCI_INTR_STATS(uhcip)->nwritten += len;
2461 			break;
2462 		case USB_EP_ATTR_ISOCH:
2463 			UHCI_ISOC_STATS(uhcip)->writes++;
2464 			UHCI_ISOC_STATS(uhcip)->nwritten += len;
2465 			break;
2466 		}
2467 		break;
2468 	}
2469 }
2470 
2471 
2472 /*
2473  * uhci_free_tw:
2474  *	Free the Transfer Wrapper (TW).
2475  */
2476 void
2477 uhci_free_tw(uhci_state_t *uhcip, uhci_trans_wrapper_t *tw)
2478 {
2479 	int rval, i;
2480 
2481 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl, "uhci_free_tw:");
2482 
2483 	ASSERT(tw != NULL);
2484 
2485 	if (tw->tw_isoc_strtlen > 0) {
2486 		ASSERT(tw->tw_isoc_bufs != NULL);
2487 		for (i = 0; i < tw->tw_ncookies; i++) {
2488 			rval = ddi_dma_unbind_handle(
2489 			    tw->tw_isoc_bufs[i].dma_handle);
2490 			ASSERT(rval == USB_SUCCESS);
2491 			ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
2492 			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
2493 		}
2494 		kmem_free(tw->tw_isoc_bufs, tw->tw_isoc_strtlen);
2495 	} else if (tw->tw_dmahandle != NULL) {
2496 		rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
2497 		ASSERT(rval == DDI_SUCCESS);
2498 
2499 		ddi_dma_mem_free(&tw->tw_accesshandle);
2500 		ddi_dma_free_handle(&tw->tw_dmahandle);
2501 	}
2502 
2503 	kmem_free(tw, sizeof (uhci_trans_wrapper_t));
2504 }
2505 
2506 
2507 /*
2508  * uhci_deallocate_tw:
2509  *	Deallocate of a Transaction Wrapper (TW) and this involves
2510  *	the freeing of DMA resources.
2511  */
2512 void
2513 uhci_deallocate_tw(uhci_state_t *uhcip,
2514     uhci_pipe_private_t *pp, uhci_trans_wrapper_t *tw)
2515 {
2516 	uhci_trans_wrapper_t	*head;
2517 
2518 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2519 	    "uhci_deallocate_tw:");
2520 
2521 	/*
2522 	 * If the transfer wrapper has no Host Controller (HC)
2523 	 * Transfer Descriptors (TD) associated with it,  then
2524 	 * remove the transfer wrapper. The transfers are done
2525 	 * in FIFO order, so this should be the first transfer
2526 	 * wrapper on the list.
2527 	 */
2528 	if (tw->tw_hctd_head != NULL) {
2529 		ASSERT(tw->tw_hctd_tail != NULL);
2530 
2531 		return;
2532 	}
2533 
2534 	ASSERT(tw->tw_hctd_tail == NULL);
2535 	ASSERT(pp->pp_tw_head != NULL);
2536 
2537 	/*
2538 	 * If pp->pp_tw_head is NULL, set the tail also to NULL.
2539 	 */
2540 	head = pp->pp_tw_head;
2541 
2542 	if (head == tw) {
2543 		pp->pp_tw_head = head->tw_next;
2544 		if (pp->pp_tw_head == NULL) {
2545 			pp->pp_tw_tail = NULL;
2546 		}
2547 	} else {
2548 		while (head->tw_next != tw)
2549 			head = head->tw_next;
2550 		head->tw_next = tw->tw_next;
2551 		if (tw->tw_next == NULL) {
2552 			pp->pp_tw_tail = head;
2553 		}
2554 	}
2555 	uhci_free_tw(uhcip, tw);
2556 }
2557 
2558 
/*
 * uhci_delete_td:
 *	Unlink a TD from the controller-wide outstanding-TD list and
 *	from its transfer wrapper's TD list, then mark it free.
 */
void
uhci_delete_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uhci_td_t		*tmp_td;
	uhci_trans_wrapper_t	*tw = td->tw;

	/* Remove td from the doubly-linked outstanding-TD list */
	if ((td->outst_td_next == NULL) && (td->outst_td_prev == NULL)) {
		/* td was the only outstanding TD */
		uhcip->uhci_outst_tds_head = NULL;
		uhcip->uhci_outst_tds_tail = NULL;
	} else if (td->outst_td_next == NULL) {
		/* td was the list tail */
		td->outst_td_prev->outst_td_next = NULL;
		uhcip->uhci_outst_tds_tail = td->outst_td_prev;
	} else if (td->outst_td_prev == NULL) {
		/* td was the list head */
		td->outst_td_next->outst_td_prev = NULL;
		uhcip->uhci_outst_tds_head = td->outst_td_next;
	} else {
		/* td was in the middle */
		td->outst_td_prev->outst_td_next = td->outst_td_next;
		td->outst_td_next->outst_td_prev = td->outst_td_prev;
	}

	/* Remove td from the TW's singly-linked TD list */
	tmp_td = tw->tw_hctd_head;

	if (tmp_td != td) {
		/* Walk to td's predecessor, then splice td out */
		while (tmp_td->tw_td_next != td) {
			tmp_td = tmp_td->tw_td_next;
		}
		ASSERT(tmp_td);
		tmp_td->tw_td_next = td->tw_td_next;
		if (td->tw_td_next == NULL) {
			tw->tw_hctd_tail = tmp_td;
		}
	} else {
		/* td was the TW list head */
		tw->tw_hctd_head = tw->tw_hctd_head->tw_td_next;
		if (tw->tw_hctd_head == NULL) {
			tw->tw_hctd_tail = NULL;
		}
	}

	td->flag  = TD_FLAG_FREE;
}
2599 
2600 
/*
 * uhci_remove_tds_tws:
 *	Flush all transfer wrappers queued on a pipe: complete each
 *	pending request back to the client (periodic-in resources are
 *	released instead of calling back), then delete the TDs and free
 *	the wrappers that were claimed by uhci_modify_td_active_bits().
 */
void
uhci_remove_tds_tws(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph)
{
	usb_opaque_t		curr_reqp;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	usb_ep_descr_t		*ept = &pp->pp_pipe_handle->p_ep;
	uhci_trans_wrapper_t	*tw_tmp;
	uhci_trans_wrapper_t	*tw_head = pp->pp_tw_head;

	while (tw_head != NULL) {
		/* Advance now: tw_tmp may be freed below */
		tw_tmp = tw_head;
		tw_head = tw_head->tw_next;

		curr_reqp = tw_tmp->tw_curr_xfer_reqp;
		if (curr_reqp) {
			/* do this for control/bulk/intr */
			if ((tw_tmp->tw_direction == PID_IN) &&
			    (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_INTR)) {
				uhci_deallocate_periodic_in_resource(uhcip,
				    pp, tw_tmp);
			} else {
				uhci_hcdi_callback(uhcip, pp,
				    pp->pp_pipe_handle, tw_tmp, USB_CR_FLUSHED);
			}
		} /* end of curr_reqp */

		/*
		 * Only TWs claimed by uhci_modify_td_active_bits() are
		 * torn down here; leave the rest untouched.
		 */
		if (tw_tmp->tw_claim != UHCI_MODIFY_TD_BITS_CLAIMED) {
			continue;
		}

		/* Unlink and free every TD still owned by this TW */
		while (tw_tmp->tw_hctd_head != NULL) {
			uhci_delete_td(uhcip, tw_tmp->tw_hctd_head);
		}

		uhci_deallocate_tw(uhcip, pp, tw_tmp);
	}
}
2640 
2641 
2642 /*
2643  * uhci_remove_qh:
2644  *	Remove the Queue Head from the Host Controller's
2645  *	appropriate QH list.
2646  */
2647 void
2648 uhci_remove_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
2649 {
2650 	uhci_td_t	*dummy_td;
2651 
2652 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2653 
2654 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2655 	    "uhci_remove_qh:");
2656 
2657 	dummy_td = pp->pp_qh->td_tailp;
2658 	dummy_td->flag = TD_FLAG_FREE;
2659 
2660 	switch (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep)) {
2661 	case USB_EP_ATTR_CONTROL:
2662 		uhci_remove_ctrl_qh(uhcip, pp);
2663 		break;
2664 	case USB_EP_ATTR_BULK:
2665 		uhci_remove_bulk_qh(uhcip, pp);
2666 		break;
2667 	case USB_EP_ATTR_INTR:
2668 		uhci_remove_intr_qh(uhcip, pp);
2669 		break;
2670 	}
2671 }
2672 
2673 
2674 static void
2675 uhci_remove_intr_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
2676 {
2677 	queue_head_t   *qh = pp->pp_qh;
2678 	queue_head_t   *next_lattice_qh =
2679 	    QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);
2680 
2681 	qh->prev_qh->link_ptr	 = qh->link_ptr;
2682 	next_lattice_qh->prev_qh = qh->prev_qh;
2683 	qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
2684 
2685 }
2686 
2687 /*
2688  * uhci_remove_bulk_qh:
2689  *	Remove a bulk QH from the Host Controller's QH list. There may be a
2690  *	loop for bulk QHs, we must care about this while removing a bulk QH.
2691  */
2692 static void
2693 uhci_remove_bulk_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
2694 {
2695 	queue_head_t   *qh = pp->pp_qh;
2696 	queue_head_t   *next_lattice_qh;
2697 	uint32_t	paddr;
2698 
2699 	paddr = (GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);
2700 	next_lattice_qh = (qh == uhcip->uhci_bulk_xfers_q_tail) ?
2701 	    0 : QH_VADDR(paddr);
2702 
2703 	if ((qh == uhcip->uhci_bulk_xfers_q_tail) &&
2704 	    (qh->prev_qh == uhcip->uhci_bulk_xfers_q_head)) {
2705 		SetQH32(uhcip, qh->prev_qh->link_ptr, HC_END_OF_LIST);
2706 	} else {
2707 		qh->prev_qh->link_ptr = qh->link_ptr;
2708 	}
2709 
2710 	if (next_lattice_qh == NULL) {
2711 		uhcip->uhci_bulk_xfers_q_tail = qh->prev_qh;
2712 	} else {
2713 		next_lattice_qh->prev_qh = qh->prev_qh;
2714 	}
2715 
2716 	qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
2717 
2718 }
2719 
2720 
/*
 * uhci_remove_ctrl_qh:
 *	Unlink a control QH from the control QH list and mark it free;
 *	pulls the list tail back when the removed QH was the last one.
 */
static void
uhci_remove_ctrl_qh(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	queue_head_t   *qh = pp->pp_qh;
	queue_head_t   *next_lattice_qh =
	    QH_VADDR(GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK);

	/* Bypass qh in the hardware list */
	qh->prev_qh->link_ptr = qh->link_ptr;
	if (next_lattice_qh->prev_qh != NULL) {
		next_lattice_qh->prev_qh = qh->prev_qh;
	} else {
		/* qh was the last control QH; pull the tail back */
		uhcip->uhci_ctrl_xfers_q_tail = qh->prev_qh;
	}

	qh->qh_flag = QUEUE_HEAD_FLAG_FREE;
}
2737 
2738 
2739 /*
2740  * uhci_allocate_td_from_pool:
2741  *	Allocate a Transfer Descriptor (TD) from the TD buffer pool.
2742  */
2743 static uhci_td_t *
2744 uhci_allocate_td_from_pool(uhci_state_t *uhcip)
2745 {
2746 	int		index;
2747 	uhci_td_t	*td;
2748 
2749 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2750 
2751 	/*
2752 	 * Search for a blank Transfer Descriptor (TD)
2753 	 * in the TD buffer pool.
2754 	 */
2755 	for (index = 0; index < uhci_td_pool_size; index ++) {
2756 		if (uhcip->uhci_td_pool_addr[index].flag == TD_FLAG_FREE) {
2757 			break;
2758 		}
2759 	}
2760 
2761 	if (index == uhci_td_pool_size) {
2762 		USB_DPRINTF_L2(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2763 		    "uhci_allocate_td_from_pool: TD exhausted");
2764 
2765 		return (NULL);
2766 	}
2767 
2768 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, uhcip->uhci_log_hdl,
2769 	    "uhci_allocate_td_from_pool: Allocated %d", index);
2770 
2771 	/* Create a new dummy for the end of the TD list */
2772 	td = &uhcip->uhci_td_pool_addr[index];
2773 
2774 	/* Mark the newly allocated TD as a dummy */
2775 	td->flag =  TD_FLAG_DUMMY;
2776 	td->qh_td_prev	=  NULL;
2777 
2778 	return (td);
2779 }
2780 
2781 
2782 /*
2783  * uhci_insert_bulk_td:
2784  */
2785 int
2786 uhci_insert_bulk_td(
2787 	uhci_state_t		*uhcip,
2788 	usba_pipe_handle_data_t	*ph,
2789 	usb_bulk_req_t		*req,
2790 	usb_flags_t		flags)
2791 {
2792 	size_t			length;
2793 	uint_t			mps;	/* MaxPacketSize */
2794 	uint_t			num_bulk_tds, i, j;
2795 	uint32_t		buf_offs;
2796 	uhci_td_t		*bulk_td_ptr;
2797 	uhci_td_t		*current_dummy, *tmp_td;
2798 	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
2799 	uhci_trans_wrapper_t	*tw;
2800 	uhci_bulk_isoc_xfer_t	*bulk_xfer_info;
2801 	uhci_bulk_isoc_td_pool_t *td_pool_ptr;
2802 
2803 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2804 	    "uhci_insert_bulk_td: req: 0x%p, flags = 0x%x", (void *)req, flags);
2805 
2806 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
2807 
2808 	/*
2809 	 * Create transfer wrapper
2810 	 */
2811 	if ((tw = uhci_create_transfer_wrapper(uhcip, pp, req->bulk_len,
2812 	    flags)) == NULL) {
2813 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2814 		    "uhci_insert_bulk_td: TW allocation failed");
2815 
2816 		return (USB_NO_RESOURCES);
2817 	}
2818 
2819 	tw->tw_bytes_xfered		= 0;
2820 	tw->tw_bytes_pending		= req->bulk_len;
2821 	tw->tw_handle_td		= uhci_handle_bulk_td;
2822 	tw->tw_handle_callback_value	= (usb_opaque_t)req->bulk_data;
2823 	tw->tw_timeout_cnt		= req->bulk_timeout;
2824 	tw->tw_data			= req->bulk_data;
2825 	tw->tw_curr_xfer_reqp		= (usb_opaque_t)req;
2826 
2827 	/* Get the bulk pipe direction */
2828 	tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
2829 	    PID_OUT : PID_IN;
2830 
2831 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2832 	    "uhci_insert_bulk_td: direction: 0x%x", tw->tw_direction);
2833 
2834 	/* If the DATA OUT, copy the data into transfer buffer. */
2835 	if (tw->tw_direction == PID_OUT) {
2836 		if (req->bulk_len) {
2837 			ASSERT(req->bulk_data != NULL);
2838 
2839 			/* Copy the data into the message */
2840 			ddi_rep_put8(tw->tw_accesshandle,
2841 			    req->bulk_data->b_rptr,
2842 			    (uint8_t *)tw->tw_buf,
2843 			    req->bulk_len, DDI_DEV_AUTOINCR);
2844 		}
2845 	}
2846 
2847 	/* Get the max packet size.  */
2848 	length = mps = pp->pp_pipe_handle->p_ep.wMaxPacketSize;
2849 
2850 	/*
2851 	 * Calculate number of TD's to insert in the current frame interval.
2852 	 * Max number TD's allowed (driver implementation) is 128
2853 	 * in one frame interval. Once all the TD's are completed
2854 	 * then the remaining TD's will be inserted into the lattice
2855 	 * in the uhci_handle_bulk_td().
2856 	 */
2857 	if ((tw->tw_bytes_pending / mps) >= MAX_NUM_BULK_TDS_PER_XFER) {
2858 		num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
2859 	} else {
2860 		num_bulk_tds = (tw->tw_bytes_pending / mps);
2861 
2862 		if (tw->tw_bytes_pending % mps || tw->tw_bytes_pending == 0) {
2863 			num_bulk_tds++;
2864 			length = (tw->tw_bytes_pending % mps);
2865 		}
2866 	}
2867 
2868 	/*
2869 	 * Allocate memory for the bulk xfer information structure
2870 	 */
2871 	if ((bulk_xfer_info = kmem_zalloc(
2872 	    sizeof (uhci_bulk_isoc_xfer_t), KM_NOSLEEP)) == NULL) {
2873 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2874 		    "uhci_insert_bulk_td: kmem_zalloc failed");
2875 
2876 		/* Free the transfer wrapper */
2877 		uhci_deallocate_tw(uhcip, pp, tw);
2878 
2879 		return (USB_FAILURE);
2880 	}
2881 
2882 	/* Allocate memory for the bulk TD's */
2883 	if (uhci_alloc_bulk_isoc_tds(uhcip, num_bulk_tds, bulk_xfer_info) !=
2884 	    USB_SUCCESS) {
2885 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2886 		    "uhci_insert_bulk_td: alloc_bulk_isoc_tds failed");
2887 
2888 		kmem_free(bulk_xfer_info, sizeof (uhci_bulk_isoc_xfer_t));
2889 
2890 		/* Free the transfer wrapper */
2891 		uhci_deallocate_tw(uhcip, pp, tw);
2892 
2893 		return (USB_FAILURE);
2894 	}
2895 
2896 	td_pool_ptr = &bulk_xfer_info->td_pools[0];
2897 	bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
2898 	bulk_td_ptr[0].qh_td_prev = NULL;
2899 	current_dummy = pp->pp_qh->td_tailp;
2900 	buf_offs = 0;
2901 	pp->pp_qh->bulk_xfer_info = bulk_xfer_info;
2902 
2903 	/* Fill up all the bulk TD's */
2904 	for (i = 0; i < bulk_xfer_info->num_pools; i++) {
2905 		for (j = 0; j < (td_pool_ptr->num_tds - 1); j++) {
2906 			uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
2907 			    &bulk_td_ptr[j+1], BULKTD_PADDR(td_pool_ptr,
2908 			    &bulk_td_ptr[j+1]), ph, buf_offs, mps, tw);
2909 			buf_offs += mps;
2910 		}
2911 
2912 		/* fill in the last TD */
2913 		if (i == (bulk_xfer_info->num_pools - 1)) {
2914 			uhci_fill_in_bulk_isoc_td(uhcip, &bulk_td_ptr[j],
2915 			    current_dummy, TD_PADDR(current_dummy),
2916 			    ph, buf_offs, length, tw);
2917 		} else {
2918 			/* fill in the TD at the tail of a pool */
2919 			tmp_td = &bulk_td_ptr[j];
2920 			td_pool_ptr = &bulk_xfer_info->td_pools[i + 1];
2921 			bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
2922 			uhci_fill_in_bulk_isoc_td(uhcip, tmp_td,
2923 			    &bulk_td_ptr[0], BULKTD_PADDR(td_pool_ptr,
2924 			    &bulk_td_ptr[0]), ph, buf_offs, mps, tw);
2925 			buf_offs += mps;
2926 		}
2927 	}
2928 
2929 	bulk_xfer_info->num_tds	= (ushort_t)num_bulk_tds;
2930 
2931 	/*
2932 	 * Point the end of the lattice tree to the start of the bulk xfers
2933 	 * queue head. This allows the HC to execute the same Queue Head/TD
2934 	 * in the same frame. There are some bulk devices, which NAKs after
2935 	 * completing each TD. As a result, the performance on such devices
2936 	 * is very bad.  This loop will  provide a chance to execute NAk'ed
2937 	 * bulk TDs again in the same frame.
2938 	 */
2939 	if (uhcip->uhci_pending_bulk_cmds++ == 0) {
2940 		uhcip->uhci_bulk_xfers_q_tail->link_ptr =
2941 		    uhcip->uhci_bulk_xfers_q_head->link_ptr;
2942 		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
2943 		    "uhci_insert_bulk_td: count = %d no tds  %d",
2944 		    uhcip->uhci_pending_bulk_cmds, num_bulk_tds);
2945 	}
2946 
2947 	/* Insert on the bulk queue head for the execution by HC */
2948 	SetQH32(uhcip, pp->pp_qh->element_ptr,
2949 	    bulk_xfer_info->td_pools[0].cookie.dmac_address);
2950 
2951 	return (USB_SUCCESS);
2952 }
2953 
2954 
2955 /*
2956  * uhci_fill_in_bulk_isoc_td
2957  *     Fills the bulk/isoc TD
2958  *
2959  * offset - different meanings for bulk and isoc TDs:
2960  *	    starting offset into the TW buffer for a bulk TD
2961  *	    and the index into the isoc packet list for an isoc TD
2962  */
2963 void
2964 uhci_fill_in_bulk_isoc_td(uhci_state_t *uhcip, uhci_td_t *current_td,
2965 	uhci_td_t		*next_td,
2966 	uint32_t		next_td_paddr,
2967 	usba_pipe_handle_data_t	*ph,
2968 	uint_t			offset,
2969 	uint_t			length,
2970 	uhci_trans_wrapper_t	*tw)
2971 {
2972 	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
2973 	usb_ep_descr_t		*ept = &pp->pp_pipe_handle->p_ep;
2974 	uint32_t		buf_addr;
2975 
2976 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
2977 	    "uhci_fill_in_bulk_isoc_td: tw 0x%p offs 0x%x length 0x%x",
2978 	    (void *)tw, offset, length);
2979 
2980 	bzero((char *)current_td, sizeof (uhci_td_t));
2981 	SetTD32(uhcip, current_td->link_ptr, next_td_paddr | HC_DEPTH_FIRST);
2982 
2983 	switch (UHCI_XFER_TYPE(ept)) {
2984 	case USB_EP_ATTR_ISOCH:
2985 		if (((usb_isoc_req_t *)tw->tw_curr_xfer_reqp)->isoc_attributes
2986 		    & USB_ATTRS_SHORT_XFER_OK) {
2987 			SetTD_spd(uhcip, current_td, 1);
2988 		}
2989 		break;
2990 	case USB_EP_ATTR_BULK:
2991 		if (((usb_bulk_req_t *)tw->tw_curr_xfer_reqp)->bulk_attributes
2992 		    & USB_ATTRS_SHORT_XFER_OK) {
2993 			SetTD_spd(uhcip, current_td, 1);
2994 		}
2995 		break;
2996 	}
2997 
2998 	mutex_enter(&ph->p_usba_device->usb_mutex);
2999 
3000 	SetTD_c_err(uhcip, current_td, UHCI_MAX_ERR_COUNT);
3001 	SetTD_status(uhcip, current_td, UHCI_TD_ACTIVE);
3002 	SetTD_ioc(uhcip, current_td, INTERRUPT_ON_COMPLETION);
3003 	SetTD_mlen(uhcip, current_td,
3004 	    (length == 0) ? ZERO_LENGTH : (length - 1));
3005 	SetTD_dtogg(uhcip, current_td, pp->pp_data_toggle);
3006 	SetTD_devaddr(uhcip, current_td, ph->p_usba_device->usb_addr);
3007 	SetTD_endpt(uhcip, current_td, ph->p_ep.bEndpointAddress &
3008 	    END_POINT_ADDRESS_MASK);
3009 	SetTD_PID(uhcip, current_td, tw->tw_direction);
3010 
3011 	/* Get the right buffer address for the current TD */
3012 	switch (UHCI_XFER_TYPE(ept)) {
3013 	case USB_EP_ATTR_ISOCH:
3014 		buf_addr = tw->tw_isoc_bufs[offset].cookie.dmac_address;
3015 		break;
3016 	case USB_EP_ATTR_BULK:
3017 		buf_addr = uhci_get_tw_paddr_by_offs(uhcip, offset,
3018 		    length, tw);
3019 		break;
3020 	}
3021 	SetTD32(uhcip, current_td->buffer_address, buf_addr);
3022 
3023 	/*
3024 	 * Adjust the data toggle.
3025 	 * The data toggle bit must always be 0 for isoc transfers.
3026 	 * And set the "iso" bit in the TD for isoc transfers.
3027 	 */
3028 	if (UHCI_XFER_TYPE(ept) == USB_EP_ATTR_ISOCH) {
3029 		pp->pp_data_toggle = 0;
3030 		SetTD_iso(uhcip, current_td, 1);
3031 	} else {
3032 		ADJ_DATA_TOGGLE(pp);
3033 		next_td->qh_td_prev = current_td;
3034 		pp->pp_qh->td_tailp = next_td;
3035 	}
3036 
3037 	current_td->outst_td_next = NULL;
3038 	current_td->outst_td_prev = uhcip->uhci_outst_tds_tail;
3039 	if (uhcip->uhci_outst_tds_head == NULL) {
3040 		uhcip->uhci_outst_tds_head = current_td;
3041 	} else {
3042 		uhcip->uhci_outst_tds_tail->outst_td_next = current_td;
3043 	}
3044 	uhcip->uhci_outst_tds_tail = current_td;
3045 	current_td->tw = tw;
3046 
3047 	if (tw->tw_hctd_head == NULL) {
3048 		ASSERT(tw->tw_hctd_tail == NULL);
3049 		tw->tw_hctd_head = current_td;
3050 		tw->tw_hctd_tail = current_td;
3051 	} else {
3052 		/* Add the td to the end of the list */
3053 		tw->tw_hctd_tail->tw_td_next = current_td;
3054 		tw->tw_hctd_tail = current_td;
3055 	}
3056 
3057 	mutex_exit(&ph->p_usba_device->usb_mutex);
3058 }
3059 
3060 
3061 /*
3062  * uhci_alloc_bulk_isoc_tds:
3063  *	- Allocates the isoc/bulk TD pools. It will allocate one whole
3064  *	  pool to store all the TDs if the system allows. Only when the
3065  *	  first allocation fails, it tries to allocate several small
3066  *	  pools with each pool limited in physical page size.
3067  */
3068 static int
3069 uhci_alloc_bulk_isoc_tds(
3070 	uhci_state_t		*uhcip,
3071 	uint_t			num_tds,
3072 	uhci_bulk_isoc_xfer_t	*info)
3073 {
3074 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3075 	    "uhci_alloc_bulk_isoc_tds: num_tds: 0x%x info: 0x%p",
3076 	    num_tds, (void *)info);
3077 
3078 	info->num_pools = 1;
3079 	/* allocate as a whole pool at the first time */
3080 	if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) !=
3081 	    USB_SUCCESS) {
3082 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3083 		    "alloc_memory_for_tds failed: num_tds %d num_pools %d",
3084 		    num_tds, info->num_pools);
3085 
3086 		/* reduce the td number per pool and alloc again */
3087 		info->num_pools = num_tds / UHCI_MAX_TD_NUM_PER_POOL;
3088 		if (num_tds % UHCI_MAX_TD_NUM_PER_POOL) {
3089 			info->num_pools++;
3090 		}
3091 
3092 		if (uhci_alloc_memory_for_tds(uhcip, num_tds, info) !=
3093 		    USB_SUCCESS) {
3094 			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3095 			    "alloc_memory_for_tds failed: num_tds %d "
3096 			    "num_pools %d", num_tds, info->num_pools);
3097 
3098 			return (USB_NO_RESOURCES);
3099 		}
3100 	}
3101 
3102 	return (USB_SUCCESS);
3103 }
3104 
3105 
3106 /*
3107  * uhci_alloc_memory_for_tds:
3108  *	- Allocates memory for the isoc/bulk td pools.
3109  */
3110 static int
3111 uhci_alloc_memory_for_tds(
3112 	uhci_state_t		*uhcip,
3113 	uint_t			num_tds,
3114 	uhci_bulk_isoc_xfer_t	*info)
3115 {
3116 	int			result, i, j, err;
3117 	size_t			real_length;
3118 	uint_t			ccount, num;
3119 	ddi_device_acc_attr_t	dev_attr;
3120 	uhci_bulk_isoc_td_pool_t *td_pool_ptr1, *td_pool_ptr2;
3121 
3122 	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3123 	    "uhci_alloc_memory_for_tds: num_tds: 0x%x info: 0x%p "
3124 	    "num_pools: %u", num_tds, (void *)info, info->num_pools);
3125 
3126 	/* The host controller will be little endian */
3127 	dev_attr.devacc_attr_version		= DDI_DEVICE_ATTR_V0;
3128 	dev_attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
3129 	dev_attr.devacc_attr_dataorder		= DDI_STRICTORDER_ACC;
3130 
3131 	/* Allocate the TD pool structures */
3132 	if ((info->td_pools = kmem_zalloc(
3133 	    (sizeof (uhci_bulk_isoc_td_pool_t) * info->num_pools),
3134 	    KM_SLEEP)) == NULL) {
3135 		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3136 		    "uhci_alloc_memory_for_tds: alloc td_pools failed");
3137 
3138 		return (USB_FAILURE);
3139 	}
3140 
3141 	for (i = 0; i < info->num_pools; i++) {
3142 		if (info->num_pools == 1) {
3143 			num = num_tds;
3144 		} else if (i < (info->num_pools - 1)) {
3145 			num = UHCI_MAX_TD_NUM_PER_POOL;
3146 		} else {
3147 			num = (num_tds % UHCI_MAX_TD_NUM_PER_POOL);
3148 		}
3149 
3150 		td_pool_ptr1 = &info->td_pools[i];
3151 
3152 		/* Allocate the bulk TD pool DMA handle */
3153 		if (ddi_dma_alloc_handle(uhcip->uhci_dip,
3154 		    &uhcip->uhci_dma_attr, DDI_DMA_SLEEP, 0,
3155 		    &td_pool_ptr1->dma_handle) != DDI_SUCCESS) {
3156 
3157 			for (j = 0; j < i; j++) {
3158 				td_pool_ptr2 = &info->td_pools[j];
3159 				result = ddi_dma_unbind_handle(
3160 				    td_pool_ptr2->dma_handle);
3161 				ASSERT(result == DDI_SUCCESS);
3162 				ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
3163 				ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
3164 			}
3165 
3166 			kmem_free(info->td_pools,
3167 			    (sizeof (uhci_bulk_isoc_td_pool_t) *
3168 			    info->num_pools));
3169 
3170 			return (USB_FAILURE);
3171 		}
3172 
3173 		/* Allocate the memory for the bulk TD pool */
3174 		if (ddi_dma_mem_alloc(td_pool_ptr1->dma_handle,
3175 		    num * sizeof (uhci_td_t), &dev_attr,
3176 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
3177 		    &td_pool_ptr1->pool_addr, &real_length,
3178 		    &td_pool_ptr1->mem_handle) != DDI_SUCCESS) {
3179 
3180 			ddi_dma_free_handle(&td_pool_ptr1->dma_handle);
3181 
3182 			for (j = 0; j < i; j++) {
3183 				td_pool_ptr2 = &info->td_pools[j];
3184 				result = ddi_dma_unbind_handle(
3185 				    td_pool_ptr2->dma_handle);
3186 				ASSERT(result == DDI_SUCCESS);
3187 				ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
3188 				ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
3189 			}
3190 
3191 			kmem_free(info->td_pools,
3192 			    (sizeof (uhci_bulk_isoc_td_pool_t) *
3193 			    info->num_pools));
3194 
3195 			return (USB_FAILURE);
3196 		}
3197 
3198 		/* Map the bulk TD pool into the I/O address space */
3199 		result = ddi_dma_addr_bind_handle(td_pool_ptr1->dma_handle,
3200 		    NULL, (caddr_t)td_pool_ptr1->pool_addr, real_length,
3201 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
3202 		    &td_pool_ptr1->cookie, &ccount);
3203 
3204 		/* Process the result */
3205 		err = USB_SUCCESS;
3206 
3207 		if (result != DDI_DMA_MAPPED) {
3208 			USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
3209 			    "uhci_allocate_memory_for_tds: Result = %d",
3210 			    result);
3211 			uhci_decode_ddi_dma_addr_bind_handle_result(uhcip,
3212 			    result);
3213 
3214 			err = USB_FAILURE;
3215 		}
3216 
3217 		if ((result == DDI_DMA_MAPPED) && (ccount != 1)) {
3218 			/* The cookie count should be 1 */
3219 			USB_DPRINTF_L2(PRINT_MASK_ATTA,
3220 			    uhcip->uhci_log_hdl,
3221 			    "uhci_allocate_memory_for_tds: "
3222 			    "More than 1 cookie");
3223 
3224 			result = ddi_dma_unbind_handle(
3225 			    td_pool_ptr1->dma_handle);
3226 			ASSERT(result == DDI_SUCCESS);
3227 
3228 			err = USB_FAILURE;
3229 		}
3230 
3231 		if (err == USB_FAILURE) {
3232 
3233 			ddi_dma_mem_free(&td_pool_ptr1->mem_handle);
3234 			ddi_dma_free_handle(&td_pool_ptr1->dma_handle);
3235 
3236 			for (j = 0; j < i; j++) {
3237 				td_pool_ptr2 = &info->td_pools[j];
3238 				result = ddi_dma_unbind_handle(
3239 				    td_pool_ptr2->dma_handle);
3240 				ASSERT(result == DDI_SUCCESS);
3241 				ddi_dma_mem_free(&td_pool_ptr2->mem_handle);
3242 				ddi_dma_free_handle(&td_pool_ptr2->dma_handle);
3243 			}
3244 
3245 			kmem_free(info->td_pools,
3246 			    (sizeof (uhci_bulk_isoc_td_pool_t) *
3247 			    info->num_pools));
3248 
3249 			return (USB_FAILURE);
3250 		}
3251 
3252 		bzero((void *)td_pool_ptr1->pool_addr,
3253 		    num * sizeof (uhci_td_t));
3254 		td_pool_ptr1->num_tds = (ushort_t)num;
3255 	}
3256 
3257 	return (USB_SUCCESS);
3258 }
3259 
3260 
3261 /*
3262  * uhci_handle_bulk_td:
3263  *
3264  *	Handles the completed bulk transfer descriptors
3265  */
void
uhci_handle_bulk_td(uhci_state_t *uhcip, uhci_td_t *td)
{
	uint_t			num_bulk_tds, index, td_count, j;
	usb_cr_t		error;
	uint_t			length, bytes_xfered;
	ushort_t		MaxPacketSize;
	uint32_t		buf_offs, paddr;
	uhci_td_t		*bulk_td_ptr, *current_dummy, *td_head;
	uhci_td_t		*tmp_td;
	queue_head_t		*qh, *next_qh;
	uhci_trans_wrapper_t	*tw = td->tw;
	uhci_pipe_private_t	*pp = tw->tw_pipe_private;
	uhci_bulk_isoc_xfer_t	*bulk_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;
	usba_pipe_handle_data_t	*ph;

	USB_DPRINTF_L4(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_handle_bulk_td: td = 0x%p tw = 0x%p", (void *)td, (void *)tw);

	/*
	 * Update the tw_bytes_pending, and tw_bytes_xfered
	 */
	bytes_xfered = ZERO_LENGTH;

	/*
	 * Check whether there are any errors occurred in the xfer.
	 * If so, update the data_toggle for the queue head and
	 * return error to the upper layer.
	 */
	if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
		uhci_handle_bulk_td_errors(uhcip, td);

		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_handle_bulk_td: error; data toggle: 0x%x",
		    pp->pp_data_toggle);

		return;
	}

	/*
	 * Update the tw_bytes_pending, and tw_bytes_xfered.
	 * Hardware reports actual length as (n - 1); ZERO_LENGTH encodes 0.
	 */
	bytes_xfered = GetTD_alen(uhcip, td);
	if (bytes_xfered != ZERO_LENGTH) {
		tw->tw_bytes_pending -= (bytes_xfered + 1);
		tw->tw_bytes_xfered  += (bytes_xfered + 1);
	}

	/*
	 * Get Bulk pipe information and pipe handle
	 */
	bulk_xfer_info	= pp->pp_qh->bulk_xfer_info;
	ph = tw->tw_pipe_private->pp_pipe_handle;

	/*
	 * Check whether data underrun occurred.
	 * If so, complete the transfer
	 * Update the data toggle bit
	 */
	if (bytes_xfered != GetTD_mlen(uhcip, td)) {
		/* Force this to be the last TD of the transfer */
		bulk_xfer_info->num_tds = 1;
		USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_handle_bulk_td: Data underrun occured");

		pp->pp_data_toggle = GetTD_dtogg(uhcip, td) == 0 ? 1 : 0;
	}

	/*
	 * If the TD's in the current frame are completed, then check
	 * whether we have any more bytes to xfer. If so, insert TD's.
	 * If no more bytes needs to be transferred, then do callback to the
	 * upper layer.
	 * If the TD's in the current frame are not completed, then
	 * just delete the TD from the linked lists.
	 */
	USB_DPRINTF_L3(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_handle_bulk_td: completed TD data toggle: 0x%x",
	    GetTD_dtogg(uhcip, td));

	if (--bulk_xfer_info->num_tds == 0) {
		uhci_delete_td(uhcip, td);

		/* Full-length TD completed and more bytes remain: continue */
		if ((tw->tw_bytes_pending) &&
		    (GetTD_mlen(uhcip, td) - GetTD_alen(uhcip, td) == 0)) {

			MaxPacketSize = pp->pp_pipe_handle->p_ep.wMaxPacketSize;
			length = MaxPacketSize;

			/*
			 * Move this QH to the tail of the bulk xfers queue
			 * (unless it is already there) so the HC revisits
			 * NAK'ed TDs within the same frame.
			 */
			qh = pp->pp_qh;
			paddr = GetQH32(uhcip, qh->link_ptr) & QH_LINK_PTR_MASK;
			if (GetQH32(uhcip, qh->link_ptr) !=
			    GetQH32(uhcip,
			    uhcip->uhci_bulk_xfers_q_head->link_ptr)) {
				next_qh = QH_VADDR(paddr);
				SetQH32(uhcip, qh->prev_qh->link_ptr,
				    paddr|(0x2));
				next_qh->prev_qh = qh->prev_qh;
				SetQH32(uhcip, qh->link_ptr,
				    GetQH32(uhcip,
				    uhcip->uhci_bulk_xfers_q_head->link_ptr));
				qh->prev_qh = uhcip->uhci_bulk_xfers_q_tail;
				SetQH32(uhcip,
				    uhcip->uhci_bulk_xfers_q_tail->link_ptr,
				    QH_PADDR(qh) | 0x2);
				uhcip->uhci_bulk_xfers_q_tail = qh;
			}

			/*
			 * Compute how many TDs the next batch needs; the
			 * final (possibly short) TD carries the remainder.
			 */
			if ((tw->tw_bytes_pending / MaxPacketSize) >=
			    MAX_NUM_BULK_TDS_PER_XFER) {
				num_bulk_tds = MAX_NUM_BULK_TDS_PER_XFER;
			} else {
				num_bulk_tds =
				    (tw->tw_bytes_pending / MaxPacketSize);
				if (tw->tw_bytes_pending % MaxPacketSize) {
					num_bulk_tds++;
					length = (tw->tw_bytes_pending %
					    MaxPacketSize);
				}
			}

			current_dummy = pp->pp_qh->td_tailp;
			td_pool_ptr = &bulk_xfer_info->td_pools[0];
			bulk_td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
			buf_offs = tw->tw_bytes_xfered;
			td_count = num_bulk_tds;
			index = 0;

			/* reuse the TDs to transfer more data */
			while (td_count > 0) {
				/*
				 * Chain TDs within the current pool; the
				 * last slot is reserved for hopping to the
				 * next pool (or to the dummy TD below).
				 */
				for (j = 0;
				    (j < (td_pool_ptr->num_tds - 1)) &&
				    (td_count > 1); j++, td_count--) {
					uhci_fill_in_bulk_isoc_td(uhcip,
					    &bulk_td_ptr[j], &bulk_td_ptr[j+1],
					    BULKTD_PADDR(td_pool_ptr,
					    &bulk_td_ptr[j+1]), ph, buf_offs,
					    MaxPacketSize, tw);
					buf_offs += MaxPacketSize;
				}

				if (td_count == 1) {
					/* Final TD links to the dummy TD */
					uhci_fill_in_bulk_isoc_td(uhcip,
					    &bulk_td_ptr[j], current_dummy,
					    TD_PADDR(current_dummy), ph,
					    buf_offs, length, tw);

					break;
				} else {
					/* Bridge into the next TD pool */
					tmp_td = &bulk_td_ptr[j];
					ASSERT(index <
					    (bulk_xfer_info->num_pools - 1));
					td_pool_ptr = &bulk_xfer_info->
					    td_pools[index + 1];
					bulk_td_ptr = (uhci_td_t *)
					    td_pool_ptr->pool_addr;
					uhci_fill_in_bulk_isoc_td(uhcip,
					    tmp_td, &bulk_td_ptr[0],
					    BULKTD_PADDR(td_pool_ptr,
					    &bulk_td_ptr[0]), ph, buf_offs,
					    MaxPacketSize, tw);
					buf_offs += MaxPacketSize;
					td_count--;
					index++;
				}
			}

			/* Hand the new batch to the HC */
			pp->pp_qh->bulk_xfer_info = bulk_xfer_info;
			bulk_xfer_info->num_tds	= (ushort_t)num_bulk_tds;
			SetQH32(uhcip, pp->pp_qh->element_ptr,
			    bulk_xfer_info->td_pools[0].cookie.dmac_address);
		} else {
			/* Transfer complete (or short): finish and clean up */
			usba_pipe_handle_data_t *usb_pp = pp->pp_pipe_handle;

			pp->pp_qh->bulk_xfer_info = NULL;

			if (tw->tw_bytes_pending) {
				/* Update the element pointer */
				SetQH32(uhcip, pp->pp_qh->element_ptr,
				    TD_PADDR(pp->pp_qh->td_tailp));

				/* Remove all the tds */
				td_head = tw->tw_hctd_head;
				while (td_head != NULL) {
					uhci_delete_td(uhcip, td_head);
					td_head = tw->tw_hctd_head;
				}
			}

			if (tw->tw_direction == PID_IN) {
				usb_req_attrs_t	attrs = ((usb_bulk_req_t *)
				    tw->tw_curr_xfer_reqp)->bulk_attributes;

				error = USB_CR_OK;

				/* Data run occurred */
				if (tw->tw_bytes_pending &&
				    (!(attrs & USB_ATTRS_SHORT_XFER_OK))) {
					error = USB_CR_DATA_UNDERRUN;
				}

				uhci_sendup_td_message(uhcip, error, tw);
			} else {
				uhci_do_byte_stats(uhcip, tw->tw_length,
				    usb_pp->p_ep.bmAttributes,
				    usb_pp->p_ep.bEndpointAddress);

				/* Data underrun occurred */
				if (tw->tw_bytes_pending) {

					tw->tw_data->b_rptr +=
					    tw->tw_bytes_xfered;

					USB_DPRINTF_L2(PRINT_MASK_ATTA,
					    uhcip->uhci_log_hdl,
					    "uhci_handle_bulk_td: "
					    "data underrun occurred");

					uhci_hcdi_callback(uhcip, pp,
					    tw->tw_pipe_private->pp_pipe_handle,
					    tw, USB_CR_DATA_UNDERRUN);
				} else {
					uhci_hcdi_callback(uhcip, pp,
					    tw->tw_pipe_private->pp_pipe_handle,
					    tw, USB_CR_OK);
				}
			} /* direction */

			/* Deallocate DMA memory */
			uhci_deallocate_tw(uhcip, pp, tw);
			for (j = 0; j < bulk_xfer_info->num_pools; j++) {
				td_pool_ptr = &bulk_xfer_info->td_pools[j];
				(void) ddi_dma_unbind_handle(
				    td_pool_ptr->dma_handle);
				ddi_dma_mem_free(&td_pool_ptr->mem_handle);
				ddi_dma_free_handle(&td_pool_ptr->dma_handle);
			}
			kmem_free(bulk_xfer_info->td_pools,
			    (sizeof (uhci_bulk_isoc_td_pool_t) *
			    bulk_xfer_info->num_pools));
			kmem_free(bulk_xfer_info,
			    sizeof (uhci_bulk_isoc_xfer_t));

			/*
			 * When there are no pending bulk commands, point the
			 * end of the lattice tree to NULL. This will make sure
			 * that the HC control does not loop anymore and PCI
			 * bus is not affected.
			 */
			if (--uhcip->uhci_pending_bulk_cmds == 0) {
				uhcip->uhci_bulk_xfers_q_tail->link_ptr =
				    HC_END_OF_LIST;
				USB_DPRINTF_L3(PRINT_MASK_ATTA,
				    uhcip->uhci_log_hdl,
				    "uhci_handle_bulk_td: count = %d",
				    uhcip->uhci_pending_bulk_cmds);
			}
		}
	} else {
		/* More TDs of this batch outstanding; just retire this one */
		uhci_delete_td(uhcip, td);
	}
}
3528 
3529 
/*
 * uhci_handle_bulk_td_errors:
 *	Handle a bulk TD that completed with an error status: determine
 *	the error, unwind the transfer's TDs/TW and notify the client.
 */
void
uhci_handle_bulk_td_errors(uhci_state_t *uhcip, uhci_td_t *td)
{
	usb_cr_t		usb_err;
	uint32_t		paddr_tail, element_ptr, paddr;
	uhci_td_t		*next_td;
	uhci_pipe_private_t	*pp;
	uhci_trans_wrapper_t	*tw = td->tw;
	usba_pipe_handle_data_t	*ph;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr = NULL;

	USB_DPRINTF_L2(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
	    "uhci_handle_bulk_td_errors: td = %p", (void *)td);

#ifdef	DEBUG
	uhci_print_td(uhcip, td);
#endif

	/* NOTE(review): redundant; tw was already initialized from td->tw */
	tw = td->tw;
	ph = tw->tw_pipe_private->pp_pipe_handle;
	pp = (uhci_pipe_private_t *)ph->p_hcd_private;

	/*
	 * Find the type of error occurred and return the error
	 * to the upper layer. And adjust the data toggle.
	 */
	element_ptr = GetQH32(uhcip, pp->pp_qh->element_ptr) &
	    QH_ELEMENT_PTR_MASK;
	paddr_tail = TD_PADDR(pp->pp_qh->td_tailp);

	/*
	 * If a timeout occurs before a transfer has completed,
	 * the timeout handler sets the CRC/Timeout bit and clears the Active
	 * bit in the link_ptr for each td in the transfer.
	 * It then waits (at least) 1 ms so that any tds the controller might
	 * have been executing will have completed.
	 * So at this point element_ptr will point to either:
	 * 1) the next td for the transfer (which has not been executed,
	 * and has the CRC/Timeout status bit set and Active bit cleared),
	 * 2) the dummy td for this qh.
	 * So if the element_ptr does not point to the dummy td, we know
	 * it points to the next td that would have been executed.
	 * That td has the data toggle we want to save.
	 * All outstanding tds have been marked as CRC/Timeout,
	 * so it doesn't matter which td we pass to uhci_parse_td_error
	 * for the error status.
	 */
	if (element_ptr != paddr_tail) {
		paddr = (element_ptr & QH_ELEMENT_PTR_MASK);
		uhci_get_bulk_td_by_paddr(uhcip, pp->pp_qh->bulk_xfer_info,
		    paddr, &td_pool_ptr);
		next_td = BULKTD_VADDR(td_pool_ptr, paddr);
		USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
		    "uhci_handle_bulk_td_errors: next td = %p",
		    (void *)next_td);

		usb_err = uhci_parse_td_error(uhcip, pp, next_td);
	} else {
		usb_err = uhci_parse_td_error(uhcip, pp, td);
	}

	/*
	 * Update the link pointer: park the QH on its dummy TD.
	 */
	SetQH32(uhcip, pp->pp_qh->element_ptr, TD_PADDR(pp->pp_qh->td_tailp));

	/*
	 * Send up number of bytes transferred before the error condition.
	 */
	if ((tw->tw_direction == PID_OUT) && tw->tw_data) {
		tw->tw_data->b_rptr += tw->tw_bytes_xfered;
	}

	/* Tear down all TDs/TWs of this pipe's bulk transfer */
	uhci_remove_bulk_tds_tws(uhcip, tw->tw_pipe_private, UHCI_IN_ERROR);

	/*
	 * When there  are no pending bulk commands, point the end of the
	 * lattice tree to NULL. This will make sure that the  HC control
	 * does not loop anymore and PCI bus is not affected.
	 */
	if (--uhcip->uhci_pending_bulk_cmds == 0) {
		uhcip->uhci_bulk_xfers_q_tail->link_ptr = HC_END_OF_LIST;
		USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
		    "uhci_handle_bulk_td_errors: count = %d",
		    uhcip->uhci_pending_bulk_cmds);
	}

	/* Notify the client of the failure, then release the wrapper */
	uhci_hcdi_callback(uhcip, pp, ph, tw, usb_err);
	uhci_deallocate_tw(uhcip, pp, tw);
}
3620 
3621 
3622 /*
3623  * uhci_get_bulk_td_by_paddr:
3624  *	Obtain the address of the TD pool the physical address falls in.
3625  *
3626  * td_pool_pp - pointer to the address of the TD pool containing the paddr
3627  */
3628 /* ARGSUSED */
3629 static void
3630 uhci_get_bulk_td_by_paddr(
3631 	uhci_state_t			*uhcip,
3632 	uhci_bulk_isoc_xfer_t		*info,
3633 	uint32_t			paddr,
3634 	uhci_bulk_isoc_td_pool_t	**td_pool_pp)
3635 {
3636 	uint_t				i = 0;
3637 
3638 	while (i < info->num_pools) {
3639 		*td_pool_pp = &info->td_pools[i];
3640 		if (((*td_pool_pp)->cookie.dmac_address <= paddr) &&
3641 		    (((*td_pool_pp)->cookie.dmac_address +
3642 		    (*td_pool_pp)->cookie.dmac_size) > paddr)) {
3643 
3644 			break;
3645 		}
3646 		i++;
3647 	}
3648 
3649 	ASSERT(i < info->num_pools);
3650 }
3651 
3652 
/*
 * uhci_remove_bulk_tds_tws:
 *	Remove all outstanding bulk TDs (and, on close/reset, the TWs)
 *	belonging to this pipe, then release the transfer's TD pools.
 *	'what' is UHCI_IN_CLOSE, UHCI_IN_RESET or UHCI_IN_ERROR.
 */
void
uhci_remove_bulk_tds_tws(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	int			what)
{
	uint_t			rval, i;
	uhci_td_t		*head;
	uhci_td_t		*head_next;
	usb_opaque_t		curr_reqp;
	uhci_bulk_isoc_xfer_t	*info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Nothing to do if no bulk transfer is in progress on this pipe */
	if ((info = pp->pp_qh->bulk_xfer_info) == NULL) {

		return;
	}

	head = uhcip->uhci_outst_tds_head;

	/* Scan the outstanding TD list for TDs belonging to this pipe */
	while (head) {
		uhci_trans_wrapper_t *tw_tmp = head->tw;
		/* save the next pointer; uhci_delete_td unlinks 'head' */
		head_next = head->outst_td_next;

		if (pp->pp_qh == tw_tmp->tw_pipe_private->pp_qh) {
			curr_reqp = tw_tmp->tw_curr_xfer_reqp;
			if (curr_reqp &&
			    ((what == UHCI_IN_CLOSE) ||
			    (what == UHCI_IN_RESET))) {
				uhci_hcdi_callback(uhcip, pp,
				    pp->pp_pipe_handle,
				    tw_tmp, USB_CR_FLUSHED);
			} /* end of curr_reqp */

			uhci_delete_td(uhcip, head);

			if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
				ASSERT(info->num_tds > 0);
				/* last TD gone: free the wrapper too */
				if (--info->num_tds == 0) {
					uhci_deallocate_tw(uhcip, pp, tw_tmp);

					/*
					 * This will make sure that the HC
					 * does not loop anymore when there
					 * are no pending bulk commands.
					 */
					if (--uhcip->uhci_pending_bulk_cmds
					    == 0) {
						uhcip->uhci_bulk_xfers_q_tail->
						    link_ptr = HC_END_OF_LIST;
						USB_DPRINTF_L3(PRINT_MASK_ATTA,
						    uhcip->uhci_log_hdl,
						    "uhci_remove_bulk_tds_tws:"
						    " count = %d",
						    uhcip->
						    uhci_pending_bulk_cmds);
					}
				}
			}
		}

		head = head_next;
	}

	if (what == UHCI_IN_CLOSE || what == UHCI_IN_RESET) {
		ASSERT(info->num_tds == 0);
	}

	/* Release the DMA resources of every TD pool, then the bookkeeping */
	for (i = 0; i < info->num_pools; i++) {
		td_pool_ptr = &info->td_pools[i];
		rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
		ASSERT(rval == DDI_SUCCESS);
		ddi_dma_mem_free(&td_pool_ptr->mem_handle);
		ddi_dma_free_handle(&td_pool_ptr->dma_handle);
	}
	kmem_free(info->td_pools, (sizeof (uhci_bulk_isoc_td_pool_t) *
	    info->num_pools));
	kmem_free(info, sizeof (uhci_bulk_isoc_xfer_t));
	pp->pp_qh->bulk_xfer_info = NULL;
}
3735 
3736 
3737 /*
3738  * uhci_save_data_toggle ()
3739  *	Save the data toggle in the usba_device structure
3740  */
3741 void
3742 uhci_save_data_toggle(uhci_pipe_private_t *pp)
3743 {
3744 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3745 
3746 	/* Save the data toggle in the usb devices structure. */
3747 	mutex_enter(&ph->p_mutex);
3748 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3749 	    pp->pp_data_toggle);
3750 	mutex_exit(&ph->p_mutex);
3751 }
3752 
3753 /*
3754  * uhci_create_isoc_transfer_wrapper:
3755  *	Create a Transaction Wrapper (TW) for isoc transfer.
3756  *	This involves the allocating of DMA resources.
3757  *
3758  *	For isoc transfers, one isoc transfer includes multiple packets
3759  *	and each packet may have a different length. So each packet is
3760  *	transfered by one TD. We only know the individual packet length
3761  *	won't exceed 1023 bytes, but we don't know exactly the lengths.
3762  *	It is hard to make one physically discontiguous DMA buffer which
3763  *	can fit in all the TDs like what can be done to the ctrl/bulk/
3764  *	intr transfers. It is also undesirable to make one physically
3765  *	contiguous DMA buffer for all the packets, since this may easily
3766  *	fail when the system is in low memory. So an individual DMA
3767  *	buffer is allocated for an individual isoc packet and each DMA
3768  *	buffer is physically contiguous. An extra structure is allocated
3769  *	to save the multiple DMA handles.
3770  */
3771 static uhci_trans_wrapper_t *
3772 uhci_create_isoc_transfer_wrapper(
3773 	uhci_state_t		*uhcip,
3774 	uhci_pipe_private_t	*pp,
3775 	usb_isoc_req_t		*req,
3776 	size_t			length,
3777 	usb_flags_t		usb_flags)
3778 {
3779 	int			result;
3780 	size_t			real_length, strtlen, xfer_size;
3781 	uhci_trans_wrapper_t	*tw;
3782 	ddi_device_acc_attr_t	dev_attr;
3783 	ddi_dma_attr_t		dma_attr;
3784 	int			kmem_flag;
3785 	int			(*dmamem_wait)(caddr_t);
3786 	uint_t			i, j, ccount;
3787 	usb_isoc_req_t		*tmp_req = req;
3788 
3789 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
3790 
3791 	if (UHCI_XFER_TYPE(&pp->pp_pipe_handle->p_ep) != USB_EP_ATTR_ISOCH) {
3792 
3793 		return (NULL);
3794 	}
3795 
3796 	if ((req == NULL) && (UHCI_XFER_DIR(&pp->pp_pipe_handle->p_ep) ==
3797 	    USB_EP_DIR_IN)) {
3798 		tmp_req = (usb_isoc_req_t *)pp->pp_client_periodic_in_reqp;
3799 	}
3800 
3801 	if (tmp_req == NULL) {
3802 
3803 		return (NULL);
3804 	}
3805 
3806 
3807 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3808 	    "uhci_create_isoc_transfer_wrapper: length = 0x%lx flags = 0x%x",
3809 	    length, usb_flags);
3810 
3811 	/* SLEEP flag should not be used in interrupt context */
3812 	if (servicing_interrupt()) {
3813 		kmem_flag = KM_NOSLEEP;
3814 		dmamem_wait = DDI_DMA_DONTWAIT;
3815 	} else {
3816 		kmem_flag = KM_SLEEP;
3817 		dmamem_wait = DDI_DMA_SLEEP;
3818 	}
3819 
3820 	/* Allocate space for the transfer wrapper */
3821 	if ((tw = kmem_zalloc(sizeof (uhci_trans_wrapper_t), kmem_flag)) ==
3822 	    NULL) {
3823 		USB_DPRINTF_L2(PRINT_MASK_LISTS,  uhcip->uhci_log_hdl,
3824 		    "uhci_create_isoc_transfer_wrapper: kmem_alloc failed");
3825 
3826 		return (NULL);
3827 	}
3828 
3829 	/* Allocate space for the isoc buffer handles */
3830 	strtlen = sizeof (uhci_isoc_buf_t) * tmp_req->isoc_pkts_count;
3831 	if ((tw->tw_isoc_bufs = kmem_zalloc(strtlen, kmem_flag)) == NULL) {
3832 		USB_DPRINTF_L2(PRINT_MASK_LISTS,  uhcip->uhci_log_hdl,
3833 		    "uhci_create_isoc_transfer_wrapper: kmem_alloc "
3834 		    "isoc buffer failed");
3835 		kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3836 
3837 		return (NULL);
3838 	}
3839 
3840 	bcopy(&uhcip->uhci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
3841 	dma_attr.dma_attr_sgllen = 1;
3842 
3843 	dev_attr.devacc_attr_version		= DDI_DEVICE_ATTR_V0;
3844 	dev_attr.devacc_attr_endian_flags	= DDI_STRUCTURE_LE_ACC;
3845 	dev_attr.devacc_attr_dataorder		= DDI_STRICTORDER_ACC;
3846 
3847 	/* Store the transfer length */
3848 	tw->tw_length = length;
3849 
3850 	for (i = 0; i < tmp_req->isoc_pkts_count; i++) {
3851 		tw->tw_isoc_bufs[i].index = (ushort_t)i;
3852 
3853 		/* Allocate the DMA handle */
3854 		if ((result = ddi_dma_alloc_handle(uhcip->uhci_dip, &dma_attr,
3855 		    dmamem_wait, 0, &tw->tw_isoc_bufs[i].dma_handle)) !=
3856 		    DDI_SUCCESS) {
3857 			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3858 			    "uhci_create_isoc_transfer_wrapper: "
3859 			    "Alloc handle %d failed", i);
3860 
3861 			for (j = 0; j < i; j++) {
3862 				result = ddi_dma_unbind_handle(
3863 				    tw->tw_isoc_bufs[j].dma_handle);
3864 				ASSERT(result == USB_SUCCESS);
3865 				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
3866 				    mem_handle);
3867 				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
3868 				    dma_handle);
3869 			}
3870 			kmem_free(tw->tw_isoc_bufs, strtlen);
3871 			kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3872 
3873 			return (NULL);
3874 		}
3875 
3876 		/* Allocate the memory */
3877 		xfer_size = tmp_req->isoc_pkt_descr[i].isoc_pkt_length;
3878 		if ((result = ddi_dma_mem_alloc(tw->tw_isoc_bufs[i].dma_handle,
3879 		    xfer_size, &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait,
3880 		    NULL, (caddr_t *)&tw->tw_isoc_bufs[i].buf_addr,
3881 		    &real_length, &tw->tw_isoc_bufs[i].mem_handle)) !=
3882 		    DDI_SUCCESS) {
3883 			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3884 			    "uhci_create_isoc_transfer_wrapper: "
3885 			    "dma_mem_alloc %d fail", i);
3886 			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
3887 
3888 			for (j = 0; j < i; j++) {
3889 				result = ddi_dma_unbind_handle(
3890 				    tw->tw_isoc_bufs[j].dma_handle);
3891 				ASSERT(result == USB_SUCCESS);
3892 				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
3893 				    mem_handle);
3894 				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
3895 				    dma_handle);
3896 			}
3897 			kmem_free(tw->tw_isoc_bufs, strtlen);
3898 			kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3899 
3900 			return (NULL);
3901 		}
3902 
3903 		ASSERT(real_length >= xfer_size);
3904 
3905 		/* Bind the handle */
3906 		result = ddi_dma_addr_bind_handle(
3907 		    tw->tw_isoc_bufs[i].dma_handle, NULL,
3908 		    (caddr_t)tw->tw_isoc_bufs[i].buf_addr, real_length,
3909 		    DDI_DMA_RDWR|DDI_DMA_CONSISTENT, dmamem_wait, NULL,
3910 		    &tw->tw_isoc_bufs[i].cookie, &ccount);
3911 
3912 		if ((result == DDI_DMA_MAPPED) && (ccount == 1)) {
3913 			tw->tw_isoc_bufs[i].length = xfer_size;
3914 
3915 			continue;
3916 		} else {
3917 			USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3918 			    "uhci_create_isoc_transfer_wrapper: "
3919 			    "Bind handle %d failed", i);
3920 			if (result == DDI_DMA_MAPPED) {
3921 				result = ddi_dma_unbind_handle(
3922 				    tw->tw_isoc_bufs[i].dma_handle);
3923 				ASSERT(result == USB_SUCCESS);
3924 			}
3925 			ddi_dma_mem_free(&tw->tw_isoc_bufs[i].mem_handle);
3926 			ddi_dma_free_handle(&tw->tw_isoc_bufs[i].dma_handle);
3927 
3928 			for (j = 0; j < i; j++) {
3929 				result = ddi_dma_unbind_handle(
3930 				    tw->tw_isoc_bufs[j].dma_handle);
3931 				ASSERT(result == USB_SUCCESS);
3932 				ddi_dma_mem_free(&tw->tw_isoc_bufs[j].
3933 				    mem_handle);
3934 				ddi_dma_free_handle(&tw->tw_isoc_bufs[j].
3935 				    dma_handle);
3936 			}
3937 			kmem_free(tw->tw_isoc_bufs, strtlen);
3938 			kmem_free(tw, sizeof (uhci_trans_wrapper_t));
3939 
3940 			return (NULL);
3941 		}
3942 	}
3943 
3944 	tw->tw_ncookies = tmp_req->isoc_pkts_count;
3945 	tw->tw_isoc_strtlen = strtlen;
3946 
3947 	/*
3948 	 * Only allow one wrapper to be added at a time. Insert the
3949 	 * new transaction wrapper into the list for this pipe.
3950 	 */
3951 	if (pp->pp_tw_head == NULL) {
3952 		pp->pp_tw_head = tw;
3953 		pp->pp_tw_tail = tw;
3954 	} else {
3955 		pp->pp_tw_tail->tw_next = tw;
3956 		pp->pp_tw_tail = tw;
3957 		ASSERT(tw->tw_next == NULL);
3958 	}
3959 
3960 	/* Store a back pointer to the pipe private structure */
3961 	tw->tw_pipe_private = pp;
3962 
3963 	/* Store the transfer type - synchronous or asynchronous */
3964 	tw->tw_flags = usb_flags;
3965 
3966 	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
3967 	    "uhci_create_isoc_transfer_wrapper: tw = 0x%p, ncookies = %u",
3968 	    (void *)tw, tw->tw_ncookies);
3969 
3970 	return (tw);
3971 }
3972 
3973 /*
3974  * uhci_insert_isoc_td:
3975  *	- Create transfer wrapper
3976  *	- Allocate memory for the isoc td's
3977  *	- Fill up all the TD's and submit to the HC
3978  *	- Update all the linked lists
3979  */
int
uhci_insert_isoc_td(
	uhci_state_t		*uhcip,
	usba_pipe_handle_data_t	*ph,
	usb_isoc_req_t		*isoc_req,
	size_t			length,
	usb_flags_t		flags)
{
	int			rval = USB_SUCCESS;
	int			error;
	uint_t			ddic;
	uint32_t		i, j, index;
	uint32_t		bytes_to_xfer;
	uint32_t		expired_frames = 0;
	usb_frame_number_t	start_frame, end_frame, current_frame;
	uhci_td_t		*td_ptr;
	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
	uhci_trans_wrapper_t	*tw;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: ph = 0x%p isoc req = %p length = %lu",
	    (void *)ph, (void *)isoc_req, length);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Allocate a transfer wrapper; this also sets up one DMA buffer
	 * per isoc packet for this transfer.
	 */
	if ((tw = uhci_create_isoc_transfer_wrapper(uhcip, pp, isoc_req,
	    length, flags)) == NULL) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: TW allocation failed");

		return (USB_NO_RESOURCES);
	}

	/* Save current isochronous request pointer */
	tw->tw_curr_xfer_reqp = (usb_opaque_t)isoc_req;

	/*
	 * Initialize the transfer wrapper. These values are useful
	 * for sending back the reply.
	 */
	tw->tw_handle_td		= uhci_handle_isoc_td;
	tw->tw_handle_callback_value	= NULL;
	tw->tw_direction = (UHCI_XFER_DIR(&ph->p_ep) == USB_EP_DIR_OUT) ?
	    PID_OUT : PID_IN;

	/*
	 * If the transfer is an isoc send, copy the data from the request
	 * into the per-packet DMA buffers of the transfer wrapper.
	 */
	if ((tw->tw_direction == PID_OUT) && length) {
		uchar_t *p;

		ASSERT(isoc_req->isoc_data != NULL);
		p = isoc_req->isoc_data->b_rptr;

		/* Copy the data into the message, one packet at a time */
		for (i = 0; i < isoc_req->isoc_pkts_count; i++) {
			ddi_rep_put8(tw->tw_isoc_bufs[i].mem_handle,
			    p, (uint8_t *)tw->tw_isoc_bufs[i].buf_addr,
			    isoc_req->isoc_pkt_descr[i].isoc_pkt_length,
			    DDI_DEV_AUTOINCR);
			p += isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		}
	}

	if (tw->tw_direction == PID_IN) {
		/*
		 * Periodic IN: set up a duplicate of the client's request
		 * so polling can continue after this one completes.
		 */
		if ((rval = uhci_allocate_periodic_in_resource(uhcip, pp, tw,
		    flags)) != USB_SUCCESS) {
			USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
			    "uhci_insert_isoc_td: isoc_req_t alloc failed");
			uhci_deallocate_tw(uhcip, pp, tw);

			return (rval);
		}

		isoc_req = (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;
	}

	tw->tw_isoc_req	= (usb_isoc_req_t *)tw->tw_curr_xfer_reqp;

	/* Get the pointer to the isoc_xfer_info structure */
	isoc_xfer_info = (uhci_bulk_isoc_xfer_t *)&tw->tw_xfer_info;
	isoc_xfer_info->num_tds = isoc_req->isoc_pkts_count;

	/*
	 * Allocate memory for isoc tds (one TD per isoc packet)
	 */
	if ((rval = uhci_alloc_bulk_isoc_tds(uhcip, isoc_req->isoc_pkts_count,
	    isoc_xfer_info)) != USB_SUCCESS) {
		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_alloc_bulk_isoc_td: Memory allocation failure");

		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}
		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	/*
	 * Get the isoc td pool address, buffer address and
	 * max packet size that the device supports.
	 */
	td_pool_ptr = &isoc_xfer_info->td_pools[0];
	td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
	index = 0;

	/*
	 * Fill up the isoc tds, walking all the pools in order;
	 * 'index' is the running isoc packet number across pools.
	 */
	USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td : isoc pkts %d", isoc_req->isoc_pkts_count);

	for (i = 0; i < isoc_xfer_info->num_pools; i++) {
		for (j = 0; j < td_pool_ptr->num_tds; j++) {
			bytes_to_xfer =
			    isoc_req->isoc_pkt_descr[index].isoc_pkt_length;

			uhci_fill_in_bulk_isoc_td(uhcip, &td_ptr[j],
			    (uhci_td_t *)NULL, HC_END_OF_LIST, ph, index,
			    bytes_to_xfer, tw);
			td_ptr[j].isoc_pkt_index = (ushort_t)index;
			index++;
		}

		if (i < (isoc_xfer_info->num_pools - 1)) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i + 1];
			td_ptr = (uhci_td_t *)td_pool_ptr->pool_addr;
		}
	}

	/*
	 * Get the starting frame number.
	 * The client drivers sets the flag USB_ATTRS_ISOC_XFER_ASAP to inform
	 * the HCD to care of starting frame number.
	 *
	 * Following code is very time critical. So, perform atomic execution.
	 *
	 * NOTE(review): if the request carries neither
	 * USB_ATTRS_ISOC_START_FRAME nor USB_ATTRS_ISOC_XFER_ASAP,
	 * start_frame/end_frame are used below without being set —
	 * presumably USBA guarantees one of the two attributes is
	 * present; confirm against the framework.
	 */
	ddic = ddi_enter_critical();
	current_frame = uhci_get_sw_frame_number(uhcip);

	if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_START_FRAME) {
		start_frame = isoc_req->isoc_frame_no;
		end_frame = start_frame + isoc_req->isoc_pkts_count;

		/* Check available frames */
		if ((end_frame - current_frame) < UHCI_MAX_ISOC_FRAMES) {
			if (current_frame > start_frame) {
				if ((current_frame + FRNUM_OFFSET) <
				    end_frame) {
					/*
					 * Partially late: skip the frames
					 * already past and start just ahead
					 * of the current frame.
					 */
					expired_frames = current_frame +
					    FRNUM_OFFSET - start_frame;
					start_frame = current_frame +
					    FRNUM_OFFSET;
				} else {
					rval = USB_INVALID_START_FRAME;
				}
			}
		} else {
			rval = USB_INVALID_START_FRAME;
		}

	} else if (isoc_req->isoc_attributes & USB_ATTRS_ISOC_XFER_ASAP) {
		start_frame = pp->pp_frame_num;

		if (start_frame == INVALID_FRNUM) {
			start_frame = current_frame + FRNUM_OFFSET;
		} else if (current_frame > start_frame) {
			start_frame = current_frame + FRNUM_OFFSET;
		}

		end_frame = start_frame + isoc_req->isoc_pkts_count;
		isoc_req->isoc_frame_no = start_frame;

	}

	if (rval != USB_SUCCESS) {

		/* Exit the critical */
		ddi_exit_critical(ddic);

		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
		    "uhci_insert_isoc_td: Invalid starting frame number");

		/* Undo everything allocated above, in reverse order */
		if (tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp, tw);
		}

		while (tw->tw_hctd_head) {
			uhci_delete_td(uhcip, tw->tw_hctd_head);
		}

		for (i = 0; i < isoc_xfer_info->num_pools; i++) {
			td_pool_ptr = &isoc_xfer_info->td_pools[i];
			error = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
			ASSERT(error == DDI_SUCCESS);
			ddi_dma_mem_free(&td_pool_ptr->mem_handle);
			ddi_dma_free_handle(&td_pool_ptr->dma_handle);
		}
		kmem_free(isoc_xfer_info->td_pools,
		    (sizeof (uhci_bulk_isoc_td_pool_t) *
		    isoc_xfer_info->num_pools));

		uhci_deallocate_tw(uhcip, pp, tw);

		return (rval);
	}

	/*
	 * Packets whose frames have already passed are completed as
	 * "not accessed" and their TDs released; only the remaining
	 * packets are scheduled.
	 */
	for (i = 0; i < expired_frames; i++) {
		isoc_req->isoc_pkt_descr[i].isoc_pkt_status =
		    USB_CR_NOT_ACCESSED;
		isoc_req->isoc_pkt_descr[i].isoc_pkt_actual_length =
		    isoc_req->isoc_pkt_descr[i].isoc_pkt_length;
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		uhci_delete_td(uhcip, td_ptr);
		--isoc_xfer_info->num_tds;
	}

	/*
	 * Add the TD's to the HC list; 'i' continues from the expired
	 * packet loop above. The mask reduces the frame number to an
	 * index into the frame list.
	 */
	start_frame = (start_frame & 0x3ff);
	for (; i < isoc_req->isoc_pkts_count; i++) {
		uhci_get_isoc_td_by_index(uhcip, isoc_xfer_info, i,
		    &td_ptr, &td_pool_ptr);
		if (uhcip->uhci_isoc_q_tailp[start_frame]) {
			/* Append to the existing isoc queue of this frame */
			td_ptr->isoc_prev =
			    uhcip->uhci_isoc_q_tailp[start_frame];
			td_ptr->isoc_next = NULL;
			td_ptr->link_ptr =
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr;
			uhcip->uhci_isoc_q_tailp[start_frame]->isoc_next =
			    td_ptr;
			SetTD32(uhcip,
			    uhcip->uhci_isoc_q_tailp[start_frame]->link_ptr,
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
		} else {
			/* First isoc TD in this frame: link at the head */
			uhcip->uhci_isoc_q_tailp[start_frame] = td_ptr;
			td_ptr->isoc_next = NULL;
			td_ptr->isoc_prev = NULL;
			SetTD32(uhcip, td_ptr->link_ptr,
			    GetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame]));
			SetFL32(uhcip,
			    uhcip->uhci_frame_lst_tablep[start_frame],
			    ISOCTD_PADDR(td_pool_ptr, td_ptr));
		}
		td_ptr->starting_frame = (uint_t)start_frame;

		if (++start_frame == NUM_FRAME_LST_ENTRIES)
			start_frame = 0;
	}

	ddi_exit_critical(ddic);
	pp->pp_frame_num = end_frame;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_insert_isoc_td: current frame number 0x%llx, pipe frame num"
	    " 0x%llx", (unsigned long long)current_frame,
	    (unsigned long long)(pp->pp_frame_num));

	return (rval);
}
4249 
4250 
4251 /*
4252  * uhci_get_isoc_td_by_index:
4253  *	Obtain the addresses of the TD pool and the TD at the index.
4254  *
4255  * tdpp - pointer to the address of the TD at the isoc packet index
4256  * td_pool_pp - pointer to the address of the TD pool containing
4257  *		the specified TD
4258  */
4259 /* ARGSUSED */
4260 static void
4261 uhci_get_isoc_td_by_index(
4262 	uhci_state_t			*uhcip,
4263 	uhci_bulk_isoc_xfer_t		*info,
4264 	uint_t				index,
4265 	uhci_td_t			**tdpp,
4266 	uhci_bulk_isoc_td_pool_t	**td_pool_pp)
4267 {
4268 	uint_t			i = 0, j = 0;
4269 	uhci_td_t		*td_ptr;
4270 
4271 	while (j < info->num_pools) {
4272 		if ((i + info->td_pools[j].num_tds) <= index) {
4273 			i += info->td_pools[j].num_tds;
4274 			j++;
4275 		} else {
4276 			i = index - i;
4277 
4278 			break;
4279 		}
4280 	}
4281 
4282 	ASSERT(j < info->num_pools);
4283 	*td_pool_pp = &info->td_pools[j];
4284 	td_ptr = (uhci_td_t *)((*td_pool_pp)->pool_addr);
4285 	*tdpp = &td_ptr[i];
4286 }
4287 
4288 
4289 /*
4290  * uhci_handle_isoc_td:
4291  *	Handles the completed isoc tds
4292  */
4293 void
4294 uhci_handle_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
4295 {
4296 	uint_t			rval, i;
4297 	uint32_t		pkt_index = td->isoc_pkt_index;
4298 	usb_cr_t		cr;
4299 	uhci_trans_wrapper_t	*tw = td->tw;
4300 	usb_isoc_req_t		*isoc_req = (usb_isoc_req_t *)tw->tw_isoc_req;
4301 	uhci_pipe_private_t	*pp = tw->tw_pipe_private;
4302 	uhci_bulk_isoc_xfer_t	*isoc_xfer_info = &tw->tw_xfer_info;
4303 	usba_pipe_handle_data_t	*usb_pp;
4304 	uhci_bulk_isoc_td_pool_t *td_pool_ptr;
4305 
4306 	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4307 	    "uhci_handle_isoc_td: td = 0x%p, pp = 0x%p, tw = 0x%p, req = 0x%p, "
4308 	    "index = %x", (void *)td, (void *)pp, (void *)tw, (void *)isoc_req,
4309 	    pkt_index);
4310 
4311 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4312 
4313 	usb_pp = pp->pp_pipe_handle;
4314 
4315 	/*
4316 	 * Check whether there are any errors occurred. If so, update error
4317 	 * count and return it to the upper.But never return a non zero
4318 	 * completion reason.
4319 	 */
4320 	cr = USB_CR_OK;
4321 	if (GetTD_status(uhcip, td) & TD_STATUS_MASK) {
4322 		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4323 		    "uhci_handle_isoc_td: Error Occurred: TD Status = %x",
4324 		    GetTD_status(uhcip, td));
4325 		isoc_req->isoc_error_count++;
4326 	}
4327 
4328 	if (isoc_req != NULL) {
4329 		isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_status = cr;
4330 		isoc_req->isoc_pkt_descr[pkt_index].isoc_pkt_actual_length =
4331 		    (GetTD_alen(uhcip, td) == ZERO_LENGTH) ? 0 :
4332 		    GetTD_alen(uhcip, td) + 1;
4333 	}
4334 
4335 	uhci_delete_isoc_td(uhcip, td);
4336 
4337 	if (--isoc_xfer_info->num_tds != 0) {
4338 		USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4339 		    "uhci_handle_isoc_td: Number of TDs %d",
4340 		    isoc_xfer_info->num_tds);
4341 
4342 		return;
4343 	}
4344 
4345 	tw->tw_claim = UHCI_INTR_HDLR_CLAIMED;
4346 	if (tw->tw_direction == PID_IN) {
4347 		uhci_sendup_td_message(uhcip, cr, tw);
4348 
4349 		if ((uhci_handle_isoc_receive(uhcip, pp, tw)) != USB_SUCCESS) {
4350 			USB_DPRINTF_L3(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4351 			    "uhci_handle_isoc_td: Drop message");
4352 		}
4353 
4354 	} else {
4355 		/* update kstats only for OUT. sendup_td_msg() does it for IN */
4356 		uhci_do_byte_stats(uhcip, tw->tw_length,
4357 		    usb_pp->p_ep.bmAttributes, usb_pp->p_ep.bEndpointAddress);
4358 
4359 		uhci_hcdi_callback(uhcip, pp, usb_pp, tw, USB_CR_OK);
4360 	}
4361 
4362 	for (i = 0; i < isoc_xfer_info->num_pools; i++) {
4363 		td_pool_ptr = &isoc_xfer_info->td_pools[i];
4364 		rval = ddi_dma_unbind_handle(td_pool_ptr->dma_handle);
4365 		ASSERT(rval == DDI_SUCCESS);
4366 		ddi_dma_mem_free(&td_pool_ptr->mem_handle);
4367 		ddi_dma_free_handle(&td_pool_ptr->dma_handle);
4368 	}
4369 	kmem_free(isoc_xfer_info->td_pools,
4370 	    (sizeof (uhci_bulk_isoc_td_pool_t) *
4371 	    isoc_xfer_info->num_pools));
4372 	uhci_deallocate_tw(uhcip, pp, tw);
4373 }
4374 
4375 
4376 /*
4377  * uhci_handle_isoc_receive:
4378  *	- Sends the isoc data to the client
4379  *	- Inserts another isoc receive request
4380  */
4381 static int
4382 uhci_handle_isoc_receive(
4383 	uhci_state_t		*uhcip,
4384 	uhci_pipe_private_t	*pp,
4385 	uhci_trans_wrapper_t	*tw)
4386 {
4387 	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4388 	    "uhci_handle_isoc_receive: tw = 0x%p", (void *)tw);
4389 
4390 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4391 
4392 	/*
4393 	 * -- check for pipe state being polling before
4394 	 * inserting a new request. Check when is TD
4395 	 * de-allocation being done? (so we can reuse the same TD)
4396 	 */
4397 	if (uhci_start_isoc_receive_polling(uhcip,
4398 	    pp->pp_pipe_handle, (usb_isoc_req_t *)tw->tw_curr_xfer_reqp,
4399 	    0) != USB_SUCCESS) {
4400 		USB_DPRINTF_L2(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4401 		    "uhci_handle_isoc_receive: receive polling failed");
4402 
4403 		return (USB_FAILURE);
4404 	}
4405 
4406 	return (USB_SUCCESS);
4407 }
4408 
4409 
4410 /*
4411  * uhci_delete_isoc_td:
4412  *	- Delete from the outstanding command queue
4413  *	- Delete from the tw queue
4414  *	- Delete from the isoc queue
4415  *	- Delete from the HOST CONTROLLER list
4416  */
4417 static void
4418 uhci_delete_isoc_td(uhci_state_t *uhcip, uhci_td_t *td)
4419 {
4420 	uint32_t	starting_frame = td->starting_frame;
4421 
4422 	if ((td->isoc_next == NULL) && (td->isoc_prev == NULL)) {
4423 		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
4424 		    GetTD32(uhcip, td->link_ptr));
4425 		uhcip->uhci_isoc_q_tailp[starting_frame] = 0;
4426 	} else if (td->isoc_next == NULL) {
4427 		td->isoc_prev->link_ptr = td->link_ptr;
4428 		td->isoc_prev->isoc_next = NULL;
4429 		uhcip->uhci_isoc_q_tailp[starting_frame] = td->isoc_prev;
4430 	} else if (td->isoc_prev == NULL) {
4431 		td->isoc_next->isoc_prev = NULL;
4432 		SetFL32(uhcip, uhcip->uhci_frame_lst_tablep[starting_frame],
4433 		    GetTD32(uhcip, td->link_ptr));
4434 	} else {
4435 		td->isoc_prev->isoc_next = td->isoc_next;
4436 		td->isoc_next->isoc_prev = td->isoc_prev;
4437 		td->isoc_prev->link_ptr = td->link_ptr;
4438 	}
4439 
4440 	uhci_delete_td(uhcip, td);
4441 }
4442 
4443 
/*
 * uhci_start_isoc_receive_polling:
 *	- Picks up the isoc request (explicit, or the client's saved
 *	  periodic IN request)
 *	- Validates the total request length against the pipe maximum
 *	- Inserts the isoc td's into the HC processing list.
 */
4450 int
4451 uhci_start_isoc_receive_polling(
4452 	uhci_state_t		*uhcip,
4453 	usba_pipe_handle_data_t	*ph,
4454 	usb_isoc_req_t		*isoc_req,
4455 	usb_flags_t		usb_flags)
4456 {
4457 	int			ii, error;
4458 	size_t			max_isoc_xfer_size, length, isoc_pkts_length;
4459 	ushort_t		isoc_pkt_count;
4460 	uhci_pipe_private_t	*pp = (uhci_pipe_private_t *)ph->p_hcd_private;
4461 	usb_isoc_pkt_descr_t	*isoc_pkt_descr;
4462 
4463 	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4464 	    "uhci_start_isoc_receive_polling: usb_flags = %x", usb_flags);
4465 
4466 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4467 
4468 	max_isoc_xfer_size = ph->p_ep.wMaxPacketSize * UHCI_MAX_ISOC_PKTS;
4469 
4470 	if (isoc_req) {
4471 		isoc_pkt_descr = isoc_req->isoc_pkt_descr;
4472 		isoc_pkt_count = isoc_req->isoc_pkts_count;
4473 		isoc_pkts_length = isoc_req->isoc_pkts_length;
4474 	} else {
4475 		isoc_pkt_descr = ((usb_isoc_req_t *)
4476 		    pp->pp_client_periodic_in_reqp)->isoc_pkt_descr;
4477 		isoc_pkt_count = ((usb_isoc_req_t *)
4478 		    pp->pp_client_periodic_in_reqp)->isoc_pkts_count;
4479 		isoc_pkts_length = ((usb_isoc_req_t *)
4480 		    pp->pp_client_periodic_in_reqp)->isoc_pkts_length;
4481 	}
4482 
4483 	for (ii = 0, length = 0; ii < isoc_pkt_count; ii++) {
4484 		length += isoc_pkt_descr->isoc_pkt_length;
4485 		isoc_pkt_descr++;
4486 	}
4487 
4488 	if ((isoc_pkts_length) && (isoc_pkts_length != length)) {
4489 
4490 		USB_DPRINTF_L2(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
4491 		    "uhci_start_isoc_receive_polling: isoc_pkts_length 0x%lx "
4492 		    "is not equal to the sum of all pkt lengths 0x%lx in "
4493 		    "an isoc request", isoc_pkts_length, length);
4494 
4495 		return (USB_FAILURE);
4496 	}
4497 
4498 	/* Check the size of isochronous request */
4499 	if (length > max_isoc_xfer_size) {
4500 		USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
4501 		    "uhci_start_isoc_receive_polling: "
4502 		    "Max isoc request size = %lx, Given isoc req size = %lx",
4503 		    max_isoc_xfer_size, length);
4504 
4505 		return (USB_FAILURE);
4506 	}
4507 
4508 	/* Add the TD into the Host Controller's isoc list */
4509 	error = uhci_insert_isoc_td(uhcip, ph, isoc_req, length, usb_flags);
4510 
4511 	return (error);
4512 }
4513 
4514 
4515 /*
4516  * uhci_remove_isoc_tds_tws
4517  *	This routine scans the pipe and removes all the td's
4518  *	and transfer wrappers and deallocates the memory
4519  *	associated with those td's and tw's.
4520  */
void
uhci_remove_isoc_tds_tws(uhci_state_t *uhcip, uhci_pipe_private_t *pp)
{
	uint_t			rval, i;
	uhci_td_t		*tmp_td, *td_head;
	usb_isoc_req_t		*isoc_req;
	uhci_trans_wrapper_t	*tmp_tw, *tw_head;
	uhci_bulk_isoc_xfer_t	*isoc_xfer_info;
	uhci_bulk_isoc_td_pool_t *td_pool_ptr;

	USB_DPRINTF_L4(PRINT_MASK_ISOC, uhcip->uhci_log_hdl,
	    "uhci_remove_isoc_tds_tws: pp = %p", (void *)pp);

	/* Walk every transfer wrapper queued on this pipe */
	tw_head = pp->pp_tw_head;
	while (tw_head) {
		tmp_tw = tw_head;
		tw_head = tw_head->tw_next;
		td_head = tmp_tw->tw_hctd_head;
		/*
		 * IN: release the dup'ed periodic request; OUT: complete
		 * the transfer back to the client as flushed.
		 */
		if (tmp_tw->tw_direction == PID_IN) {
			uhci_deallocate_periodic_in_resource(uhcip, pp,
			    tmp_tw);
		} else if (tmp_tw->tw_direction == PID_OUT) {
			uhci_hcdi_callback(uhcip, pp, pp->pp_pipe_handle,
			    tmp_tw, USB_CR_FLUSHED);
		}

		/* Unlink every TD of this wrapper from the isoc lists */
		while (td_head) {
			tmp_td = td_head;
			td_head = td_head->tw_td_next;
			uhci_delete_isoc_td(uhcip, tmp_td);
		}

		isoc_req = (usb_isoc_req_t *)tmp_tw->tw_isoc_req;
		if (isoc_req) {
			usb_free_isoc_req(isoc_req);
		}

		ASSERT(tmp_tw->tw_hctd_head == NULL);

		/* Tear down the TD pools allocated for this transfer */
		if (tmp_tw->tw_xfer_info.td_pools) {
			isoc_xfer_info =
			    (uhci_bulk_isoc_xfer_t *)&tmp_tw->tw_xfer_info;
			for (i = 0; i < isoc_xfer_info->num_pools; i++) {
				td_pool_ptr = &isoc_xfer_info->td_pools[i];
				rval = ddi_dma_unbind_handle(
				    td_pool_ptr->dma_handle);
				ASSERT(rval == DDI_SUCCESS);
				ddi_dma_mem_free(&td_pool_ptr->mem_handle);
				ddi_dma_free_handle(&td_pool_ptr->dma_handle);
			}
			kmem_free(isoc_xfer_info->td_pools,
			    (sizeof (uhci_bulk_isoc_td_pool_t) *
			    isoc_xfer_info->num_pools));
		}

		uhci_deallocate_tw(uhcip, pp, tmp_tw);
	}
}
4579 
4580 
4581 /*
4582  * uhci_isoc_update_sw_frame_number()
4583  *	to avoid code duplication, call uhci_get_sw_frame_number()
4584  */
void
uhci_isoc_update_sw_frame_number(uhci_state_t *uhcip)
{
	/*
	 * Called for the side effect only: uhci_get_sw_frame_number()
	 * refreshes uhcip->uhci_sw_frnum; the returned value is discarded.
	 */
	(void) uhci_get_sw_frame_number(uhcip);
}
4590 
4591 
4592 /*
4593  * uhci_get_sw_frame_number:
4594  *	Hold the uhci_int_mutex before calling this routine.
4595  */
uint64_t
uhci_get_sw_frame_number(uhci_state_t *uhcip)
{
	uint64_t sw_frnum, hw_frnum, current_frnum;

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	sw_frnum = uhcip->uhci_sw_frnum;
	/* FRNUM is the hardware's 11-bit frame counter register */
	hw_frnum = Get_OpReg16(FRNUM);

	/*
	 * Check bit 10 in the software counter and hardware frame counter.
	 * If both are same, then don't increment the software frame counter
	 * (Bit 10 of hw frame counter toggle for every 1024 frames)
	 * The lower 11 bits of software counter contains the hardware frame
	 * counter value. The MSB (bit 10) of software counter is incremented
	 * for every 1024 frames either here or in get frame number routine.
	 */
	if ((sw_frnum & UHCI_BIT_10_MASK) == (hw_frnum & UHCI_BIT_10_MASK)) {
		/* The MSB of hw counter did not toggle */
		current_frnum = ((sw_frnum & (SW_FRNUM_MASK)) | hw_frnum);
	} else {
		/*
		 * The hw counter wrapped around. And the interrupt handler
		 * did not get a chance to update the sw frame counter.
		 * So, update the sw frame counter and return correct frame no.
		 */
		sw_frnum >>= UHCI_SIZE_OF_HW_FRNUM - 1;
		current_frnum =
		    ((++sw_frnum << (UHCI_SIZE_OF_HW_FRNUM - 1)) | hw_frnum);
	}
	/* Persist the reconstructed 64-bit frame number */
	uhcip->uhci_sw_frnum = current_frnum;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_get_sw_frame_number: sw=%lld hd=%lld",
	    (unsigned long long)(uhcip->uhci_sw_frnum),
	    (unsigned long long)hw_frnum);

	return (current_frnum);
}
4636 
4637 
4638 /*
4639  * uhci_cmd_timeout_hdlr:
4640  *	This routine will get called for every second. It checks for
4641  *	timed out control commands/bulk commands. Timeout any commands
4642  *	that exceeds the time out period specified by the pipe policy.
4643  */
void
uhci_cmd_timeout_hdlr(void *arg)
{
	uint_t			flag = B_FALSE;
	uhci_td_t		*head, *tmp_td;
	uhci_state_t		*uhcip = (uhci_state_t *)arg;
	uhci_pipe_private_t	*pp;

	/*
	 * Check whether any of the control xfers are timed out.
	 * If so, complete those commands with time out as reason.
	 */
	mutex_enter(&uhcip->uhci_int_mutex);
	head = uhcip->uhci_outst_tds_head;

	/* Pass 1: age each wrapper and terminate timed-out transfers */
	while (head) {
		/*
		 * If the timeout is zero, then don't time out this command.
		 */
		if (head->tw->tw_timeout_cnt == 0)  {
			head = head->outst_td_next;
			continue;
		}

		/*
		 * Decrement the remaining seconds only once per wrapper
		 * per tick; TW_TIMEOUT_FLAG marks a wrapper already aged
		 * this pass (several TDs can share one wrapper).
		 */
		if (!(head->tw->tw_flags & TW_TIMEOUT_FLAG)) {
			head->tw->tw_flags |= TW_TIMEOUT_FLAG;
			--head->tw->tw_timeout_cnt;
		}

		/* only do it for bulk and control TDs */
		if ((head->tw->tw_timeout_cnt == 0) &&
		    (head->tw->tw_handle_td != uhci_handle_isoc_td)) {

			USB_DPRINTF_L3(PRINT_MASK_ATTA, uhcip->uhci_log_hdl,
			    "Command timed out: td = %p", (void *)head);

			head->tw->tw_claim = UHCI_TIMEOUT_HDLR_CLAIMED;

			/*
			 * Check finally whether the command completed
			 */
			if (GetTD_status(uhcip, head) & UHCI_TD_ACTIVE) {
				/*
				 * Still active: terminate both the TD link
				 * and the queue head element pointer so the
				 * HC stops processing this transfer.
				 */
				SetTD32(uhcip, head->link_ptr,
				    GetTD32(uhcip, head->link_ptr) |
				    HC_END_OF_LIST);
				pp = head->tw->tw_pipe_private;
				SetQH32(uhcip, pp->pp_qh->element_ptr,
				    GetQH32(uhcip, pp->pp_qh->element_ptr) |
				    HC_END_OF_LIST);
			}

			flag = B_TRUE;
		}

		head = head->outst_td_next;
	}

	/* Let the terminated links take effect before touching the TDs */
	if (flag) {
		(void) uhci_wait_for_sof(uhcip);
	}

	/* Pass 2: clear the aging marks and fail the claimed TDs */
	head = uhcip->uhci_outst_tds_head;
	while (head) {
		if (head->tw->tw_flags & TW_TIMEOUT_FLAG) {
			head->tw->tw_flags &= ~TW_TIMEOUT_FLAG;
		}
		if (head->tw->tw_claim == UHCI_TIMEOUT_HDLR_CLAIMED) {
			head->tw->tw_claim = UHCI_NOT_CLAIMED;
			tmp_td = head->tw->tw_hctd_head;
			while (tmp_td) {
				/* Mark as CRC/timeout so completion fails */
				SetTD_status(uhcip, tmp_td,
				    UHCI_TD_CRC_TIMEOUT);
				tmp_td = tmp_td->tw_td_next;
			}
		}
		head = head->outst_td_next;
	}

	/*
	 * Process the td which was completed before shifting from normal
	 * mode to polled mode
	 */
	if (uhcip->uhci_polled_flag == UHCI_POLLED_FLAG_TRUE) {
		uhci_process_submitted_td_queue(uhcip);
		uhcip->uhci_polled_flag = UHCI_POLLED_FLAG_FALSE;
	} else if (flag) {
		/* Process the completed/timed out commands */
		uhci_process_submitted_td_queue(uhcip);
	}

	/* Re-register the control/bulk/intr commands' timeout handler */
	if (uhcip->uhci_cmd_timeout_id) {
		uhcip->uhci_cmd_timeout_id = timeout(uhci_cmd_timeout_hdlr,
		    (void *)uhcip, UHCI_ONE_SECOND);
	}

	mutex_exit(&uhcip->uhci_int_mutex);
}
4742 
4743 
4744 /*
4745  * uhci_wait_for_sof:
4746  *	Wait for the start of the next frame (implying any changes made in the
4747  *	lattice have now taken effect).
4748  *	To be sure this is the case, we wait for the completion of the current
4749  *	frame (which might have already been pending), then another complete
4750  *	frame to ensure everything has taken effect.
4751  */
int
uhci_wait_for_sof(uhci_state_t *uhcip)
{
	int	n, error;
	ushort_t    cmd_reg;
	usb_frame_number_t	before_frame_number, after_frame_number;
	clock_t	time, rval;
	USB_DPRINTF_L4(PRINT_MASK_LISTS, uhcip->uhci_log_hdl,
	    "uhci_wait_for_sof: uhcip = %p", (void *)uhcip);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/* Only wait while the controller is in the operational state */
	error = uhci_state_is_operational(uhcip);

	if (error != USB_SUCCESS) {

		return (error);
	}

	before_frame_number =  uhci_get_sw_frame_number(uhcip);
	for (n = 0; n < MAX_SOF_WAIT_COUNT; n++) {
		/* Ask for an interrupt when the SOF TD completes */
		SetTD_ioc(uhcip, uhcip->uhci_sof_td, 1);
		uhcip->uhci_cv_signal = B_TRUE;

		/* Wait up to one second for the SOF interrupt's signal */
		time = ddi_get_lbolt() + UHCI_ONE_SECOND;
		rval = cv_timedwait(&uhcip->uhci_cv_SOF,
		    &uhcip->uhci_int_mutex, time);

		after_frame_number = uhci_get_sw_frame_number(uhcip);
		/*
		 * Timed out with no frame progress: the HC may have
		 * stopped; set it running again and re-enable interrupts.
		 */
		if ((rval == -1) &&
		    (after_frame_number <= before_frame_number)) {
			cmd_reg = Get_OpReg16(USBCMD);
			Set_OpReg16(USBCMD, (cmd_reg | USBCMD_REG_HC_RUN));
			Set_OpReg16(USBINTR, ENABLE_ALL_INTRS);
			after_frame_number = uhci_get_sw_frame_number(uhcip);
		}
		before_frame_number = after_frame_number;
	}

	SetTD_ioc(uhcip, uhcip->uhci_sof_td, 0);

	/*
	 * uhci_cv_signal still being set means no SOF wakeup was ever
	 * received (the interrupt handler clears it on signal).
	 * NOTE(review): the clearing happens outside this window —
	 * presumably in the SOF interrupt path; confirm.
	 */
	return (uhcip->uhci_cv_signal ? USB_FAILURE : USB_SUCCESS);

}
4796 
4797 /*
4798  * uhci_allocate_periodic_in_resource:
4799  *	Allocate interrupt/isochronous request structure for the
4800  *	interrupt/isochronous IN transfer.
4801  */
int
uhci_allocate_periodic_in_resource(
	uhci_state_t		*uhcip,
	uhci_pipe_private_t	*pp,
	uhci_trans_wrapper_t	*tw,
	usb_flags_t		flags)
{
	size_t			length = 0;
	usb_opaque_t		client_periodic_in_reqp;
	usb_intr_req_t		*cur_intr_req;
	usb_isoc_req_t		*curr_isoc_reqp;
	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
	    "uhci_allocate_periodic_in_resource:\n\t"
	    "ph = 0x%p, pp = 0x%p, tw = 0x%p, flags = 0x%x",
	    (void *)ph, (void *)pp, (void *)tw, flags);

	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));

	/*
	 * Check the current periodic in request pointer.
	 * NOTE(review): the message says "allocation failed" but the
	 * function returns USB_SUCCESS here — the existing request is
	 * simply reused; the wording is misleading.
	 */
	if (tw->tw_curr_xfer_reqp) {
		USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
		    "uhci_allocate_periodic_in_resource: Interrupt "
		    "request structure already exists: "
		    "allocation failed");

		return (USB_SUCCESS);
	}

	/* Get the client periodic in request pointer */
	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;

	/*
	 * If it a periodic IN request and periodic request is NULL,
	 * allocate corresponding usb periodic IN request for the
	 * current periodic polling request and copy the information
	 * from the saved periodic request structure.
	 */
	if (UHCI_XFER_TYPE(&ph->p_ep) == USB_EP_ATTR_INTR) {
		/*
		 * Get the interrupt transfer length.
		 * NOTE(review): client_periodic_in_reqp is dereferenced
		 * here without a NULL check (the isoc branch below ASSERTs
		 * it) — presumably USBA guarantees it is set for interrupt
		 * polling; confirm against the framework.
		 */
		length = ((usb_intr_req_t *)client_periodic_in_reqp)->
		    intr_len;

		cur_intr_req = usba_hcdi_dup_intr_req(ph->p_dip,
		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
		if (cur_intr_req == NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Interrupt "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/* Check and save the timeout value */
		tw->tw_timeout_cnt = (cur_intr_req->intr_attributes &
		    USB_ATTRS_ONE_XFER) ? cur_intr_req->intr_timeout: 0;
		tw->tw_curr_xfer_reqp = (usb_opaque_t)cur_intr_req;
		tw->tw_length = cur_intr_req->intr_len;
	} else {
		ASSERT(client_periodic_in_reqp != NULL);

		if ((curr_isoc_reqp = usba_hcdi_dup_isoc_req(ph->p_dip,
		    (usb_isoc_req_t *)client_periodic_in_reqp, flags)) ==
		    NULL) {
			USB_DPRINTF_L2(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
			    "uhci_allocate_periodic_in_resource: Isochronous "
			    "request structure allocation failed");

			return (USB_NO_RESOURCES);
		}

		/*
		 * Save the client's isochronous request pointer and
		 * length of isochronous transfer in transfer wrapper.
		 * The dup'ed request is saved in pp_client_periodic_in_reqp
		 */
		tw->tw_curr_xfer_reqp =
		    (usb_opaque_t)pp->pp_client_periodic_in_reqp;
		pp->pp_client_periodic_in_reqp = (usb_opaque_t)curr_isoc_reqp;
	}

	/* Account for the new outstanding request on the pipe handle */
	mutex_enter(&ph->p_mutex);
	ph->p_req_count++;
	mutex_exit(&ph->p_mutex);

	return (USB_SUCCESS);
}
4890 
4891 
4892 /*
4893  * uhci_deallocate_periodic_in_resource:
4894  *	Deallocate interrupt/isochronous request structure for the
4895  *	interrupt/isochronous IN transfer.
4896  */
4897 void
4898 uhci_deallocate_periodic_in_resource(
4899 	uhci_state_t		*uhcip,
4900 	uhci_pipe_private_t	*pp,
4901 	uhci_trans_wrapper_t	*tw)
4902 {
4903 	usb_opaque_t		curr_xfer_reqp;
4904 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
4905 
4906 	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
4907 	    "uhci_deallocate_periodic_in_resource: "
4908 	    "pp = 0x%p tw = 0x%p", (void *)pp, (void *)tw);
4909 
4910 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4911 
4912 	curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4913 	if (curr_xfer_reqp) {
4914 		/*
4915 		 * Reset periodic in request usb isoch
4916 		 * packet request pointers to null.
4917 		 */
4918 		tw->tw_curr_xfer_reqp = NULL;
4919 		tw->tw_isoc_req = NULL;
4920 
4921 		mutex_enter(&ph->p_mutex);
4922 		ph->p_req_count--;
4923 		mutex_exit(&ph->p_mutex);
4924 
4925 		/*
4926 		 * Free pre-allocated interrupt or isochronous requests.
4927 		 */
4928 		switch (UHCI_XFER_TYPE(&ph->p_ep)) {
4929 		case USB_EP_ATTR_INTR:
4930 			usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
4931 			break;
4932 		case USB_EP_ATTR_ISOCH:
4933 			usb_free_isoc_req((usb_isoc_req_t *)curr_xfer_reqp);
4934 			break;
4935 		}
4936 	}
4937 }
4938 
4939 
4940 /*
4941  * uhci_hcdi_callback()
4942  *	convenience wrapper around usba_hcdi_callback()
4943  */
4944 void
4945 uhci_hcdi_callback(uhci_state_t *uhcip, uhci_pipe_private_t *pp,
4946     usba_pipe_handle_data_t *ph, uhci_trans_wrapper_t *tw, usb_cr_t cr)
4947 {
4948 	usb_opaque_t	curr_xfer_reqp;
4949 
4950 	USB_DPRINTF_L4(PRINT_MASK_HCDI, uhcip->uhci_log_hdl,
4951 	    "uhci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
4952 	    (void *)ph, (void *)tw, cr);
4953 
4954 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4955 
4956 	if (tw && tw->tw_curr_xfer_reqp) {
4957 		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4958 		tw->tw_curr_xfer_reqp = NULL;
4959 		tw->tw_isoc_req = NULL;
4960 	} else {
4961 		ASSERT(pp->pp_client_periodic_in_reqp != NULL);
4962 
4963 		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
4964 		pp->pp_client_periodic_in_reqp = NULL;
4965 	}
4966 
4967 	ASSERT(curr_xfer_reqp != NULL);
4968 
4969 	mutex_exit(&uhcip->uhci_int_mutex);
4970 	usba_hcdi_cb(ph, curr_xfer_reqp, cr);
4971 	mutex_enter(&uhcip->uhci_int_mutex);
4972 }
4973 
4974 
4975 /*
4976  * uhci_state_is_operational:
4977  *
4978  * Check the Host controller state and return proper values.
4979  */
4980 int
4981 uhci_state_is_operational(uhci_state_t	*uhcip)
4982 {
4983 	int	val;
4984 
4985 	ASSERT(mutex_owned(&uhcip->uhci_int_mutex));
4986 
4987 	switch (uhcip->uhci_hc_soft_state) {
4988 	case UHCI_CTLR_INIT_STATE:
4989 	case UHCI_CTLR_SUSPEND_STATE:
4990 		val = USB_FAILURE;
4991 		break;
4992 	case UHCI_CTLR_OPERATIONAL_STATE:
4993 		val = USB_SUCCESS;
4994 		break;
4995 	case UHCI_CTLR_ERROR_STATE:
4996 		val = USB_HC_HARDWARE_ERROR;
4997 		break;
4998 	default:
4999 		val = USB_FAILURE;
5000 		break;
5001 	}
5002 
5003 	return (val);
5004 }
5005 
5006 
5007 #ifdef DEBUG
5008 static void
5009 uhci_print_td(uhci_state_t *uhcip, uhci_td_t *td)
5010 {
5011 	uint_t	*ptr = (uint_t *)td;
5012 
5013 #ifndef lint
5014 	_NOTE(NO_COMPETING_THREADS_NOW);
5015 #endif
5016 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5017 	    "\tDWORD 1 0x%x\t DWORD 2 0x%x", ptr[0], ptr[1]);
5018 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5019 	    "\tDWORD 3 0x%x\t DWORD 4 0x%x", ptr[2], ptr[3]);
5020 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5021 	    "\tBytes xfered    = %d", td->tw->tw_bytes_xfered);
5022 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5023 	    "\tBytes Pending   = %d", td->tw->tw_bytes_pending);
5024 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5025 	    "Queue Head Details:");
5026 	uhci_print_qh(uhcip, td->tw->tw_pipe_private->pp_qh);
5027 
5028 #ifndef lint
5029 	_NOTE(COMPETING_THREADS_NOW);
5030 #endif
5031 }
5032 
5033 
5034 static void
5035 uhci_print_qh(uhci_state_t *uhcip, queue_head_t *qh)
5036 {
5037 	uint_t	*ptr = (uint_t *)qh;
5038 
5039 	USB_DPRINTF_L3(PRINT_MASK_DUMPING, uhcip->uhci_log_hdl,
5040 	    "\tLink Ptr = %x Element Ptr = %x", ptr[0], ptr[1]);
5041 }
5042 #endif
5043