xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/ehci/ehci_xfer.c (revision 22f5594a529d50114d839d4ddecc2c499731a3d7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * EHCI Host Controller Driver (EHCI)
30  *
31  * The EHCI driver is a software driver which interfaces to the Universal
32  * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
33  * the Host Controller is defined by the EHCI Host Controller Interface.
34  *
35  * This module contains the main EHCI driver code which handles all USB
36  * transfers, bandwidth allocations and other general functionalities.
37  */
38 
39 #include <sys/usb/hcd/ehci/ehcid.h>
40 #include <sys/usb/hcd/ehci/ehci_intr.h>
41 #include <sys/usb/hcd/ehci/ehci_util.h>
42 #include <sys/usb/hcd/ehci/ehci_isoch.h>
43 
44 /* Adjustable variables for the size of the pools */
45 extern int ehci_qh_pool_size;
46 extern int ehci_qtd_pool_size;
47 
48 
49 /* Endpoint Descriptor (QH) related functions */
50 ehci_qh_t	*ehci_alloc_qh(
51 				ehci_state_t		*ehcip,
52 				usba_pipe_handle_data_t	*ph,
53 				uint_t			flag);
54 static void	ehci_unpack_endpoint(
55 				ehci_state_t		*ehcip,
56 				usba_pipe_handle_data_t	*ph,
57 				ehci_qh_t		*qh);
58 void		ehci_insert_qh(
59 				ehci_state_t		*ehcip,
60 				usba_pipe_handle_data_t	*ph);
61 static void	ehci_insert_async_qh(
62 				ehci_state_t		*ehcip,
63 				ehci_pipe_private_t	*pp);
64 static void	ehci_insert_intr_qh(
65 				ehci_state_t		*ehcip,
66 				ehci_pipe_private_t	*pp);
67 static void	ehci_modify_qh_status_bit(
68 				ehci_state_t		*ehcip,
69 				ehci_pipe_private_t	*pp,
70 				halt_bit_t		action);
71 static void	ehci_halt_hs_qh(
72 				ehci_state_t		*ehcip,
73 				ehci_pipe_private_t	*pp,
74 				ehci_qh_t		*qh);
75 static void	ehci_halt_fls_ctrl_and_bulk_qh(
76 				ehci_state_t		*ehcip,
77 				ehci_pipe_private_t	*pp,
78 				ehci_qh_t		*qh);
79 static void	ehci_clear_tt_buffer(
80 				ehci_state_t		*ehcip,
81 				usba_pipe_handle_data_t	*ph,
82 				ehci_qh_t		*qh);
83 static void	ehci_halt_fls_intr_qh(
84 				ehci_state_t		*ehcip,
85 				ehci_qh_t		*qh);
86 void		ehci_remove_qh(
87 				ehci_state_t		*ehcip,
88 				ehci_pipe_private_t	*pp,
89 				boolean_t		reclaim);
90 static void	ehci_remove_async_qh(
91 				ehci_state_t		*ehcip,
92 				ehci_pipe_private_t	*pp,
93 				boolean_t		reclaim);
94 static void	ehci_remove_intr_qh(
95 				ehci_state_t		*ehcip,
96 				ehci_pipe_private_t	*pp,
97 				boolean_t		reclaim);
98 static void	ehci_insert_qh_on_reclaim_list(
99 				ehci_state_t		*ehcip,
100 				ehci_pipe_private_t	*pp);
101 void		ehci_deallocate_qh(
102 				ehci_state_t		*ehcip,
103 				ehci_qh_t		*old_qh);
104 uint32_t	ehci_qh_cpu_to_iommu(
105 				ehci_state_t		*ehcip,
106 				ehci_qh_t		*addr);
107 ehci_qh_t	*ehci_qh_iommu_to_cpu(
108 				ehci_state_t		*ehcip,
109 				uintptr_t		addr);
110 
111 /* Transfer Descriptor (QTD) related functions */
112 static int	ehci_initialize_dummy(
113 				ehci_state_t		*ehcip,
114 				ehci_qh_t		*qh);
115 ehci_trans_wrapper_t *ehci_allocate_ctrl_resources(
116 				ehci_state_t		*ehcip,
117 				ehci_pipe_private_t	*pp,
118 				usb_ctrl_req_t		*ctrl_reqp,
119 				usb_flags_t		usb_flags);
120 void		ehci_insert_ctrl_req(
121 				ehci_state_t		*ehcip,
122 				usba_pipe_handle_data_t	*ph,
123 				usb_ctrl_req_t		*ctrl_reqp,
124 				ehci_trans_wrapper_t	*tw,
125 				usb_flags_t		usb_flags);
126 ehci_trans_wrapper_t *ehci_allocate_bulk_resources(
127 				ehci_state_t		*ehcip,
128 				ehci_pipe_private_t	*pp,
129 				usb_bulk_req_t		*bulk_reqp,
130 				usb_flags_t		usb_flags);
131 void		ehci_insert_bulk_req(
132 				ehci_state_t		*ehcip,
133 				usba_pipe_handle_data_t	*ph,
134 				usb_bulk_req_t		*bulk_reqp,
135 				ehci_trans_wrapper_t	*tw,
136 				usb_flags_t		flags);
137 int		ehci_start_periodic_pipe_polling(
138 				ehci_state_t		*ehcip,
139 				usba_pipe_handle_data_t	*ph,
140 				usb_opaque_t		periodic_in_reqp,
141 				usb_flags_t		flags);
142 static int	ehci_start_pipe_polling(
143 				ehci_state_t		*ehcip,
144 				usba_pipe_handle_data_t	*ph,
145 				usb_flags_t		flags);
146 static int	ehci_start_intr_polling(
147 				ehci_state_t		*ehcip,
148 				usba_pipe_handle_data_t	*ph,
149 				usb_flags_t		flags);
150 static void	ehci_set_periodic_pipe_polling(
151 				ehci_state_t		*ehcip,
152 				usba_pipe_handle_data_t	*ph);
153 ehci_trans_wrapper_t *ehci_allocate_intr_resources(
154 				ehci_state_t		*ehcip,
155 				usba_pipe_handle_data_t	*ph,
156 				usb_intr_req_t		*intr_reqp,
157 				usb_flags_t		usb_flags);
158 void		ehci_insert_intr_req(
159 				ehci_state_t		*ehcip,
160 				ehci_pipe_private_t	*pp,
161 				ehci_trans_wrapper_t	*tw,
162 				usb_flags_t		flags);
163 int		ehci_stop_periodic_pipe_polling(
164 				ehci_state_t		*ehcip,
165 				usba_pipe_handle_data_t	*ph,
166 				usb_flags_t		flags);
167 int		ehci_insert_qtd(
168 				ehci_state_t		*ehcip,
169 				uint32_t		qtd_ctrl,
170 				size_t			qtd_dma_offs,
171 				size_t			qtd_length,
172 				uint32_t		qtd_ctrl_phase,
173 				ehci_pipe_private_t	*pp,
174 				ehci_trans_wrapper_t	*tw);
175 static ehci_qtd_t *ehci_allocate_qtd_from_pool(
176 				ehci_state_t		*ehcip);
177 static void	ehci_fill_in_qtd(
178 				ehci_state_t		*ehcip,
179 				ehci_qtd_t		*qtd,
180 				uint32_t		qtd_ctrl,
181 				size_t			qtd_dma_offs,
182 				size_t			qtd_length,
183 				uint32_t		qtd_ctrl_phase,
184 				ehci_pipe_private_t	*pp,
185 				ehci_trans_wrapper_t	*tw);
186 static void	ehci_insert_qtd_on_tw(
187 				ehci_state_t		*ehcip,
188 				ehci_trans_wrapper_t	*tw,
189 				ehci_qtd_t		*qtd);
190 static void	ehci_insert_qtd_into_active_qtd_list(
191 				ehci_state_t		*ehcip,
192 				ehci_qtd_t		*curr_qtd);
193 void		ehci_remove_qtd_from_active_qtd_list(
194 				ehci_state_t		*ehcip,
195 				ehci_qtd_t		*curr_qtd);
196 static void	ehci_traverse_qtds(
197 				ehci_state_t		*ehcip,
198 				usba_pipe_handle_data_t	*ph);
199 void		ehci_deallocate_qtd(
200 				ehci_state_t		*ehcip,
201 				ehci_qtd_t		*old_qtd);
202 uint32_t	ehci_qtd_cpu_to_iommu(
203 				ehci_state_t		*ehcip,
204 				ehci_qtd_t		*addr);
205 ehci_qtd_t	*ehci_qtd_iommu_to_cpu(
206 				ehci_state_t		*ehcip,
207 				uintptr_t		addr);
208 
209 /* Transfer Wrapper (TW) functions */
210 static ehci_trans_wrapper_t  *ehci_create_transfer_wrapper(
211 				ehci_state_t		*ehcip,
212 				ehci_pipe_private_t	*pp,
213 				size_t			length,
214 				uint_t			usb_flags);
215 int		ehci_allocate_tds_for_tw(
216 				ehci_state_t		*ehcip,
217 				ehci_pipe_private_t	*pp,
218 				ehci_trans_wrapper_t	*tw,
219 				size_t			qtd_count);
220 static ehci_trans_wrapper_t  *ehci_allocate_tw_resources(
221 				ehci_state_t		*ehcip,
222 				ehci_pipe_private_t	*pp,
223 				size_t			length,
224 				usb_flags_t		usb_flags,
225 				size_t			td_count);
226 static void	ehci_free_tw_td_resources(
227 				ehci_state_t		*ehcip,
228 				ehci_trans_wrapper_t	*tw);
229 static void	ehci_start_xfer_timer(
230 				ehci_state_t		*ehcip,
231 				ehci_pipe_private_t	*pp,
232 				ehci_trans_wrapper_t	*tw);
233 void		ehci_stop_xfer_timer(
234 				ehci_state_t		*ehcip,
235 				ehci_trans_wrapper_t	*tw,
236 				uint_t			flag);
237 static void	ehci_xfer_timeout_handler(void		*arg);
238 static void	ehci_remove_tw_from_timeout_list(
239 				ehci_state_t		*ehcip,
240 				ehci_trans_wrapper_t	*tw);
241 static void	ehci_start_timer(ehci_state_t		*ehcip,
242 				ehci_pipe_private_t	*pp);
243 void		ehci_deallocate_tw(
244 				ehci_state_t		*ehcip,
245 				ehci_pipe_private_t	*pp,
246 				ehci_trans_wrapper_t	*tw);
247 void		ehci_free_dma_resources(
248 				ehci_state_t		*ehcip,
249 				usba_pipe_handle_data_t	*ph);
250 static void	ehci_free_tw(
251 				ehci_state_t		*ehcip,
252 				ehci_pipe_private_t	*pp,
253 				ehci_trans_wrapper_t	*tw);
254 
255 /* Miscellaneous functions */
256 int		ehci_allocate_intr_in_resource(
257 				ehci_state_t		*ehcip,
258 				ehci_pipe_private_t	*pp,
259 				ehci_trans_wrapper_t	*tw,
260 				usb_flags_t		flags);
261 void		ehci_pipe_cleanup(
262 				ehci_state_t		*ehcip,
263 				usba_pipe_handle_data_t	*ph);
264 static void	ehci_wait_for_transfers_completion(
265 				ehci_state_t		*ehcip,
266 				ehci_pipe_private_t	*pp);
267 void		ehci_check_for_transfers_completion(
268 				ehci_state_t		*ehcip,
269 				ehci_pipe_private_t	*pp);
270 static void	ehci_save_data_toggle(
271 				ehci_state_t		*ehcip,
272 				usba_pipe_handle_data_t	*ph);
273 void		ehci_restore_data_toggle(
274 				ehci_state_t		*ehcip,
275 				usba_pipe_handle_data_t	*ph);
276 void		ehci_handle_outstanding_requests(
277 				ehci_state_t		*ehcip,
278 				ehci_pipe_private_t	*pp);
279 void		ehci_deallocate_intr_in_resource(
280 				ehci_state_t		*ehcip,
281 				ehci_pipe_private_t	*pp,
282 				ehci_trans_wrapper_t	*tw);
283 void		ehci_do_client_periodic_in_req_callback(
284 				ehci_state_t		*ehcip,
285 				ehci_pipe_private_t	*pp,
286 				usb_cr_t		completion_reason);
287 void		ehci_hcdi_callback(
288 				usba_pipe_handle_data_t	*ph,
289 				ehci_trans_wrapper_t	*tw,
290 				usb_cr_t		completion_reason);
291 
292 
293 /*
294  * Endpoint Descriptor (QH) manipulations functions
295  */
296 
297 /*
298  * ehci_alloc_qh:
299  *
300  * Allocate an endpoint descriptor (QH)
301  *
302  * NOTE: This function is also called from POLLED MODE.
303  */
304 ehci_qh_t *
305 ehci_alloc_qh(
306 	ehci_state_t		*ehcip,
307 	usba_pipe_handle_data_t	*ph,
308 	uint_t			flag)
309 {
310 	int			i, state;
311 	ehci_qh_t		*qh;
312 
313 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
314 	    "ehci_alloc_qh: ph = 0x%p flag = 0x%x", (void *)ph, flag);
315 
316 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
317 
318 	/*
319 	 * If this is for a ISOC endpoint return null.
320 	 * Isochronous uses ITD put directly onto the PFL.
321 	 */
322 	if (ph) {
323 		if (EHCI_ISOC_ENDPOINT((&ph->p_ep))) {
324 
325 			return (NULL);
326 		}
327 	}
328 
329 	/*
330 	 * The first 63 endpoints in the Endpoint Descriptor (QH)
331 	 * buffer pool are reserved for building interrupt lattice
332 	 * tree. Search for a blank endpoint descriptor in the QH
333 	 * buffer pool.
334 	 */
335 	for (i = EHCI_NUM_STATIC_NODES; i < ehci_qh_pool_size; i ++) {
336 		state = Get_QH(ehcip->ehci_qh_pool_addr[i].qh_state);
337 
338 		if (state == EHCI_QH_FREE) {
339 			break;
340 		}
341 	}
342 
343 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
344 	    "ehci_alloc_qh: Allocated %d", i);
345 
346 	if (i == ehci_qh_pool_size) {
347 		USB_DPRINTF_L2(PRINT_MASK_ALLOC,  ehcip->ehci_log_hdl,
348 		    "ehci_alloc_qh: QH exhausted");
349 
350 		return (NULL);
351 	} else {
352 		qh = &ehcip->ehci_qh_pool_addr[i];
353 		bzero((void *)qh, sizeof (ehci_qh_t));
354 
355 		USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
356 		    "ehci_alloc_qh: Allocated address 0x%p", (void *)qh);
357 
358 		/* Check polled mode flag */
359 		if (flag == EHCI_POLLED_MODE_FLAG) {
360 			Set_QH(qh->qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
361 			Set_QH(qh->qh_ctrl, EHCI_QH_CTRL_ED_INACTIVATE);
362 		}
363 
364 		/* Unpack the endpoint descriptor into a control field */
365 		if (ph) {
366 			if ((ehci_initialize_dummy(ehcip,
367 			    qh)) == USB_NO_RESOURCES) {
368 
369 				Set_QH(qh->qh_state, EHCI_QH_FREE);
370 
371 				return (NULL);
372 			}
373 
374 			ehci_unpack_endpoint(ehcip, ph, qh);
375 
376 			Set_QH(qh->qh_curr_qtd, NULL);
377 			Set_QH(qh->qh_alt_next_qtd,
378 			    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
379 
380 			/* Change QH's state Active */
381 			Set_QH(qh->qh_state, EHCI_QH_ACTIVE);
382 		} else {
383 			Set_QH(qh->qh_status, EHCI_QH_STS_HALTED);
384 
385 			/* Change QH's state Static */
386 			Set_QH(qh->qh_state, EHCI_QH_STATIC);
387 		}
388 
389 		ehci_print_qh(ehcip, qh);
390 
391 		return (qh);
392 	}
393 }
394 
395 
396 /*
397  * ehci_unpack_endpoint:
398  *
399  * Unpack the information in the pipe handle and create the first byte
400  * of the Host Controller's (HC) Endpoint Descriptor (QH).
401  */
402 static void
403 ehci_unpack_endpoint(
404 	ehci_state_t		*ehcip,
405 	usba_pipe_handle_data_t	*ph,
406 	ehci_qh_t		*qh)
407 {
408 	usb_ep_descr_t		*endpoint = &ph->p_ep;
409 	uint_t			maxpacketsize, addr, xactions;
410 	uint_t			ctrl = 0, status = 0, split_ctrl = 0;
411 	usb_port_status_t	usb_port_status;
412 	usba_device_t		*usba_device = ph->p_usba_device;
413 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
414 
415 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
416 	    "ehci_unpack_endpoint:");
417 
418 	mutex_enter(&usba_device->usb_mutex);
419 	ctrl = usba_device->usb_addr;
420 	usb_port_status = usba_device->usb_port_status;
421 	mutex_exit(&usba_device->usb_mutex);
422 
423 	addr = endpoint->bEndpointAddress;
424 
425 	/* Assign the endpoint's address */
426 	ctrl |= ((addr & USB_EP_NUM_MASK) << EHCI_QH_CTRL_ED_NUMBER_SHIFT);
427 
428 	/* Assign the speed */
429 	switch (usb_port_status) {
430 	case USBA_LOW_SPEED_DEV:
431 		ctrl |= EHCI_QH_CTRL_ED_LOW_SPEED;
432 		break;
433 	case USBA_FULL_SPEED_DEV:
434 		ctrl |= EHCI_QH_CTRL_ED_FULL_SPEED;
435 		break;
436 	case USBA_HIGH_SPEED_DEV:
437 		ctrl |= EHCI_QH_CTRL_ED_HIGH_SPEED;
438 		break;
439 	}
440 
441 	switch (endpoint->bmAttributes & USB_EP_ATTR_MASK) {
442 	case USB_EP_ATTR_CONTROL:
443 		/* Assign data toggle information */
444 		ctrl |= EHCI_QH_CTRL_DATA_TOGGLE;
445 
446 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
447 			ctrl |= EHCI_QH_CTRL_CONTROL_ED_FLAG;
448 		}
449 		/* FALLTHRU */
450 	case USB_EP_ATTR_BULK:
451 		/* Maximum nak counter */
452 		ctrl |= EHCI_QH_CTRL_MAX_NC;
453 
454 		if (usb_port_status == USBA_HIGH_SPEED_DEV) {
455 			/*
456 			 * Perform ping before executing control
457 			 * and bulk transactions.
458 			 */
459 			status = EHCI_QH_STS_DO_PING;
460 		}
461 		break;
462 	case USB_EP_ATTR_INTR:
463 		/* Set start split mask */
464 		split_ctrl = (pp->pp_smask & EHCI_QH_SPLIT_CTRL_INTR_MASK);
465 
466 		/*
467 		 * Set complete split mask for low/full speed
468 		 * usb devices.
469 		 */
470 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
471 			split_ctrl |= ((pp->pp_cmask <<
472 			    EHCI_QH_SPLIT_CTRL_COMP_SHIFT) &
473 			    EHCI_QH_SPLIT_CTRL_COMP_MASK);
474 		}
475 		break;
476 	}
477 
478 	/* Get the max transactions per microframe */
479 	xactions = (endpoint->wMaxPacketSize &
480 	    USB_EP_MAX_XACTS_MASK) >>  USB_EP_MAX_XACTS_SHIFT;
481 
482 	switch (xactions) {
483 	case 0:
484 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
485 		break;
486 	case 1:
487 		split_ctrl |= EHCI_QH_SPLIT_CTRL_2_XACTS;
488 		break;
489 	case 2:
490 		split_ctrl |= EHCI_QH_SPLIT_CTRL_3_XACTS;
491 		break;
492 	default:
493 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
494 		break;
495 	}
496 
497 	/*
498 	 * For low/full speed devices, program high speed hub
499 	 * address and port number.
500 	 */
501 	if (usb_port_status != USBA_HIGH_SPEED_DEV) {
502 		mutex_enter(&usba_device->usb_mutex);
503 		split_ctrl |= ((usba_device->usb_hs_hub_addr
504 		    << EHCI_QH_SPLIT_CTRL_HUB_ADDR_SHIFT) &
505 		    EHCI_QH_SPLIT_CTRL_HUB_ADDR);
506 
507 		split_ctrl |= ((usba_device->usb_hs_hub_port
508 		    << EHCI_QH_SPLIT_CTRL_HUB_PORT_SHIFT) &
509 		    EHCI_QH_SPLIT_CTRL_HUB_PORT);
510 
511 		mutex_exit(&usba_device->usb_mutex);
512 
513 		/* Set start split transaction state */
514 		status = EHCI_QH_STS_DO_START_SPLIT;
515 	}
516 
517 	/* Assign endpoint's maxpacketsize */
518 	maxpacketsize = endpoint->wMaxPacketSize & USB_EP_MAX_PKTSZ_MASK;
519 	maxpacketsize = maxpacketsize << EHCI_QH_CTRL_MAXPKTSZ_SHIFT;
520 	ctrl |= (maxpacketsize & EHCI_QH_CTRL_MAXPKTSZ);
521 
522 	Set_QH(qh->qh_ctrl, ctrl);
523 	Set_QH(qh->qh_split_ctrl, split_ctrl);
524 	Set_QH(qh->qh_status, status);
525 }
526 
527 
528 /*
529  * ehci_insert_qh:
530  *
531  * Add the Endpoint Descriptor (QH) into the Host Controller's
532  * (HC) appropriate endpoint list.
533  */
534 void
535 ehci_insert_qh(
536 	ehci_state_t		*ehcip,
537 	usba_pipe_handle_data_t	*ph)
538 {
539 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
540 
541 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
542 	    "ehci_insert_qh: qh=0x%p", (void *)pp->pp_qh);
543 
544 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
545 
546 	switch (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK) {
547 	case USB_EP_ATTR_CONTROL:
548 	case USB_EP_ATTR_BULK:
549 		ehci_insert_async_qh(ehcip, pp);
550 		ehcip->ehci_open_async_count++;
551 		break;
552 	case USB_EP_ATTR_INTR:
553 		ehci_insert_intr_qh(ehcip, pp);
554 		ehcip->ehci_open_periodic_count++;
555 		break;
556 	case USB_EP_ATTR_ISOCH:
557 		/* ISOCH does not use QH, don't do anything but update count */
558 		ehcip->ehci_open_periodic_count++;
559 		break;
560 	}
561 	ehci_toggle_scheduler(ehcip);
562 }
563 
564 
565 /*
566  * ehci_insert_async_qh:
567  *
568  * Insert a control/bulk endpoint into the Host Controller's (HC)
569  * Asynchronous schedule endpoint list.
570  */
571 static void
572 ehci_insert_async_qh(
573 	ehci_state_t		*ehcip,
574 	ehci_pipe_private_t	*pp)
575 {
576 	ehci_qh_t		*qh = pp->pp_qh;
577 	ehci_qh_t		*async_head_qh;
578 	ehci_qh_t		*next_qh;
579 	uintptr_t		qh_addr;
580 
581 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
582 	    "ehci_insert_async_qh:");
583 
584 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
585 
586 	/* Make sure this QH is not already in the list */
587 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
588 
589 	qh_addr = ehci_qh_cpu_to_iommu(ehcip, qh);
590 
591 	/* Obtain a ptr to the head of the Async schedule list */
592 	async_head_qh = ehcip->ehci_head_of_async_sched_list;
593 
594 	if (async_head_qh == NULL) {
595 		/* Set this QH to be the "head" of the circular list */
596 		Set_QH(qh->qh_ctrl,
597 		    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_RECLAIM_HEAD));
598 
599 		/* Set new QH's link and previous pointer to itself */
600 		Set_QH(qh->qh_link_ptr, qh_addr | EHCI_QH_LINK_REF_QH);
601 		Set_QH(qh->qh_prev, qh_addr);
602 
603 		ehcip->ehci_head_of_async_sched_list = qh;
604 
605 		/* Set the head ptr to the new endpoint */
606 		Set_OpReg(ehci_async_list_addr, qh_addr);
607 
608 		/*
609 		 * For some reason this register might get nulled out by
610 		 * the Uli M1575 South Bridge. To workaround the hardware
611 		 * problem, check the value after write and retry if the
612 		 * last write fails.
613 		 *
614 		 * If the ASYNCLISTADDR remains "stuck" after
615 		 * EHCI_MAX_RETRY retries, then the M1575 is broken
616 		 * and is stuck in an inconsistent state and is about
617 		 * to crash the machine with a trn_oor panic when it
618 		 * does a DMA read from 0x0.  It is better to panic
619 		 * now rather than wait for the trn_oor crash; this
620 		 * way Customer Service will have a clean signature
621 		 * that indicts the M1575 chip rather than a
622 		 * mysterious and hard-to-diagnose trn_oor panic.
623 		 */
624 		if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
625 		    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
626 		    (qh_addr != Get_OpReg(ehci_async_list_addr))) {
627 			int retry = 0;
628 
629 			Set_OpRegRetry(ehci_async_list_addr, qh_addr, retry);
630 			if (retry >= EHCI_MAX_RETRY)
631 				cmn_err(CE_PANIC, "ehci_insert_async_qh:"
632 				    " ASYNCLISTADDR write failed.");
633 
634 			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
635 			    "ehci_insert_async_qh: ASYNCLISTADDR "
636 			    "write failed, retry=%d", retry);
637 		}
638 	} else {
639 		ASSERT(Get_QH(async_head_qh->qh_ctrl) &
640 		    EHCI_QH_CTRL_RECLAIM_HEAD);
641 
642 		/* Ensure this QH's "H" bit is not set */
643 		Set_QH(qh->qh_ctrl,
644 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_RECLAIM_HEAD));
645 
646 		next_qh = ehci_qh_iommu_to_cpu(ehcip,
647 		    Get_QH(async_head_qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
648 
649 		/* Set new QH's link and previous pointers */
650 		Set_QH(qh->qh_link_ptr,
651 		    Get_QH(async_head_qh->qh_link_ptr) | EHCI_QH_LINK_REF_QH);
652 		Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, async_head_qh));
653 
654 		/* Set next QH's prev pointer */
655 		Set_QH(next_qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, qh));
656 
657 		/* Set QH Head's link pointer points to new QH */
658 		Set_QH(async_head_qh->qh_link_ptr,
659 		    qh_addr | EHCI_QH_LINK_REF_QH);
660 	}
661 }
662 
663 
664 /*
665  * ehci_insert_intr_qh:
666  *
667  * Insert a interrupt endpoint into the Host Controller's (HC) interrupt
668  * lattice tree.
669  */
670 static void
671 ehci_insert_intr_qh(
672 	ehci_state_t		*ehcip,
673 	ehci_pipe_private_t	*pp)
674 {
675 	ehci_qh_t		*qh = pp->pp_qh;
676 	ehci_qh_t		*next_lattice_qh, *lattice_qh;
677 	uint_t			hnode;
678 
679 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
680 	    "ehci_insert_intr_qh:");
681 
682 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
683 
684 	/* Make sure this QH is not already in the list */
685 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
686 
687 	/*
688 	 * The appropriate high speed node was found
689 	 * during the opening of the pipe.
690 	 */
691 	hnode = pp->pp_pnode;
692 
693 	/* Find the lattice endpoint */
694 	lattice_qh = &ehcip->ehci_qh_pool_addr[hnode];
695 
696 	/* Find the next lattice endpoint */
697 	next_lattice_qh = ehci_qh_iommu_to_cpu(
698 	    ehcip, (Get_QH(lattice_qh->qh_link_ptr) & EHCI_QH_LINK_PTR));
699 
700 	/* Update the previous pointer */
701 	Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, lattice_qh));
702 
703 	/* Check next_lattice_qh value */
704 	if (next_lattice_qh) {
705 		/* Update this qh to point to the next one in the lattice */
706 		Set_QH(qh->qh_link_ptr, Get_QH(lattice_qh->qh_link_ptr));
707 
708 		/* Update the previous pointer of qh->qh_link_ptr */
709 		if (Get_QH(next_lattice_qh->qh_state) != EHCI_QH_STATIC) {
710 			Set_QH(next_lattice_qh->qh_prev,
711 			    ehci_qh_cpu_to_iommu(ehcip, qh));
712 		}
713 	} else {
714 		/* Update qh's link pointer to terminate periodic list */
715 		Set_QH(qh->qh_link_ptr,
716 		    (Get_QH(lattice_qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
717 	}
718 
719 	/* Insert this endpoint into the lattice */
720 	Set_QH(lattice_qh->qh_link_ptr,
721 	    (ehci_qh_cpu_to_iommu(ehcip, qh) | EHCI_QH_LINK_REF_QH));
722 }
723 
724 
725 /*
726  * ehci_modify_qh_status_bit:
727  *
728  * Modify the halt bit on the Host Controller (HC) Endpoint Descriptor (QH).
729  *
730  * If several threads try to halt the same pipe, they will need to wait on
731  * a condition variable.  Only one thread is allowed to halt or unhalt the
732  * pipe at a time.
733  *
734  * Usually after a halt pipe, an unhalt pipe will follow soon after.  There
735  * is an assumption that an Unhalt pipe will never occur without a halt pipe.
736  */
737 static void
738 ehci_modify_qh_status_bit(
739 	ehci_state_t		*ehcip,
740 	ehci_pipe_private_t	*pp,
741 	halt_bit_t		action)
742 {
743 	ehci_qh_t		*qh = pp->pp_qh;
744 	uint_t			smask, eps, split_intr_qh;
745 	uint_t			status;
746 
747 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
748 	    "ehci_modify_qh_status_bit: action=0x%x qh=0x%p",
749 	    action, (void *)qh);
750 
751 	ehci_print_qh(ehcip, qh);
752 
753 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
754 
755 	/*
756 	 * If this pipe is in the middle of halting don't allow another
757 	 * thread to come in and modify the same pipe.
758 	 */
759 	while (pp->pp_halt_state & EHCI_HALT_STATE_HALTING) {
760 
761 		cv_wait(&pp->pp_halt_cmpl_cv,
762 		    &ehcip->ehci_int_mutex);
763 	}
764 
765 	/* Sync the QH QTD pool to get up to date information */
766 	Sync_QH_QTD_Pool(ehcip);
767 
768 
769 	if (action == CLEAR_HALT) {
770 		/*
771 		 * If the halt bit is to be cleared, just clear it.
772 		 * there shouldn't be any race condition problems.
773 		 * If the host controller reads the bit before the
774 		 * driver has a chance to set the bit, the bit will
775 		 * be reread on the next frame.
776 		 */
777 		Set_QH(qh->qh_ctrl,
778 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_ED_INACTIVATE));
779 		Set_QH(qh->qh_status,
780 		    Get_QH(qh->qh_status) & ~(EHCI_QH_STS_XACT_STATUS));
781 
782 		goto success;
783 	}
784 
785 	/* Halt the the QH, but first check to see if it is already halted */
786 	status = Get_QH(qh->qh_status);
787 	if (!(status & EHCI_QH_STS_HALTED)) {
788 		/* Indicate that this pipe is in the middle of halting. */
789 		pp->pp_halt_state |= EHCI_HALT_STATE_HALTING;
790 
791 		/*
792 		 * Find out if this is an full/low speed interrupt endpoint.
793 		 * A non-zero Cmask indicates that this QH is an interrupt
794 		 * endpoint.  Check the endpoint speed to see if it is either
795 		 * FULL or LOW .
796 		 */
797 		smask = Get_QH(qh->qh_split_ctrl) &
798 		    EHCI_QH_SPLIT_CTRL_INTR_MASK;
799 		eps = Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_SPEED;
800 		split_intr_qh = ((smask != 0) &&
801 		    (eps != EHCI_QH_CTRL_ED_HIGH_SPEED));
802 
803 		if (eps == EHCI_QH_CTRL_ED_HIGH_SPEED) {
804 			ehci_halt_hs_qh(ehcip, pp, qh);
805 		} else {
806 			if (split_intr_qh) {
807 				ehci_halt_fls_intr_qh(ehcip, qh);
808 			} else {
809 				ehci_halt_fls_ctrl_and_bulk_qh(ehcip, pp, qh);
810 			}
811 		}
812 
813 		/* Indicate that this pipe is not in the middle of halting. */
814 		pp->pp_halt_state &= ~EHCI_HALT_STATE_HALTING;
815 	}
816 
817 	/* Sync the QH QTD pool again to get the most up to date information */
818 	Sync_QH_QTD_Pool(ehcip);
819 
820 	ehci_print_qh(ehcip, qh);
821 
822 	status = Get_QH(qh->qh_status);
823 	if (!(status & EHCI_QH_STS_HALTED)) {
824 		USB_DPRINTF_L1(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
825 		    "ehci_modify_qh_status_bit: Failed to halt qh=0x%p",
826 		    (void *)qh);
827 
828 		ehci_print_qh(ehcip, qh);
829 
830 		/* Set host controller soft state to error */
831 		ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
832 
833 		ASSERT(status & EHCI_QH_STS_HALTED);
834 	}
835 
836 success:
837 	/* Wake up threads waiting for this pipe to be halted. */
838 	cv_signal(&pp->pp_halt_cmpl_cv);
839 }
840 
841 
842 /*
843  * ehci_halt_hs_qh:
844  *
845  * Halts all types of HIGH SPEED QHs.
846  */
847 static void
848 ehci_halt_hs_qh(
849 	ehci_state_t		*ehcip,
850 	ehci_pipe_private_t	*pp,
851 	ehci_qh_t		*qh)
852 {
853 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
854 
855 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
856 	    "ehci_halt_hs_qh:");
857 
858 	/* Remove this qh from the HCD's view, but do not reclaim it */
859 	ehci_remove_qh(ehcip, pp, B_FALSE);
860 
861 	/*
862 	 * Wait for atleast one SOF, just in case the HCD is in the
863 	 * middle accessing this QH.
864 	 */
865 	(void) ehci_wait_for_sof(ehcip);
866 
867 	/* Sync the QH QTD pool to get up to date information */
868 	Sync_QH_QTD_Pool(ehcip);
869 
870 	/* Modify the status bit and halt this QH. */
871 	Set_QH(qh->qh_status,
872 	    ((Get_QH(qh->qh_status) &
873 	    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
874 
875 	/* Insert this QH back into the HCD's view */
876 	ehci_insert_qh(ehcip, ph);
877 }
878 
879 
880 /*
881  * ehci_halt_fls_ctrl_and_bulk_qh:
882  *
883  * Halts FULL/LOW Ctrl and Bulk QHs only.
884  */
885 static void
886 ehci_halt_fls_ctrl_and_bulk_qh(
887 	ehci_state_t		*ehcip,
888 	ehci_pipe_private_t	*pp,
889 	ehci_qh_t		*qh)
890 {
891 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
892 	uint_t			status, split_status, bytes_left;
893 
894 
895 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
896 	    "ehci_halt_fls_ctrl_and_bulk_qh:");
897 
898 	/* Remove this qh from the HCD's view, but do not reclaim it */
899 	ehci_remove_qh(ehcip, pp, B_FALSE);
900 
901 	/*
902 	 * Wait for atleast one SOF, just in case the HCD is in the
903 	 * middle accessing this QH.
904 	 */
905 	(void) ehci_wait_for_sof(ehcip);
906 
907 	/* Sync the QH QTD pool to get up to date information */
908 	Sync_QH_QTD_Pool(ehcip);
909 
910 	/* Modify the status bit and halt this QH. */
911 	Set_QH(qh->qh_status,
912 	    ((Get_QH(qh->qh_status) &
913 	    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
914 
915 	/* Check to see if the QH was in the middle of a transaction */
916 	status = Get_QH(qh->qh_status);
917 	split_status = status & EHCI_QH_STS_SPLIT_XSTATE;
918 	bytes_left = status & EHCI_QH_STS_BYTES_TO_XFER;
919 	if ((split_status == EHCI_QH_STS_DO_COMPLETE_SPLIT) &&
920 	    (bytes_left != 0)) {
921 		/* send ClearTTBuffer to this device's parent 2.0 hub */
922 		ehci_clear_tt_buffer(ehcip, ph, qh);
923 	}
924 
925 	/* Insert this QH back into the HCD's view */
926 	ehci_insert_qh(ehcip, ph);
927 }
928 
929 
930 /*
931  * ehci_clear_tt_buffer
932  *
933  * This function will sent a Clear_TT_Buffer request to the pipe's
934  * parent 2.0 hub.
935  */
936 static void
937 ehci_clear_tt_buffer(
938 	ehci_state_t		*ehcip,
939 	usba_pipe_handle_data_t	*ph,
940 	ehci_qh_t		*qh)
941 {
942 	usba_device_t		*usba_device;
943 	usba_device_t		*hub_usba_device;
944 	usb_pipe_handle_t	hub_def_ph;
945 	usb_ep_descr_t		*eptd;
946 	uchar_t			attributes;
947 	uint16_t		wValue;
948 	usb_ctrl_setup_t	setup;
949 	usb_cr_t		completion_reason;
950 	usb_cb_flags_t		cb_flags;
951 	int			retry;
952 
953 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
954 	    "ehci_clear_tt_buffer: ");
955 
956 	/* Get some information about the current pipe */
957 	usba_device = ph->p_usba_device;
958 	eptd = &ph->p_ep;
959 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
960 
961 	/*
962 	 * Create the wIndex for this request (usb spec 11.24.2.3)
963 	 * 3..0		Endpoint Number
964 	 * 10..4	Device Address
965 	 * 12..11	Endpoint Type
966 	 * 14..13	Reserved (must be 0)
967 	 * 15		Direction 1 = IN, 0 = OUT
968 	 */
969 	wValue = 0;
970 	if ((eptd->bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
971 		wValue |= 0x8000;
972 	}
973 	wValue |= attributes << 11;
974 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_DEVICE_ADDRESS) << 4;
975 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_HIGH_SPEED) >>
976 	    EHCI_QH_CTRL_ED_NUMBER_SHIFT;
977 
978 	mutex_exit(&ehcip->ehci_int_mutex);
979 
980 	/* Manually fill in the request. */
981 	setup.bmRequestType = EHCI_CLEAR_TT_BUFFER_REQTYPE;
982 	setup.bRequest = EHCI_CLEAR_TT_BUFFER_BREQ;
983 	setup.wValue = wValue;
984 	setup.wIndex = 1;
985 	setup.wLength = 0;
986 	setup.attrs = USB_ATTRS_NONE;
987 
988 	/* Get the usba_device of the parent 2.0 hub. */
989 	mutex_enter(&usba_device->usb_mutex);
990 	hub_usba_device = usba_device->usb_hs_hub_usba_dev;
991 	mutex_exit(&usba_device->usb_mutex);
992 
993 	/* Get the default ctrl pipe for the parent 2.0 hub */
994 	mutex_enter(&hub_usba_device->usb_mutex);
995 	hub_def_ph = (usb_pipe_handle_t)&hub_usba_device->usb_ph_list[0];
996 	mutex_exit(&hub_usba_device->usb_mutex);
997 
998 	for (retry = 0; retry < 3; retry++) {
999 
1000 		/* sync send the request to the default pipe */
1001 		if (usb_pipe_ctrl_xfer_wait(
1002 		    hub_def_ph,
1003 		    &setup,
1004 		    NULL,
1005 		    &completion_reason, &cb_flags, 0) == USB_SUCCESS) {
1006 
1007 			break;
1008 		}
1009 
1010 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1011 		    "ehci_clear_tt_buffer: Failed to clear tt buffer,"
1012 		    "retry = %d, cr = %d, cb_flags = 0x%x\n",
1013 		    retry, completion_reason, cb_flags);
1014 	}
1015 
1016 	if (retry >= 3) {
1017 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1018 		dev_info_t *dip = hub_usba_device->usb_dip;
1019 
1020 		/*
1021 		 * Ask the user to hotplug the 2.0 hub, to make sure that
1022 		 * all the buffer is in sync since this command has failed.
1023 		 */
1024 		USB_DPRINTF_L0(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1025 		    "Error recovery failure: Please hotplug the 2.0 hub at"
1026 		    "%s", ddi_pathname(dip, path));
1027 
1028 		kmem_free(path, MAXPATHLEN);
1029 	}
1030 
1031 	mutex_enter(&ehcip->ehci_int_mutex);
1032 }
1033 
1034 /*
1035  * ehci_halt_fls_intr_qh:
1036  *
1037  * Halts FULL/LOW speed Intr QHs.
1038  */
1039 static void
1040 ehci_halt_fls_intr_qh(
1041 	ehci_state_t		*ehcip,
1042 	ehci_qh_t		*qh)
1043 {
1044 	usb_frame_number_t	starting_frame;
1045 	usb_frame_number_t	frames_past;
1046 	uint_t			status, i;
1047 
1048 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1049 	    "ehci_halt_fls_intr_qh:");
1050 
1051 	/*
1052 	 * Ask the HC to deactivate the QH in a
1053 	 * full/low periodic QH.
1054 	 */
1055 	Set_QH(qh->qh_ctrl,
1056 	    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_ED_INACTIVATE));
1057 
1058 	starting_frame = ehci_get_current_frame_number(ehcip);
1059 
1060 	/*
1061 	 * Wait at least EHCI_NUM_INTR_QH_LISTS+2 frame or until
1062 	 * the QH has been halted.
1063 	 */
1064 	Sync_QH_QTD_Pool(ehcip);
1065 	frames_past = 0;
1066 	status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1067 
1068 	while ((frames_past <= (EHCI_NUM_INTR_QH_LISTS + 2)) &&
1069 	    (status != 0)) {
1070 
1071 		(void) ehci_wait_for_sof(ehcip);
1072 
1073 		Sync_QH_QTD_Pool(ehcip);
1074 		status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1075 		frames_past = ehci_get_current_frame_number(ehcip) -
1076 		    starting_frame;
1077 	}
1078 
1079 	/* Modify the status bit and halt this QH. */
1080 	Sync_QH_QTD_Pool(ehcip);
1081 
1082 	status = Get_QH(qh->qh_status);
1083 
1084 	for (i = 0; i < EHCI_NUM_INTR_QH_LISTS; i++) {
1085 		Set_QH(qh->qh_status,
1086 		    ((Get_QH(qh->qh_status) &
1087 		    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
1088 
1089 		Sync_QH_QTD_Pool(ehcip);
1090 
1091 		(void) ehci_wait_for_sof(ehcip);
1092 		Sync_QH_QTD_Pool(ehcip);
1093 
1094 		if (Get_QH(qh->qh_status) & EHCI_QH_STS_HALTED) {
1095 
1096 			break;
1097 		}
1098 	}
1099 
1100 	Sync_QH_QTD_Pool(ehcip);
1101 
1102 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1103 	    "ehci_halt_fls_intr_qh: qh=0x%p frames past=%llu,"
1104 	    " status=0x%x, 0x%x", (void *)qh,
1105 	    (unsigned long long)(ehci_get_current_frame_number(ehcip) -
1106 	    starting_frame), status, Get_QH(qh->qh_status));
1107 }
1108 
1109 
1110 /*
1111  * ehci_remove_qh:
1112  *
1113  * Remove the Endpoint Descriptor (QH) from the Host Controller's appropriate
1114  * endpoint list.
1115  */
1116 void
1117 ehci_remove_qh(
1118 	ehci_state_t		*ehcip,
1119 	ehci_pipe_private_t	*pp,
1120 	boolean_t		reclaim)
1121 {
1122 	uchar_t			attributes;
1123 
1124 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1125 
1126 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1127 	    "ehci_remove_qh: qh=0x%p", (void *)pp->pp_qh);
1128 
1129 	attributes = pp->pp_pipe_handle->p_ep.bmAttributes & USB_EP_ATTR_MASK;
1130 
1131 	switch (attributes) {
1132 	case USB_EP_ATTR_CONTROL:
1133 	case USB_EP_ATTR_BULK:
1134 		ehci_remove_async_qh(ehcip, pp, reclaim);
1135 		ehcip->ehci_open_async_count--;
1136 		break;
1137 	case USB_EP_ATTR_INTR:
1138 		ehci_remove_intr_qh(ehcip, pp, reclaim);
1139 		ehcip->ehci_open_periodic_count--;
1140 		break;
1141 	case USB_EP_ATTR_ISOCH:
1142 		/* ISOCH does not use QH, don't do anything but update count */
1143 		ehcip->ehci_open_periodic_count--;
1144 		break;
1145 	}
1146 	ehci_toggle_scheduler(ehcip);
1147 }
1148 
1149 
1150 /*
1151  * ehci_remove_async_qh:
1152  *
1153  * Remove a control/bulk endpoint into the Host Controller's (HC)
1154  * Asynchronous schedule endpoint list.
1155  */
1156 static void
1157 ehci_remove_async_qh(
1158 	ehci_state_t		*ehcip,
1159 	ehci_pipe_private_t	*pp,
1160 	boolean_t		reclaim)
1161 {
1162 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1163 	ehci_qh_t		*prev_qh, *next_qh;
1164 
1165 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1166 	    "ehci_remove_async_qh:");
1167 
1168 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1169 
1170 	prev_qh = ehci_qh_iommu_to_cpu(ehcip,
1171 	    Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR);
1172 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1173 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1174 
1175 	/* Make sure this QH is in the list */
1176 	ASSERT(prev_qh != NULL);
1177 
1178 	/*
1179 	 * If next QH and current QH are the same, then this is the last
1180 	 * QH on the Asynchronous Schedule list.
1181 	 */
1182 	if (qh == next_qh) {
1183 		ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1184 		/*
1185 		 * Null our pointer to the async sched list, but do not
1186 		 * touch the host controller's list_addr.
1187 		 */
1188 		ehcip->ehci_head_of_async_sched_list = NULL;
1189 		ASSERT(ehcip->ehci_open_async_count == 1);
1190 	} else {
1191 		/* If this QH is the HEAD then find another one to replace it */
1192 		if (ehcip->ehci_head_of_async_sched_list == qh) {
1193 
1194 			ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1195 			ehcip->ehci_head_of_async_sched_list = next_qh;
1196 			Set_QH(next_qh->qh_ctrl,
1197 			    Get_QH(next_qh->qh_ctrl) |
1198 			    EHCI_QH_CTRL_RECLAIM_HEAD);
1199 		}
1200 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1201 		Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1202 	}
1203 
1204 	/* qh_prev to indicate it is no longer in the circular list */
1205 	Set_QH(qh->qh_prev, NULL);
1206 
1207 	if (reclaim) {
1208 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1209 	}
1210 }
1211 
1212 
1213 /*
1214  * ehci_remove_intr_qh:
1215  *
1216  * Set up an interrupt endpoint to be removed from the Host Controller's (HC)
1217  * interrupt lattice tree. The Endpoint Descriptor (QH) will be freed in the
1218  * interrupt handler.
1219  */
1220 static void
1221 ehci_remove_intr_qh(
1222 	ehci_state_t		*ehcip,
1223 	ehci_pipe_private_t	*pp,
1224 	boolean_t		reclaim)
1225 {
1226 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1227 	ehci_qh_t		*prev_qh, *next_qh;
1228 
1229 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1230 	    "ehci_remove_intr_qh:");
1231 
1232 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1233 
1234 	prev_qh = ehci_qh_iommu_to_cpu(ehcip, Get_QH(qh->qh_prev));
1235 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1236 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1237 
1238 	/* Make sure this QH is in the list */
1239 	ASSERT(prev_qh != NULL);
1240 
1241 	if (next_qh) {
1242 		/* Update previous qh's link pointer */
1243 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1244 
1245 		if (Get_QH(next_qh->qh_state) != EHCI_QH_STATIC) {
1246 			/* Set the previous pointer of the next one */
1247 			Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1248 		}
1249 	} else {
1250 		/* Update previous qh's link pointer */
1251 		Set_QH(prev_qh->qh_link_ptr,
1252 		    (Get_QH(qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
1253 	}
1254 
1255 	/* qh_prev to indicate it is no longer in the circular list */
1256 	Set_QH(qh->qh_prev, NULL);
1257 
1258 	if (reclaim) {
1259 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1260 	}
1261 }
1262 
1263 
1264 /*
1265  * ehci_insert_qh_on_reclaim_list:
1266  *
1267  * Insert Endpoint onto the reclaim list
1268  */
1269 static void
1270 ehci_insert_qh_on_reclaim_list(
1271 	ehci_state_t		*ehcip,
1272 	ehci_pipe_private_t	*pp)
1273 {
1274 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1275 	ehci_qh_t		*next_qh, *prev_qh;
1276 	usb_frame_number_t	frame_number;
1277 
1278 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1279 
1280 	/*
1281 	 * Read current usb frame number and add appropriate number of
1282 	 * usb frames needs to wait before reclaiming current endpoint.
1283 	 */
1284 	frame_number =
1285 	    ehci_get_current_frame_number(ehcip) + MAX_SOF_WAIT_COUNT;
1286 
1287 	/* Store 32-bit ID */
1288 	Set_QH(qh->qh_reclaim_frame,
1289 	    ((uint32_t)(EHCI_GET_ID((void *)(uintptr_t)frame_number))));
1290 
1291 	/* Insert the endpoint onto the reclamation list */
1292 	if (ehcip->ehci_reclaim_list) {
1293 		next_qh = ehcip->ehci_reclaim_list;
1294 
1295 		while (next_qh) {
1296 			prev_qh = next_qh;
1297 			next_qh = ehci_qh_iommu_to_cpu(ehcip,
1298 			    Get_QH(next_qh->qh_reclaim_next));
1299 		}
1300 
1301 		Set_QH(prev_qh->qh_reclaim_next,
1302 		    ehci_qh_cpu_to_iommu(ehcip, qh));
1303 	} else {
1304 		ehcip->ehci_reclaim_list = qh;
1305 	}
1306 
1307 	ASSERT(Get_QH(qh->qh_reclaim_next) == NULL);
1308 }
1309 
1310 
1311 /*
1312  * ehci_deallocate_qh:
1313  *
1314  * Deallocate a Host Controller's (HC) Endpoint Descriptor (QH).
1315  *
1316  * NOTE: This function is also called from POLLED MODE.
1317  */
1318 void
1319 ehci_deallocate_qh(
1320 	ehci_state_t	*ehcip,
1321 	ehci_qh_t	*old_qh)
1322 {
1323 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1324 
1325 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1326 	    "ehci_deallocate_qh:");
1327 
1328 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1329 
1330 	first_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1331 	    (Get_QH(old_qh->qh_next_qtd) & EHCI_QH_NEXT_QTD_PTR));
1332 
1333 	if (first_dummy_qtd) {
1334 		ASSERT(Get_QTD(first_dummy_qtd->qtd_state) == EHCI_QTD_DUMMY);
1335 
1336 		second_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1337 		    Get_QTD(first_dummy_qtd->qtd_next_qtd));
1338 
1339 		if (second_dummy_qtd) {
1340 			ASSERT(Get_QTD(second_dummy_qtd->qtd_state) ==
1341 			    EHCI_QTD_DUMMY);
1342 
1343 			ehci_deallocate_qtd(ehcip, second_dummy_qtd);
1344 		}
1345 
1346 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1347 	}
1348 
1349 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1350 	    "ehci_deallocate_qh: Deallocated 0x%p", (void *)old_qh);
1351 
1352 	Set_QH(old_qh->qh_state, EHCI_QH_FREE);
1353 }
1354 
1355 
1356 /*
1357  * ehci_qh_cpu_to_iommu:
1358  *
1359  * This function converts for the given Endpoint Descriptor (QH) CPU address
1360  * to IO address.
1361  *
1362  * NOTE: This function is also called from POLLED MODE.
1363  */
1364 uint32_t
1365 ehci_qh_cpu_to_iommu(
1366 	ehci_state_t	*ehcip,
1367 	ehci_qh_t	*addr)
1368 {
1369 	uint32_t	qh;
1370 
1371 	qh = (uint32_t)ehcip->ehci_qh_pool_cookie.dmac_address +
1372 	    (uint32_t)((uintptr_t)addr - (uintptr_t)(ehcip->ehci_qh_pool_addr));
1373 
1374 	ASSERT(qh >= ehcip->ehci_qh_pool_cookie.dmac_address);
1375 	ASSERT(qh <= ehcip->ehci_qh_pool_cookie.dmac_address +
1376 	    sizeof (ehci_qh_t) * ehci_qh_pool_size);
1377 
1378 	return (qh);
1379 }
1380 
1381 
1382 /*
1383  * ehci_qh_iommu_to_cpu:
1384  *
1385  * This function converts for the given Endpoint Descriptor (QH) IO address
1386  * to CPU address.
1387  */
1388 ehci_qh_t *
1389 ehci_qh_iommu_to_cpu(
1390 	ehci_state_t	*ehcip,
1391 	uintptr_t	addr)
1392 {
1393 	ehci_qh_t	*qh;
1394 
1395 	if (addr == NULL) {
1396 
1397 		return (NULL);
1398 	}
1399 
1400 	qh = (ehci_qh_t *)((uintptr_t)
1401 	    (addr - ehcip->ehci_qh_pool_cookie.dmac_address) +
1402 	    (uintptr_t)ehcip->ehci_qh_pool_addr);
1403 
1404 	ASSERT(qh >= ehcip->ehci_qh_pool_addr);
1405 	ASSERT((uintptr_t)qh <= (uintptr_t)ehcip->ehci_qh_pool_addr +
1406 	    (uintptr_t)(sizeof (ehci_qh_t) * ehci_qh_pool_size));
1407 
1408 	return (qh);
1409 }
1410 
1411 
1412 /*
1413  * Transfer Descriptor manipulations functions
1414  */
1415 
1416 /*
1417  * ehci_initialize_dummy:
1418  *
1419  * An Endpoint Descriptor (QH) has a  dummy Transfer Descriptor (QTD) on the
1420  * end of its QTD list. Initially, both the head and tail pointers of the QH
1421  * point to the dummy QTD.
1422  */
1423 static int
1424 ehci_initialize_dummy(
1425 	ehci_state_t	*ehcip,
1426 	ehci_qh_t	*qh)
1427 {
1428 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1429 
1430 	/* Allocate first dummy QTD */
1431 	first_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1432 
1433 	if (first_dummy_qtd == NULL) {
1434 		return (USB_NO_RESOURCES);
1435 	}
1436 
1437 	/* Allocate second dummy QTD */
1438 	second_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1439 
1440 	if (second_dummy_qtd == NULL) {
1441 		/* Deallocate first dummy QTD */
1442 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1443 
1444 		return (USB_NO_RESOURCES);
1445 	}
1446 
1447 	/* Next QTD pointer of an QH point to this new dummy QTD */
1448 	Set_QH(qh->qh_next_qtd, ehci_qtd_cpu_to_iommu(ehcip,
1449 	    first_dummy_qtd) & EHCI_QH_NEXT_QTD_PTR);
1450 
1451 	/* Set qh's dummy qtd field */
1452 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, first_dummy_qtd));
1453 
1454 	/* Set first_dummy's next qtd pointer */
1455 	Set_QTD(first_dummy_qtd->qtd_next_qtd,
1456 	    ehci_qtd_cpu_to_iommu(ehcip, second_dummy_qtd));
1457 
1458 	return (USB_SUCCESS);
1459 }
1460 
1461 /*
1462  * ehci_allocate_ctrl_resources:
1463  *
1464  * Calculates the number of tds necessary for a ctrl transfer, and allocates
1465  * all the resources necessary.
1466  *
1467  * Returns NULL if there is insufficient resources otherwise TW.
1468  */
1469 ehci_trans_wrapper_t *
1470 ehci_allocate_ctrl_resources(
1471 	ehci_state_t		*ehcip,
1472 	ehci_pipe_private_t	*pp,
1473 	usb_ctrl_req_t		*ctrl_reqp,
1474 	usb_flags_t		usb_flags)
1475 {
1476 	size_t			qtd_count = 2;
1477 	size_t			ctrl_buf_size;
1478 	ehci_trans_wrapper_t	*tw;
1479 
1480 	/* Add one more td for data phase */
1481 	if (ctrl_reqp->ctrl_wLength) {
1482 		qtd_count += 1;
1483 	}
1484 
1485 	/*
1486 	 * If we have a control data phase, the data buffer starts
1487 	 * on the next 4K page boundary. So the TW buffer is allocated
1488 	 * to be larger than required. The buffer in the range of
1489 	 * [SETUP_SIZE, EHCI_MAX_QTD_BUF_SIZE) is just for padding
1490 	 * and not to be transferred.
1491 	 */
1492 	if (ctrl_reqp->ctrl_wLength) {
1493 		ctrl_buf_size = EHCI_MAX_QTD_BUF_SIZE +
1494 		    ctrl_reqp->ctrl_wLength;
1495 	} else {
1496 		ctrl_buf_size = SETUP_SIZE;
1497 	}
1498 
1499 	tw = ehci_allocate_tw_resources(ehcip, pp, ctrl_buf_size,
1500 	    usb_flags, qtd_count);
1501 
1502 	return (tw);
1503 }
1504 
1505 /*
1506  * ehci_insert_ctrl_req:
1507  *
1508  * Create a Transfer Descriptor (QTD) and a data buffer for a control endpoint.
1509  */
1510 /* ARGSUSED */
1511 void
1512 ehci_insert_ctrl_req(
1513 	ehci_state_t		*ehcip,
1514 	usba_pipe_handle_data_t	*ph,
1515 	usb_ctrl_req_t		*ctrl_reqp,
1516 	ehci_trans_wrapper_t	*tw,
1517 	usb_flags_t		usb_flags)
1518 {
1519 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1520 	uchar_t			bmRequestType = ctrl_reqp->ctrl_bmRequestType;
1521 	uchar_t			bRequest = ctrl_reqp->ctrl_bRequest;
1522 	uint16_t		wValue = ctrl_reqp->ctrl_wValue;
1523 	uint16_t		wIndex = ctrl_reqp->ctrl_wIndex;
1524 	uint16_t		wLength = ctrl_reqp->ctrl_wLength;
1525 	mblk_t			*data = ctrl_reqp->ctrl_data;
1526 	uint32_t		ctrl = 0;
1527 	uint8_t			setup_packet[8];
1528 
1529 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1530 	    "ehci_insert_ctrl_req:");
1531 
1532 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1533 
1534 	/*
1535 	 * Save current control request pointer and timeout values
1536 	 * in transfer wrapper.
1537 	 */
1538 	tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
1539 	tw->tw_timeout = ctrl_reqp->ctrl_timeout ?
1540 	    ctrl_reqp->ctrl_timeout : EHCI_DEFAULT_XFER_TIMEOUT;
1541 
1542 	/*
1543 	 * Initialize the callback and any callback data for when
1544 	 * the qtd completes.
1545 	 */
1546 	tw->tw_handle_qtd = ehci_handle_ctrl_qtd;
1547 	tw->tw_handle_callback_value = NULL;
1548 
1549 	/*
1550 	 * swap the setup bytes where necessary since we specified
1551 	 * NEVERSWAP
1552 	 */
1553 	setup_packet[0] = bmRequestType;
1554 	setup_packet[1] = bRequest;
1555 	setup_packet[2] = wValue;
1556 	setup_packet[3] = wValue >> 8;
1557 	setup_packet[4] = wIndex;
1558 	setup_packet[5] = wIndex >> 8;
1559 	setup_packet[6] = wLength;
1560 	setup_packet[7] = wLength >> 8;
1561 
1562 	bcopy(setup_packet, tw->tw_buf, SETUP_SIZE);
1563 
1564 	Sync_IO_Buffer_for_device(tw->tw_dmahandle, SETUP_SIZE);
1565 
1566 	ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_0 | EHCI_QTD_CTRL_SETUP_PID);
1567 
1568 	/*
1569 	 * The QTD's are placed on the QH one at a time.
1570 	 * Once this QTD is placed on the done list, the
1571 	 * data or status phase QTD will be enqueued.
1572 	 */
1573 	(void) ehci_insert_qtd(ehcip, ctrl, 0, SETUP_SIZE,
1574 	    EHCI_CTRL_SETUP_PHASE, pp, tw);
1575 
1576 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1577 	    "ehci_insert_ctrl_req: pp 0x%p", (void *)pp);
1578 
1579 	/*
1580 	 * If this control transfer has a data phase, record the
1581 	 * direction. If the data phase is an OUT transaction,
1582 	 * copy the data into the buffer of the transfer wrapper.
1583 	 */
1584 	if (wLength != 0) {
1585 		/* There is a data stage.  Find the direction */
1586 		if (bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
1587 			tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
1588 		} else {
1589 			tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
1590 
1591 			/* Copy the data into the message */
1592 			bcopy(data->b_rptr, tw->tw_buf + EHCI_MAX_QTD_BUF_SIZE,
1593 			    wLength);
1594 
1595 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1596 			    wLength + EHCI_MAX_QTD_BUF_SIZE);
1597 		}
1598 
1599 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 | tw->tw_direction);
1600 
1601 		/*
1602 		 * Create the QTD.  If this is an OUT transaction,
1603 		 * the data is already in the buffer of the TW.
1604 		 * The transfer should start from EHCI_MAX_QTD_BUF_SIZE
1605 		 * which is 4K aligned, though the ctrl phase only
1606 		 * transfers a length of SETUP_SIZE. The padding data
1607 		 * in the TW buffer are discarded.
1608 		 */
1609 		(void) ehci_insert_qtd(ehcip, ctrl, EHCI_MAX_QTD_BUF_SIZE,
1610 		    tw->tw_length - EHCI_MAX_QTD_BUF_SIZE,
1611 		    EHCI_CTRL_DATA_PHASE, pp, tw);
1612 
1613 		/*
1614 		 * The direction of the STATUS QTD depends  on
1615 		 * the direction of the transfer.
1616 		 */
1617 		if (tw->tw_direction == EHCI_QTD_CTRL_IN_PID) {
1618 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1619 			    EHCI_QTD_CTRL_OUT_PID |
1620 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1621 		} else {
1622 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1623 			    EHCI_QTD_CTRL_IN_PID |
1624 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1625 		}
1626 	} else {
1627 		/*
1628 		 * There is no data stage,  then initiate
1629 		 * status phase from the host.
1630 		 */
1631 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 |
1632 		    EHCI_QTD_CTRL_IN_PID |
1633 		    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1634 	}
1635 
1636 
1637 	(void) ehci_insert_qtd(ehcip, ctrl, 0, 0,
1638 	    EHCI_CTRL_STATUS_PHASE, pp,  tw);
1639 
1640 	/* Start the timer for this control transfer */
1641 	ehci_start_xfer_timer(ehcip, pp, tw);
1642 }
1643 
1644 
1645 /*
1646  * ehci_allocate_bulk_resources:
1647  *
1648  * Calculates the number of tds necessary for a ctrl transfer, and allocates
1649  * all the resources necessary.
1650  *
1651  * Returns NULL if there is insufficient resources otherwise TW.
1652  */
1653 ehci_trans_wrapper_t *
1654 ehci_allocate_bulk_resources(
1655 	ehci_state_t		*ehcip,
1656 	ehci_pipe_private_t	*pp,
1657 	usb_bulk_req_t		*bulk_reqp,
1658 	usb_flags_t		usb_flags)
1659 {
1660 	size_t			qtd_count = 0;
1661 	ehci_trans_wrapper_t	*tw;
1662 
1663 	/* Check the size of bulk request */
1664 	if (bulk_reqp->bulk_len > EHCI_MAX_BULK_XFER_SIZE) {
1665 
1666 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1667 		    "ehci_allocate_bulk_resources: Bulk request size 0x%x is "
1668 		    "more than 0x%x", bulk_reqp->bulk_len,
1669 		    EHCI_MAX_BULK_XFER_SIZE);
1670 
1671 		return (NULL);
1672 	}
1673 
1674 	/* Get the required bulk packet size */
1675 	qtd_count = bulk_reqp->bulk_len / EHCI_MAX_QTD_XFER_SIZE;
1676 	if (bulk_reqp->bulk_len % EHCI_MAX_QTD_XFER_SIZE ||
1677 	    bulk_reqp->bulk_len == 0) {
1678 		qtd_count += 1;
1679 	}
1680 
1681 	tw = ehci_allocate_tw_resources(ehcip, pp, bulk_reqp->bulk_len,
1682 	    usb_flags, qtd_count);
1683 
1684 	return (tw);
1685 }
1686 
1687 /*
1688  * ehci_insert_bulk_req:
1689  *
1690  * Create a Transfer Descriptor (QTD) and a data buffer for a bulk
1691  * endpoint.
1692  */
1693 /* ARGSUSED */
1694 void
1695 ehci_insert_bulk_req(
1696 	ehci_state_t		*ehcip,
1697 	usba_pipe_handle_data_t	*ph,
1698 	usb_bulk_req_t		*bulk_reqp,
1699 	ehci_trans_wrapper_t	*tw,
1700 	usb_flags_t		flags)
1701 {
1702 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1703 	uint_t			bulk_pkt_size, count;
1704 	size_t			residue = 0, len = 0;
1705 	uint32_t		ctrl = 0;
1706 	int			pipe_dir;
1707 
1708 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1709 	    "ehci_insert_bulk_req: bulk_reqp = 0x%p flags = 0x%x",
1710 	    (void *)bulk_reqp, flags);
1711 
1712 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1713 
1714 	/* Get the bulk pipe direction */
1715 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
1716 
1717 	/* Get the required bulk packet size */
1718 	bulk_pkt_size = min(bulk_reqp->bulk_len, EHCI_MAX_QTD_XFER_SIZE);
1719 
1720 	if (bulk_pkt_size) {
1721 		residue = tw->tw_length % bulk_pkt_size;
1722 	}
1723 
1724 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1725 	    "ehci_insert_bulk_req: bulk_pkt_size = %d", bulk_pkt_size);
1726 
1727 	/*
1728 	 * Save current bulk request pointer and timeout values
1729 	 * in transfer wrapper.
1730 	 */
1731 	tw->tw_curr_xfer_reqp = (usb_opaque_t)bulk_reqp;
1732 	tw->tw_timeout = bulk_reqp->bulk_timeout;
1733 
1734 	/*
1735 	 * Initialize the callback and any callback
1736 	 * data required when the qtd completes.
1737 	 */
1738 	tw->tw_handle_qtd = ehci_handle_bulk_qtd;
1739 	tw->tw_handle_callback_value = NULL;
1740 
1741 	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
1742 	    EHCI_QTD_CTRL_OUT_PID : EHCI_QTD_CTRL_IN_PID;
1743 
1744 	if (tw->tw_direction == EHCI_QTD_CTRL_OUT_PID) {
1745 
1746 		if (bulk_reqp->bulk_len) {
1747 			ASSERT(bulk_reqp->bulk_data != NULL);
1748 
1749 			bcopy(bulk_reqp->bulk_data->b_rptr, tw->tw_buf,
1750 			    bulk_reqp->bulk_len);
1751 
1752 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1753 			    bulk_reqp->bulk_len);
1754 		}
1755 	}
1756 
1757 	ctrl = tw->tw_direction;
1758 
1759 	/* Insert all the bulk QTDs */
1760 	for (count = 0; count < tw->tw_num_qtds; count++) {
1761 
1762 		/* Check for last qtd */
1763 		if (count == (tw->tw_num_qtds - 1)) {
1764 
1765 			ctrl |= EHCI_QTD_CTRL_INTR_ON_COMPLETE;
1766 
1767 			/* Check for inserting residue data */
1768 			if (residue) {
1769 				bulk_pkt_size = residue;
1770 			}
1771 		}
1772 
1773 		/* Insert the QTD onto the endpoint */
1774 		(void) ehci_insert_qtd(ehcip, ctrl, len, bulk_pkt_size,
1775 		    0, pp, tw);
1776 
1777 		len = len + bulk_pkt_size;
1778 	}
1779 
1780 	/* Start the timer for this bulk transfer */
1781 	ehci_start_xfer_timer(ehcip, pp, tw);
1782 }
1783 
1784 
1785 /*
1786  * ehci_start_periodic_pipe_polling:
1787  *
1788  * NOTE: This function is also called from POLLED MODE.
1789  */
1790 int
1791 ehci_start_periodic_pipe_polling(
1792 	ehci_state_t		*ehcip,
1793 	usba_pipe_handle_data_t	*ph,
1794 	usb_opaque_t		periodic_in_reqp,
1795 	usb_flags_t		flags)
1796 {
1797 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1798 	usb_ep_descr_t		*eptd = &ph->p_ep;
1799 	int			error = USB_SUCCESS;
1800 
1801 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
1802 	    "ehci_start_periodic_pipe_polling: ep%d",
1803 	    ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK);
1804 
1805 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1806 
1807 	/*
1808 	 * Check and handle start polling on root hub interrupt pipe.
1809 	 */
1810 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
1811 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
1812 	    USB_EP_ATTR_INTR)) {
1813 
1814 		error = ehci_handle_root_hub_pipe_start_intr_polling(ph,
1815 		    (usb_intr_req_t *)periodic_in_reqp, flags);
1816 
1817 		return (error);
1818 	}
1819 
1820 	switch (pp->pp_state) {
1821 	case EHCI_PIPE_STATE_IDLE:
1822 		/* Save the Original client's Periodic IN request */
1823 		pp->pp_client_periodic_in_reqp = periodic_in_reqp;
1824 
1825 		/*
1826 		 * This pipe is uninitialized or if a valid QTD is
1827 		 * not found then insert a QTD on the interrupt IN
1828 		 * endpoint.
1829 		 */
1830 		error = ehci_start_pipe_polling(ehcip, ph, flags);
1831 
1832 		if (error != USB_SUCCESS) {
1833 			USB_DPRINTF_L2(PRINT_MASK_INTR,
1834 			    ehcip->ehci_log_hdl,
1835 			    "ehci_start_periodic_pipe_polling: "
1836 			    "Start polling failed");
1837 
1838 			pp->pp_client_periodic_in_reqp = NULL;
1839 
1840 			return (error);
1841 		}
1842 
1843 		USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
1844 		    "ehci_start_periodic_pipe_polling: PP = 0x%p", (void *)pp);
1845 
1846 #ifdef DEBUG
1847 		switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1848 		case USB_EP_ATTR_INTR:
1849 			ASSERT((pp->pp_tw_head != NULL) &&
1850 			    (pp->pp_tw_tail != NULL));
1851 			break;
1852 		case USB_EP_ATTR_ISOCH:
1853 			ASSERT((pp->pp_itw_head != NULL) &&
1854 			    (pp->pp_itw_tail != NULL));
1855 			break;
1856 		}
1857 #endif
1858 
1859 		break;
1860 	case EHCI_PIPE_STATE_ACTIVE:
1861 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1862 		    ehcip->ehci_log_hdl,
1863 		    "ehci_start_periodic_pipe_polling: "
1864 		    "Polling is already in progress");
1865 
1866 		error = USB_FAILURE;
1867 		break;
1868 	case EHCI_PIPE_STATE_ERROR:
1869 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1870 		    ehcip->ehci_log_hdl,
1871 		    "ehci_start_periodic_pipe_polling: "
1872 		    "Pipe is halted and perform reset"
1873 		    "before restart polling");
1874 
1875 		error = USB_FAILURE;
1876 		break;
1877 	default:
1878 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1879 		    ehcip->ehci_log_hdl,
1880 		    "ehci_start_periodic_pipe_polling: "
1881 		    "Undefined state");
1882 
1883 		error = USB_FAILURE;
1884 		break;
1885 	}
1886 
1887 	return (error);
1888 }
1889 
1890 
1891 /*
1892  * ehci_start_pipe_polling:
1893  *
1894  * Insert the number of periodic requests corresponding to polling
1895  * interval as calculated during pipe open.
1896  */
1897 static int
1898 ehci_start_pipe_polling(
1899 	ehci_state_t		*ehcip,
1900 	usba_pipe_handle_data_t	*ph,
1901 	usb_flags_t		flags)
1902 {
1903 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1904 	usb_ep_descr_t		*eptd = &ph->p_ep;
1905 	int			error = USB_FAILURE;
1906 
1907 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1908 	    "ehci_start_pipe_polling:");
1909 
1910 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1911 
1912 	/*
1913 	 * For the start polling, pp_max_periodic_req_cnt will be zero
1914 	 * and for the restart polling request, it will be non zero.
1915 	 *
1916 	 * In case of start polling request, find out number of requests
1917 	 * required for the Interrupt IN endpoints corresponding to the
1918 	 * endpoint polling interval. For Isochronous IN endpoints, it is
1919 	 * always fixed since its polling interval will be one ms.
1920 	 */
1921 	if (pp->pp_max_periodic_req_cnt == 0) {
1922 
1923 		ehci_set_periodic_pipe_polling(ehcip, ph);
1924 	}
1925 
1926 	ASSERT(pp->pp_max_periodic_req_cnt != 0);
1927 
1928 	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1929 	case USB_EP_ATTR_INTR:
1930 		error = ehci_start_intr_polling(ehcip, ph, flags);
1931 		break;
1932 	case USB_EP_ATTR_ISOCH:
1933 		error = ehci_start_isoc_polling(ehcip, ph, flags);
1934 		break;
1935 	}
1936 
1937 	return (error);
1938 }
1939 
1940 static int
1941 ehci_start_intr_polling(
1942 	ehci_state_t		*ehcip,
1943 	usba_pipe_handle_data_t	*ph,
1944 	usb_flags_t		flags)
1945 {
1946 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1947 	ehci_trans_wrapper_t	*tw_list, *tw;
1948 	int			i, total_tws;
1949 	int			error = USB_SUCCESS;
1950 
1951 	/* Allocate all the necessary resources for the IN transfer */
1952 	tw_list = NULL;
1953 	total_tws = pp->pp_max_periodic_req_cnt - pp->pp_cur_periodic_req_cnt;
1954 	for (i = 0; i < total_tws; i += 1) {
1955 		tw = ehci_allocate_intr_resources(ehcip, ph, NULL, flags);
1956 		if (tw == NULL) {
1957 			error = USB_NO_RESOURCES;
1958 			/* There are not enough resources, deallocate the TWs */
1959 			tw = tw_list;
1960 			while (tw != NULL) {
1961 				tw_list = tw->tw_next;
1962 				ehci_deallocate_intr_in_resource(
1963 				    ehcip, pp, tw);
1964 				ehci_deallocate_tw(ehcip, pp, tw);
1965 				tw = tw_list;
1966 			}
1967 
1968 			return (error);
1969 		} else {
1970 			if (tw_list == NULL) {
1971 				tw_list = tw;
1972 			}
1973 		}
1974 	}
1975 
1976 	while (pp->pp_cur_periodic_req_cnt < pp->pp_max_periodic_req_cnt) {
1977 
1978 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1979 		    "ehci_start_pipe_polling: max = %d curr = %d tw = %p:",
1980 		    pp->pp_max_periodic_req_cnt, pp->pp_cur_periodic_req_cnt,
1981 		    (void *)tw_list);
1982 
1983 		tw = tw_list;
1984 		tw_list = tw->tw_next;
1985 
1986 		ehci_insert_intr_req(ehcip, pp, tw, flags);
1987 
1988 		pp->pp_cur_periodic_req_cnt++;
1989 	}
1990 
1991 	return (error);
1992 }
1993 
1994 
1995 /*
1996  * ehci_set_periodic_pipe_polling:
1997  *
1998  * Calculate the number of periodic requests needed corresponding to the
1999  * interrupt IN endpoints polling interval. Table below gives the number
2000  * of periodic requests needed for the interrupt IN endpoints  according
2001  * to endpoint polling interval.
2002  *
2003  * Polling interval		Number of periodic requests
2004  *
2005  * 1ms				4
2006  * 2ms				2
2007  * 4ms to 32ms			1
2008  */
2009 static void
2010 ehci_set_periodic_pipe_polling(
2011 	ehci_state_t		*ehcip,
2012 	usba_pipe_handle_data_t	*ph)
2013 {
2014 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2015 	usb_ep_descr_t		*endpoint = &ph->p_ep;
2016 	uchar_t			ep_attr = endpoint->bmAttributes;
2017 	uint_t			interval;
2018 
2019 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2020 	    "ehci_set_periodic_pipe_polling:");
2021 
2022 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2023 
2024 	pp->pp_cur_periodic_req_cnt = 0;
2025 
2026 	/*
2027 	 * Check usb flag whether USB_FLAGS_ONE_TIME_POLL flag is
2028 	 * set and if so, set pp->pp_max_periodic_req_cnt to one.
2029 	 */
2030 	if (((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) &&
2031 	    (pp->pp_client_periodic_in_reqp)) {
2032 		usb_intr_req_t *intr_reqp = (usb_intr_req_t *)
2033 		    pp->pp_client_periodic_in_reqp;
2034 
2035 		if (intr_reqp->intr_attributes &
2036 		    USB_ATTRS_ONE_XFER) {
2037 
2038 			pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2039 
2040 			return;
2041 		}
2042 	}
2043 
2044 	mutex_enter(&ph->p_usba_device->usb_mutex);
2045 
2046 	/*
2047 	 * The ehci_adjust_polling_interval function will not fail
2048 	 * at this instance since bandwidth allocation is already
2049 	 * done. Here we are getting only the periodic interval.
2050 	 */
2051 	interval = ehci_adjust_polling_interval(ehcip, endpoint,
2052 	    ph->p_usba_device->usb_port_status);
2053 
2054 	mutex_exit(&ph->p_usba_device->usb_mutex);
2055 
2056 	switch (interval) {
2057 	case EHCI_INTR_1MS_POLL:
2058 		pp->pp_max_periodic_req_cnt = EHCI_INTR_1MS_REQS;
2059 		break;
2060 	case EHCI_INTR_2MS_POLL:
2061 		pp->pp_max_periodic_req_cnt = EHCI_INTR_2MS_REQS;
2062 		break;
2063 	default:
2064 		pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2065 		break;
2066 	}
2067 
2068 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2069 	    "ehci_set_periodic_pipe_polling: Max periodic requests = %d",
2070 	    pp->pp_max_periodic_req_cnt);
2071 }
2072 
2073 /*
2074  * ehci_allocate_intr_resources:
2075  *
2076  * Calculates the number of tds necessary for a intr transfer, and allocates
2077  * all the necessary resources.
2078  *
2079  * Returns NULL if there is insufficient resources otherwise TW.
2080  */
2081 ehci_trans_wrapper_t *
2082 ehci_allocate_intr_resources(
2083 	ehci_state_t		*ehcip,
2084 	usba_pipe_handle_data_t	*ph,
2085 	usb_intr_req_t		*intr_reqp,
2086 	usb_flags_t		flags)
2087 {
2088 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2089 	int			pipe_dir;
2090 	size_t			qtd_count = 1;
2091 	size_t			tw_length;
2092 	ehci_trans_wrapper_t	*tw;
2093 
2094 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2095 	    "ehci_allocate_intr_resources:");
2096 
2097 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2098 
2099 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
2100 
2101 	/* Get the length of interrupt transfer & alloc data */
2102 	if (intr_reqp) {
2103 		tw_length = intr_reqp->intr_len;
2104 	} else {
2105 		ASSERT(pipe_dir == USB_EP_DIR_IN);
2106 		tw_length = (pp->pp_client_periodic_in_reqp) ?
2107 		    (((usb_intr_req_t *)pp->
2108 		    pp_client_periodic_in_reqp)->intr_len) :
2109 		    ph->p_ep.wMaxPacketSize;
2110 	}
2111 
2112 	/* Check the size of interrupt request */
2113 	if (tw_length > EHCI_MAX_QTD_XFER_SIZE) {
2114 
2115 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2116 		    "ehci_allocate_intr_resources: Intr request size 0x%lx is "
2117 		    "more than 0x%x", tw_length, EHCI_MAX_QTD_XFER_SIZE);
2118 
2119 		return (NULL);
2120 	}
2121 
2122 	if ((tw = ehci_allocate_tw_resources(ehcip, pp, tw_length, flags,
2123 	    qtd_count)) == NULL) {
2124 
2125 		return (NULL);
2126 	}
2127 
2128 	if (pipe_dir == USB_EP_DIR_IN) {
2129 		if (ehci_allocate_intr_in_resource(ehcip, pp, tw, flags) !=
2130 		    USB_SUCCESS) {
2131 			ehci_deallocate_tw(ehcip, pp, tw);
2132 		}
2133 		tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
2134 	} else {
2135 		if (tw_length) {
2136 			ASSERT(intr_reqp->intr_data != NULL);
2137 
2138 			/* Copy the data into the buffer */
2139 			bcopy(intr_reqp->intr_data->b_rptr, tw->tw_buf,
2140 			    intr_reqp->intr_len);
2141 
2142 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
2143 			    intr_reqp->intr_len);
2144 		}
2145 
2146 		tw->tw_curr_xfer_reqp = (usb_opaque_t)intr_reqp;
2147 		tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
2148 	}
2149 
2150 	if (intr_reqp) {
2151 		tw->tw_timeout = intr_reqp->intr_timeout;
2152 	}
2153 
2154 	/*
2155 	 * Initialize the callback and any callback
2156 	 * data required when the qtd completes.
2157 	 */
2158 	tw->tw_handle_qtd = ehci_handle_intr_qtd;
2159 	tw->tw_handle_callback_value = NULL;
2160 
2161 	return (tw);
2162 }
2163 
2164 
2165 /*
2166  * ehci_insert_intr_req:
2167  *
2168  * Insert an Interrupt request into the Host Controller's periodic list.
2169  */
2170 /* ARGSUSED */
2171 void
2172 ehci_insert_intr_req(
2173 	ehci_state_t		*ehcip,
2174 	ehci_pipe_private_t	*pp,
2175 	ehci_trans_wrapper_t	*tw,
2176 	usb_flags_t		flags)
2177 {
2178 	uint_t			ctrl = 0;
2179 
2180 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2181 
2182 	ASSERT(tw->tw_curr_xfer_reqp != NULL);
2183 
2184 	ctrl = (tw->tw_direction | EHCI_QTD_CTRL_INTR_ON_COMPLETE);
2185 
2186 	/* Insert another interrupt QTD */
2187 	(void) ehci_insert_qtd(ehcip, ctrl, 0, tw->tw_length, 0, pp, tw);
2188 
2189 	/* Start the timer for this Interrupt transfer */
2190 	ehci_start_xfer_timer(ehcip, pp, tw);
2191 }
2192 
2193 
2194 /*
2195  * ehci_stop_periodic_pipe_polling:
2196  */
2197 /* ARGSUSED */
2198 int
2199 ehci_stop_periodic_pipe_polling(
2200 	ehci_state_t		*ehcip,
2201 	usba_pipe_handle_data_t	*ph,
2202 	usb_flags_t		flags)
2203 {
2204 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2205 	usb_ep_descr_t		*eptd = &ph->p_ep;
2206 
2207 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2208 	    "ehci_stop_periodic_pipe_polling: Flags = 0x%x", flags);
2209 
2210 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2211 
2212 	/*
2213 	 * Check and handle stop polling on root hub interrupt pipe.
2214 	 */
2215 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
2216 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
2217 	    USB_EP_ATTR_INTR)) {
2218 
2219 		ehci_handle_root_hub_pipe_stop_intr_polling(ph, flags);
2220 
2221 		return (USB_SUCCESS);
2222 	}
2223 
2224 	if (pp->pp_state != EHCI_PIPE_STATE_ACTIVE) {
2225 
2226 		USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2227 		    "ehci_stop_periodic_pipe_polling: "
2228 		    "Polling already stopped");
2229 
2230 		return (USB_SUCCESS);
2231 	}
2232 
2233 	/* Set pipe state to pipe stop polling */
2234 	pp->pp_state = EHCI_PIPE_STATE_STOP_POLLING;
2235 
2236 	ehci_pipe_cleanup(ehcip, ph);
2237 
2238 	return (USB_SUCCESS);
2239 }
2240 
2241 
2242 /*
2243  * ehci_insert_qtd:
2244  *
2245  * Insert a Transfer Descriptor (QTD) on an Endpoint Descriptor (QH).
2246  * Always returns USB_SUCCESS for now.	Once Isoch has been implemented,
2247  * it may return USB_FAILURE.
2248  */
2249 int
2250 ehci_insert_qtd(
2251 	ehci_state_t		*ehcip,
2252 	uint32_t		qtd_ctrl,
2253 	size_t			qtd_dma_offs,
2254 	size_t			qtd_length,
2255 	uint32_t		qtd_ctrl_phase,
2256 	ehci_pipe_private_t	*pp,
2257 	ehci_trans_wrapper_t	*tw)
2258 {
2259 	ehci_qtd_t		*curr_dummy_qtd, *next_dummy_qtd;
2260 	ehci_qtd_t		*new_dummy_qtd;
2261 	ehci_qh_t		*qh = pp->pp_qh;
2262 	int			error = USB_SUCCESS;
2263 
2264 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2265 
2266 	/* Allocate new dummy QTD */
2267 	new_dummy_qtd = tw->tw_qtd_free_list;
2268 
2269 	ASSERT(new_dummy_qtd != NULL);
2270 	tw->tw_qtd_free_list = ehci_qtd_iommu_to_cpu(ehcip,
2271 	    Get_QTD(new_dummy_qtd->qtd_tw_next_qtd));
2272 	Set_QTD(new_dummy_qtd->qtd_tw_next_qtd, NULL);
2273 
2274 	/* Get the current and next dummy QTDs */
2275 	curr_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2276 	    Get_QH(qh->qh_dummy_qtd));
2277 	next_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2278 	    Get_QTD(curr_dummy_qtd->qtd_next_qtd));
2279 
2280 	/* Update QH's dummy qtd field */
2281 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, next_dummy_qtd));
2282 
2283 	/* Update next dummy's next qtd pointer */
2284 	Set_QTD(next_dummy_qtd->qtd_next_qtd,
2285 	    ehci_qtd_cpu_to_iommu(ehcip, new_dummy_qtd));
2286 
2287 	/*
2288 	 * Fill in the current dummy qtd and
2289 	 * add the new dummy to the end.
2290 	 */
2291 	ehci_fill_in_qtd(ehcip, curr_dummy_qtd, qtd_ctrl,
2292 	    qtd_dma_offs, qtd_length, qtd_ctrl_phase, pp, tw);
2293 
2294 	/* Insert this qtd onto the tw */
2295 	ehci_insert_qtd_on_tw(ehcip, tw, curr_dummy_qtd);
2296 
2297 	/*
2298 	 * Insert this qtd onto active qtd list.
2299 	 * Don't insert polled mode qtd here.
2300 	 */
2301 	if (pp->pp_flag != EHCI_POLLED_MODE_FLAG) {
2302 		/* Insert this qtd onto active qtd list */
2303 		ehci_insert_qtd_into_active_qtd_list(ehcip, curr_dummy_qtd);
2304 	}
2305 
2306 	/* Print qh and qtd */
2307 	ehci_print_qh(ehcip, qh);
2308 	ehci_print_qtd(ehcip, curr_dummy_qtd);
2309 
2310 	return (error);
2311 }
2312 
2313 
2314 /*
2315  * ehci_allocate_qtd_from_pool:
2316  *
2317  * Allocate a Transfer Descriptor (QTD) from the QTD buffer pool.
2318  */
2319 static ehci_qtd_t *
2320 ehci_allocate_qtd_from_pool(ehci_state_t	*ehcip)
2321 {
2322 	int		i, ctrl;
2323 	ehci_qtd_t	*qtd;
2324 
2325 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2326 
2327 	/*
2328 	 * Search for a blank Transfer Descriptor (QTD)
2329 	 * in the QTD buffer pool.
2330 	 */
2331 	for (i = 0; i < ehci_qtd_pool_size; i ++) {
2332 		ctrl = Get_QTD(ehcip->ehci_qtd_pool_addr[i].qtd_state);
2333 		if (ctrl == EHCI_QTD_FREE) {
2334 			break;
2335 		}
2336 	}
2337 
2338 	if (i >= ehci_qtd_pool_size) {
2339 		USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2340 		    "ehci_allocate_qtd_from_pool: QTD exhausted");
2341 
2342 		return (NULL);
2343 	}
2344 
2345 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2346 	    "ehci_allocate_qtd_from_pool: Allocated %d", i);
2347 
2348 	/* Create a new dummy for the end of the QTD list */
2349 	qtd = &ehcip->ehci_qtd_pool_addr[i];
2350 
2351 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2352 	    "ehci_allocate_qtd_from_pool: qtd 0x%p", (void *)qtd);
2353 
2354 	/* Mark the newly allocated QTD as a dummy */
2355 	Set_QTD(qtd->qtd_state, EHCI_QTD_DUMMY);
2356 
2357 	/* Mark the status of this new QTD to halted state */
2358 	Set_QTD(qtd->qtd_ctrl, EHCI_QTD_CTRL_HALTED_XACT);
2359 
2360 	/* Disable dummy QTD's next and alternate next pointers */
2361 	Set_QTD(qtd->qtd_next_qtd, EHCI_QTD_NEXT_QTD_PTR_VALID);
2362 	Set_QTD(qtd->qtd_alt_next_qtd, EHCI_QTD_ALT_NEXT_QTD_PTR_VALID);
2363 
2364 	return (qtd);
2365 }
2366 
2367 
2368 /*
2369  * ehci_fill_in_qtd:
2370  *
2371  * Fill in the fields of a Transfer Descriptor (QTD).
2372  * The "Buffer Pointer" fields of a QTD are retrieved from the TW
2373  * it is associated with.
2374  *
2375  * Note:
2376  * qtd_dma_offs - the starting offset into the TW buffer, where the QTD
2377  *		  should transfer from. It should be 4K aligned. And when
2378  *		  a TW has more than one QTDs, the QTDs must be filled in
2379  *		  increasing order.
2380  * qtd_length - the total bytes to transfer.
2381  */
2382 /*ARGSUSED*/
2383 static void
2384 ehci_fill_in_qtd(
2385 	ehci_state_t		*ehcip,
2386 	ehci_qtd_t		*qtd,
2387 	uint32_t		qtd_ctrl,
2388 	size_t			qtd_dma_offs,
2389 	size_t			qtd_length,
2390 	uint32_t		qtd_ctrl_phase,
2391 	ehci_pipe_private_t	*pp,
2392 	ehci_trans_wrapper_t	*tw)
2393 {
2394 	uint32_t		buf_addr;
2395 	size_t			buf_len = qtd_length;
2396 	uint32_t		ctrl = qtd_ctrl;
2397 	uint_t			i = 0;
2398 	int			rem_len;
2399 
2400 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2401 	    "ehci_fill_in_qtd: qtd 0x%p ctrl 0x%x bufoffs 0x%lx "
2402 	    "len 0x%lx", (void *)qtd, qtd_ctrl, qtd_dma_offs, qtd_length);
2403 
2404 	/* Assert that the qtd to be filled in is a dummy */
2405 	ASSERT(Get_QTD(qtd->qtd_state) == EHCI_QTD_DUMMY);
2406 
2407 	/* Change QTD's state Active */
2408 	Set_QTD(qtd->qtd_state, EHCI_QTD_ACTIVE);
2409 
2410 	/* Set the total length data transfer */
2411 	ctrl |= (((qtd_length << EHCI_QTD_CTRL_BYTES_TO_XFER_SHIFT)
2412 	    & EHCI_QTD_CTRL_BYTES_TO_XFER) | EHCI_QTD_CTRL_MAX_ERR_COUNTS);
2413 
2414 	/*
2415 	 * QTDs must be filled in increasing DMA offset order.
2416 	 * tw_dma_offs is initialized to be 0 at TW creation and
2417 	 * is only increased in this function.
2418 	 */
2419 	ASSERT(buf_len == 0 || qtd_dma_offs >= tw->tw_dma_offs);
2420 
2421 	/*
2422 	 * Save the starting dma buffer offset used and
2423 	 * length of data that will be transfered in
2424 	 * the current QTD.
2425 	 */
2426 	Set_QTD(qtd->qtd_xfer_offs, qtd_dma_offs);
2427 	Set_QTD(qtd->qtd_xfer_len, buf_len);
2428 
2429 	while (buf_len) {
2430 		/*
2431 		 * Advance to the next DMA cookie until finding the cookie
2432 		 * that qtd_dma_offs falls in.
2433 		 * It is very likely this loop will never repeat more than
2434 		 * once. It is here just to accommodate the case qtd_dma_offs
2435 		 * is increased by multiple cookies during two consecutive
2436 		 * calls into this function. In that case, the interim DMA
2437 		 * buffer is allowed to be skipped.
2438 		 */
2439 		while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2440 		    qtd_dma_offs) {
2441 			/*
2442 			 * tw_dma_offs always points to the starting offset
2443 			 * of a cookie
2444 			 */
2445 			tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2446 			ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2447 			tw->tw_cookie_idx++;
2448 			ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2449 		}
2450 
2451 		/*
2452 		 * Counting the remained buffer length to be filled in
2453 		 * the QTD for current DMA cookie
2454 		 */
2455 		rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2456 		    qtd_dma_offs;
2457 
2458 		/* Update the beginning of the buffer */
2459 		buf_addr = (qtd_dma_offs - tw->tw_dma_offs) +
2460 		    tw->tw_cookie.dmac_address;
2461 		ASSERT((buf_addr % EHCI_4K_ALIGN) == 0);
2462 		Set_QTD(qtd->qtd_buf[i], buf_addr);
2463 
2464 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2465 		    "ehci_fill_in_qtd: dmac_addr 0x%x dmac_size "
2466 		    "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2467 		    tw->tw_cookie_idx);
2468 
2469 		if (buf_len <= EHCI_MAX_QTD_BUF_SIZE) {
2470 			ASSERT(buf_len <= rem_len);
2471 			break;
2472 		} else {
2473 			ASSERT(rem_len >= EHCI_MAX_QTD_BUF_SIZE);
2474 			buf_len -= EHCI_MAX_QTD_BUF_SIZE;
2475 			qtd_dma_offs += EHCI_MAX_QTD_BUF_SIZE;
2476 		}
2477 
2478 		i++;
2479 	}
2480 
2481 	/*
2482 	 * Setup the alternate next qTD pointer if appropriate.  The alternate
2483 	 * qtd is currently pointing to a QTD that is not yet linked, but will
2484 	 * be in the very near future.	If a short_xfer occurs in this
2485 	 * situation , the HC will automatically skip this QH.	Eventually
2486 	 * everything will be placed and the alternate_qtd will be valid QTD.
2487 	 * For more information on alternate qtds look at section 3.5.2 in the
2488 	 * EHCI spec.
2489 	 */
2490 	if (tw->tw_alt_qtd != NULL) {
2491 		Set_QTD(qtd->qtd_alt_next_qtd,
2492 		    (ehci_qtd_cpu_to_iommu(ehcip, tw->tw_alt_qtd) &
2493 		    EHCI_QTD_ALT_NEXT_QTD_PTR));
2494 	}
2495 
2496 	/*
2497 	 * For control, bulk and interrupt QTD, now
2498 	 * enable current QTD by setting active bit.
2499 	 */
2500 	Set_QTD(qtd->qtd_ctrl, (ctrl | EHCI_QTD_CTRL_ACTIVE_XACT));
2501 
2502 	/*
2503 	 * For Control Xfer, qtd_ctrl_phase is a valid filed.
2504 	 */
2505 	if (qtd_ctrl_phase) {
2506 		Set_QTD(qtd->qtd_ctrl_phase, qtd_ctrl_phase);
2507 	}
2508 
2509 	/* Set the transfer wrapper */
2510 	ASSERT(tw != NULL);
2511 	ASSERT(tw->tw_id != NULL);
2512 
2513 	Set_QTD(qtd->qtd_trans_wrapper, (uint32_t)tw->tw_id);
2514 }
2515 
2516 
2517 /*
2518  * ehci_insert_qtd_on_tw:
2519  *
2520  * The transfer wrapper keeps a list of all Transfer Descriptors (QTD) that
2521  * are allocated for this transfer. Insert a QTD  onto this list. The  list
2522  * of QTD's does not include the dummy QTD that is at the end of the list of
2523  * QTD's for the endpoint.
2524  */
2525 static void
2526 ehci_insert_qtd_on_tw(
2527 	ehci_state_t		*ehcip,
2528 	ehci_trans_wrapper_t	*tw,
2529 	ehci_qtd_t		*qtd)
2530 {
2531 	/*
2532 	 * Set the next pointer to NULL because
2533 	 * this is the last QTD on list.
2534 	 */
2535 	Set_QTD(qtd->qtd_tw_next_qtd, NULL);
2536 
2537 	if (tw->tw_qtd_head == NULL) {
2538 		ASSERT(tw->tw_qtd_tail == NULL);
2539 		tw->tw_qtd_head = qtd;
2540 		tw->tw_qtd_tail = qtd;
2541 	} else {
2542 		ehci_qtd_t *dummy = (ehci_qtd_t *)tw->tw_qtd_tail;
2543 
2544 		ASSERT(dummy != NULL);
2545 		ASSERT(dummy != qtd);
2546 		ASSERT(Get_QTD(qtd->qtd_state) != EHCI_QTD_DUMMY);
2547 
2548 		/* Add the qtd to the end of the list */
2549 		Set_QTD(dummy->qtd_tw_next_qtd,
2550 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2551 
2552 		tw->tw_qtd_tail = qtd;
2553 
2554 		ASSERT(Get_QTD(qtd->qtd_tw_next_qtd) == NULL);
2555 	}
2556 }
2557 
2558 
2559 /*
2560  * ehci_insert_qtd_into_active_qtd_list:
2561  *
2562  * Insert current QTD into active QTD list.
2563  */
2564 static void
2565 ehci_insert_qtd_into_active_qtd_list(
2566 	ehci_state_t		*ehcip,
2567 	ehci_qtd_t		*qtd)
2568 {
2569 	ehci_qtd_t		*curr_qtd, *next_qtd;
2570 
2571 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2572 
2573 	curr_qtd = ehcip->ehci_active_qtd_list;
2574 
2575 	/* Insert this QTD into QTD Active List */
2576 	if (curr_qtd) {
2577 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2578 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2579 
2580 		while (next_qtd) {
2581 			curr_qtd = next_qtd;
2582 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2583 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2584 		}
2585 
2586 		Set_QTD(qtd->qtd_active_qtd_prev,
2587 		    ehci_qtd_cpu_to_iommu(ehcip, curr_qtd));
2588 
2589 		Set_QTD(curr_qtd->qtd_active_qtd_next,
2590 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2591 	} else {
2592 		ehcip->ehci_active_qtd_list = qtd;
2593 		Set_QTD(qtd->qtd_active_qtd_next, NULL);
2594 		Set_QTD(qtd->qtd_active_qtd_prev, NULL);
2595 	}
2596 }
2597 
2598 
2599 /*
2600  * ehci_remove_qtd_from_active_qtd_list:
2601  *
2602  * Remove current QTD from the active QTD list.
2603  *
2604  * NOTE: This function is also called from POLLED MODE.
2605  */
2606 void
2607 ehci_remove_qtd_from_active_qtd_list(
2608 	ehci_state_t		*ehcip,
2609 	ehci_qtd_t		*qtd)
2610 {
2611 	ehci_qtd_t		*curr_qtd, *prev_qtd, *next_qtd;
2612 
2613 	ASSERT(qtd != NULL);
2614 
2615 	curr_qtd = ehcip->ehci_active_qtd_list;
2616 
2617 	while ((curr_qtd) && (curr_qtd != qtd)) {
2618 		curr_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2619 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2620 	}
2621 
2622 	if ((curr_qtd) && (curr_qtd == qtd)) {
2623 		prev_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2624 		    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2625 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2626 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2627 
2628 		if (prev_qtd) {
2629 			Set_QTD(prev_qtd->qtd_active_qtd_next,
2630 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2631 		} else {
2632 			ehcip->ehci_active_qtd_list = next_qtd;
2633 		}
2634 
2635 		if (next_qtd) {
2636 			Set_QTD(next_qtd->qtd_active_qtd_prev,
2637 			    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2638 		}
2639 	} else {
2640 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2641 		    "ehci_remove_qtd_from_active_qtd_list: "
2642 		    "Unable to find QTD in active_qtd_list");
2643 	}
2644 }
2645 
2646 
2647 /*
2648  * ehci_traverse_qtds:
2649  *
2650  * Traverse the list of QTDs for given pipe using transfer wrapper.  Since
2651  * the endpoint is marked as Halted, the Host Controller (HC) is no longer
2652  * accessing these QTDs. Remove all the QTDs that are attached to endpoint.
2653  */
2654 static void
2655 ehci_traverse_qtds(
2656 	ehci_state_t		*ehcip,
2657 	usba_pipe_handle_data_t	*ph)
2658 {
2659 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2660 	ehci_trans_wrapper_t	*next_tw;
2661 	ehci_qtd_t		*qtd;
2662 	ehci_qtd_t		*next_qtd;
2663 
2664 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2665 
2666 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2667 	    "ehci_traverse_qtds:");
2668 
2669 	/* Process the transfer wrappers for this pipe */
2670 	next_tw = pp->pp_tw_head;
2671 
2672 	while (next_tw) {
2673 		/* Stop the the transfer timer */
2674 		ehci_stop_xfer_timer(ehcip, next_tw, EHCI_REMOVE_XFER_ALWAYS);
2675 
2676 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
2677 
2678 		/* Walk through each QTD for this transfer wrapper */
2679 		while (qtd) {
2680 			/* Remove this QTD from active QTD list */
2681 			ehci_remove_qtd_from_active_qtd_list(ehcip, qtd);
2682 
2683 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2684 			    Get_QTD(qtd->qtd_tw_next_qtd));
2685 
2686 			/* Deallocate this QTD */
2687 			ehci_deallocate_qtd(ehcip, qtd);
2688 
2689 			qtd = next_qtd;
2690 		}
2691 
2692 		next_tw = next_tw->tw_next;
2693 	}
2694 
2695 	/* Clear current qtd pointer */
2696 	Set_QH(pp->pp_qh->qh_curr_qtd, (uint32_t)0x00000000);
2697 
2698 	/* Update the next qtd pointer in the QH */
2699 	Set_QH(pp->pp_qh->qh_next_qtd, Get_QH(pp->pp_qh->qh_dummy_qtd));
2700 }
2701 
2702 
2703 /*
2704  * ehci_deallocate_qtd:
2705  *
2706  * Deallocate a Host Controller's (HC) Transfer Descriptor (QTD).
2707  *
2708  * NOTE: This function is also called from POLLED MODE.
2709  */
2710 void
2711 ehci_deallocate_qtd(
2712 	ehci_state_t		*ehcip,
2713 	ehci_qtd_t		*old_qtd)
2714 {
2715 	ehci_trans_wrapper_t	*tw = NULL;
2716 
2717 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2718 	    "ehci_deallocate_qtd: old_qtd = 0x%p", (void *)old_qtd);
2719 
2720 	/*
2721 	 * Obtain the transaction wrapper and tw will be
2722 	 * NULL for the dummy QTDs.
2723 	 */
2724 	if (Get_QTD(old_qtd->qtd_state) != EHCI_QTD_DUMMY) {
2725 		tw = (ehci_trans_wrapper_t *)
2726 		    EHCI_LOOKUP_ID((uint32_t)
2727 		    Get_QTD(old_qtd->qtd_trans_wrapper));
2728 
2729 		ASSERT(tw != NULL);
2730 	}
2731 
2732 	/*
2733 	 * If QTD's transfer wrapper is NULL, don't access its TW.
2734 	 * Just free the QTD.
2735 	 */
2736 	if (tw) {
2737 		ehci_qtd_t	*qtd, *next_qtd;
2738 
2739 		qtd = tw->tw_qtd_head;
2740 
2741 		if (old_qtd != qtd) {
2742 			next_qtd = ehci_qtd_iommu_to_cpu(
2743 			    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2744 
2745 			while (next_qtd != old_qtd) {
2746 				qtd = next_qtd;
2747 				next_qtd = ehci_qtd_iommu_to_cpu(
2748 				    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2749 			}
2750 
2751 			Set_QTD(qtd->qtd_tw_next_qtd, old_qtd->qtd_tw_next_qtd);
2752 
2753 			if (qtd->qtd_tw_next_qtd == NULL) {
2754 				tw->tw_qtd_tail = qtd;
2755 			}
2756 		} else {
2757 			tw->tw_qtd_head = ehci_qtd_iommu_to_cpu(
2758 			    ehcip, Get_QTD(old_qtd->qtd_tw_next_qtd));
2759 
2760 			if (tw->tw_qtd_head == NULL) {
2761 				tw->tw_qtd_tail = NULL;
2762 			}
2763 		}
2764 	}
2765 
2766 	bzero((void *)old_qtd, sizeof (ehci_qtd_t));
2767 	Set_QTD(old_qtd->qtd_state, EHCI_QTD_FREE);
2768 
2769 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2770 	    "Dealloc_qtd: qtd 0x%p", (void *)old_qtd);
2771 }
2772 
2773 
2774 /*
2775  * ehci_qtd_cpu_to_iommu:
2776  *
2777  * This function converts for the given Transfer Descriptor (QTD) CPU address
2778  * to IO address.
2779  *
2780  * NOTE: This function is also called from POLLED MODE.
2781  */
2782 uint32_t
2783 ehci_qtd_cpu_to_iommu(
2784 	ehci_state_t	*ehcip,
2785 	ehci_qtd_t	*addr)
2786 {
2787 	uint32_t	td;
2788 
2789 	td  = (uint32_t)ehcip->ehci_qtd_pool_cookie.dmac_address +
2790 	    (uint32_t)((uintptr_t)addr -
2791 	    (uintptr_t)(ehcip->ehci_qtd_pool_addr));
2792 
2793 	ASSERT((ehcip->ehci_qtd_pool_cookie.dmac_address +
2794 	    (uint32_t) (sizeof (ehci_qtd_t) *
2795 	    (addr - ehcip->ehci_qtd_pool_addr))) ==
2796 	    (ehcip->ehci_qtd_pool_cookie.dmac_address +
2797 	    (uint32_t)((uintptr_t)addr - (uintptr_t)
2798 	    (ehcip->ehci_qtd_pool_addr))));
2799 
2800 	ASSERT(td >= ehcip->ehci_qtd_pool_cookie.dmac_address);
2801 	ASSERT(td <= ehcip->ehci_qtd_pool_cookie.dmac_address +
2802 	    sizeof (ehci_qtd_t) * ehci_qtd_pool_size);
2803 
2804 	return (td);
2805 }
2806 
2807 
2808 /*
2809  * ehci_qtd_iommu_to_cpu:
2810  *
2811  * This function converts for the given Transfer Descriptor (QTD) IO address
2812  * to CPU address.
2813  *
2814  * NOTE: This function is also called from POLLED MODE.
2815  */
2816 ehci_qtd_t *
2817 ehci_qtd_iommu_to_cpu(
2818 	ehci_state_t	*ehcip,
2819 	uintptr_t	addr)
2820 {
2821 	ehci_qtd_t	*qtd;
2822 
2823 	if (addr == NULL) {
2824 
2825 		return (NULL);
2826 	}
2827 
2828 	qtd = (ehci_qtd_t *)((uintptr_t)
2829 	    (addr - ehcip->ehci_qtd_pool_cookie.dmac_address) +
2830 	    (uintptr_t)ehcip->ehci_qtd_pool_addr);
2831 
2832 	ASSERT(qtd >= ehcip->ehci_qtd_pool_addr);
2833 	ASSERT((uintptr_t)qtd <= (uintptr_t)ehcip->ehci_qtd_pool_addr +
2834 	    (uintptr_t)(sizeof (ehci_qtd_t) * ehci_qtd_pool_size));
2835 
2836 	return (qtd);
2837 }
2838 
2839 /*
2840  * ehci_allocate_tds_for_tw_resources:
2841  *
2842  * Allocate n Transfer Descriptors (TD) from the TD buffer pool and places it
2843  * into the TW.  Also chooses the correct alternate qtd when required.	It is
2844  * used for hardware short transfer support.  For more information on
2845  * alternate qtds look at section 3.5.2 in the EHCI spec.
2846  * Here is how each alternate qtd's are used:
2847  *
2848  * Bulk: used fully.
2849  * Intr: xfers only require 1 QTD, so alternate qtds are never used.
2850  * Ctrl: Should not use alternate QTD
2851  * Isoch: Doesn't support short_xfer nor does it use QTD
2852  *
2853  * Returns USB_NO_RESOURCES if it was not able to allocate all the requested TD
2854  * otherwise USB_SUCCESS.
2855  */
2856 int
2857 ehci_allocate_tds_for_tw(
2858 	ehci_state_t		*ehcip,
2859 	ehci_pipe_private_t	*pp,
2860 	ehci_trans_wrapper_t	*tw,
2861 	size_t			qtd_count)
2862 {
2863 	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
2864 	uchar_t			attributes;
2865 	ehci_qtd_t		*qtd;
2866 	uint32_t		qtd_addr;
2867 	int			i;
2868 	int			error = USB_SUCCESS;
2869 
2870 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
2871 
2872 	for (i = 0; i < qtd_count; i += 1) {
2873 		qtd = ehci_allocate_qtd_from_pool(ehcip);
2874 		if (qtd == NULL) {
2875 			error = USB_NO_RESOURCES;
2876 			USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2877 			    "ehci_allocate_qtds_for_tw: "
2878 			    "Unable to allocate %lu QTDs",
2879 			    qtd_count);
2880 			break;
2881 		}
2882 		if (i > 0) {
2883 			qtd_addr = ehci_qtd_cpu_to_iommu(ehcip,
2884 			    tw->tw_qtd_free_list);
2885 			Set_QTD(qtd->qtd_tw_next_qtd, qtd_addr);
2886 		}
2887 		tw->tw_qtd_free_list = qtd;
2888 
2889 		/*
2890 		 * Save the second one as a pointer to the new dummy 1.
2891 		 * It is used later for the alt_qtd_ptr.  Xfers with only
2892 		 * one qtd do not need alt_qtd_ptr.
2893 		 * The tds's are allocated and put into a stack, that is
2894 		 * why the second qtd allocated will turn out to be the
2895 		 * new dummy 1.
2896 		 */
2897 		if ((i == 1) && (attributes == USB_EP_ATTR_BULK)) {
2898 			tw->tw_alt_qtd = qtd;
2899 		}
2900 	}
2901 
2902 	return (error);
2903 }
2904 
2905 /*
2906  * ehci_allocate_tw_resources:
2907  *
2908  * Allocate a Transaction Wrapper (TW) and n Transfer Descriptors (QTD)
2909  * from the QTD buffer pool and places it into the TW.	It does an all
2910  * or nothing transaction.
2911  *
2912  * Returns NULL if there is insufficient resources otherwise TW.
2913  */
2914 static ehci_trans_wrapper_t *
2915 ehci_allocate_tw_resources(
2916 	ehci_state_t		*ehcip,
2917 	ehci_pipe_private_t	*pp,
2918 	size_t			tw_length,
2919 	usb_flags_t		usb_flags,
2920 	size_t			qtd_count)
2921 {
2922 	ehci_trans_wrapper_t	*tw;
2923 
2924 	tw = ehci_create_transfer_wrapper(ehcip, pp, tw_length, usb_flags);
2925 
2926 	if (tw == NULL) {
2927 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2928 		    "ehci_allocate_tw_resources: Unable to allocate TW");
2929 	} else {
2930 		if (ehci_allocate_tds_for_tw(ehcip, pp, tw, qtd_count) ==
2931 		    USB_SUCCESS) {
2932 			tw->tw_num_qtds = qtd_count;
2933 		} else {
2934 			ehci_deallocate_tw(ehcip, pp, tw);
2935 			tw = NULL;
2936 		}
2937 	}
2938 
2939 	return (tw);
2940 }
2941 
2942 
2943 /*
2944  * ehci_free_tw_td_resources:
2945  *
2946  * Free all allocated resources for Transaction Wrapper (TW).
2947  * Does not free the TW itself.
2948  *
2949  * Returns NULL if there is insufficient resources otherwise TW.
2950  */
2951 static void
2952 ehci_free_tw_td_resources(
2953 	ehci_state_t		*ehcip,
2954 	ehci_trans_wrapper_t	*tw)
2955 {
2956 	ehci_qtd_t		*qtd = NULL;
2957 	ehci_qtd_t		*temp_qtd = NULL;
2958 
2959 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2960 	    "ehci_free_tw_td_resources: tw = 0x%p", (void *)tw);
2961 
2962 	qtd = tw->tw_qtd_free_list;
2963 	while (qtd != NULL) {
2964 		/* Save the pointer to the next qtd before destroying it */
2965 		temp_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2966 		    Get_QTD(qtd->qtd_tw_next_qtd));
2967 		ehci_deallocate_qtd(ehcip, qtd);
2968 		qtd = temp_qtd;
2969 	}
2970 	tw->tw_qtd_free_list = NULL;
2971 }
2972 
2973 /*
2974  * Transfer Wrapper functions
2975  *
2976  * ehci_create_transfer_wrapper:
2977  *
2978  * Create a Transaction Wrapper (TW) and this involves the allocating of DMA
2979  * resources.
2980  */
2981 static ehci_trans_wrapper_t *
2982 ehci_create_transfer_wrapper(
2983 	ehci_state_t		*ehcip,
2984 	ehci_pipe_private_t	*pp,
2985 	size_t			length,
2986 	uint_t			usb_flags)
2987 {
2988 	ddi_device_acc_attr_t	dev_attr;
2989 	ddi_dma_attr_t		dma_attr;
2990 	int			result;
2991 	size_t			real_length;
2992 	ehci_trans_wrapper_t	*tw;
2993 	int			kmem_flag;
2994 	int			(*dmamem_wait)(caddr_t);
2995 
2996 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2997 	    "ehci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
2998 	    length, usb_flags);
2999 
3000 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3001 
3002 	/* SLEEP flag should not be used in interrupt context */
3003 	if (servicing_interrupt()) {
3004 		kmem_flag = KM_NOSLEEP;
3005 		dmamem_wait = DDI_DMA_DONTWAIT;
3006 	} else {
3007 		kmem_flag = KM_SLEEP;
3008 		dmamem_wait = DDI_DMA_SLEEP;
3009 	}
3010 
3011 	/* Allocate space for the transfer wrapper */
3012 	tw = kmem_zalloc(sizeof (ehci_trans_wrapper_t), kmem_flag);
3013 
3014 	if (tw == NULL) {
3015 		USB_DPRINTF_L2(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3016 		    "ehci_create_transfer_wrapper: kmem_zalloc failed");
3017 
3018 		return (NULL);
3019 	}
3020 
3021 	/* zero-length packet doesn't need to allocate dma memory */
3022 	if (length == 0) {
3023 
3024 		goto dmadone;
3025 	}
3026 
3027 	/* allow sg lists for transfer wrapper dma memory */
3028 	bcopy(&ehcip->ehci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
3029 	dma_attr.dma_attr_sgllen = EHCI_DMA_ATTR_TW_SGLLEN;
3030 	dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
3031 
3032 	/* Allocate the DMA handle */
3033 	result = ddi_dma_alloc_handle(ehcip->ehci_dip,
3034 	    &dma_attr, dmamem_wait, 0, &tw->tw_dmahandle);
3035 
3036 	if (result != DDI_SUCCESS) {
3037 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3038 		    "ehci_create_transfer_wrapper: Alloc handle failed");
3039 
3040 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3041 
3042 		return (NULL);
3043 	}
3044 
3045 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
3046 
3047 	/* no need for swapping the raw data */
3048 	dev_attr.devacc_attr_endian_flags  = DDI_NEVERSWAP_ACC;
3049 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
3050 
3051 	/* Allocate the memory */
3052 	result = ddi_dma_mem_alloc(tw->tw_dmahandle, length,
3053 	    &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait, NULL,
3054 	    (caddr_t *)&tw->tw_buf, &real_length, &tw->tw_accesshandle);
3055 
3056 	if (result != DDI_SUCCESS) {
3057 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3058 		    "ehci_create_transfer_wrapper: dma_mem_alloc fail");
3059 
3060 		ddi_dma_free_handle(&tw->tw_dmahandle);
3061 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3062 
3063 		return (NULL);
3064 	}
3065 
3066 	ASSERT(real_length >= length);
3067 
3068 	/* Bind the handle */
3069 	result = ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
3070 	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
3071 	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies);
3072 
3073 	if (result != DDI_DMA_MAPPED) {
3074 		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
3075 
3076 		ddi_dma_mem_free(&tw->tw_accesshandle);
3077 		ddi_dma_free_handle(&tw->tw_dmahandle);
3078 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3079 
3080 		return (NULL);
3081 	}
3082 
3083 	tw->tw_cookie_idx = 0;
3084 	tw->tw_dma_offs = 0;
3085 
3086 dmadone:
3087 	/*
3088 	 * Only allow one wrapper to be added at a time. Insert the
3089 	 * new transaction wrapper into the list for this pipe.
3090 	 */
3091 	if (pp->pp_tw_head == NULL) {
3092 		pp->pp_tw_head = tw;
3093 		pp->pp_tw_tail = tw;
3094 	} else {
3095 		pp->pp_tw_tail->tw_next = tw;
3096 		pp->pp_tw_tail = tw;
3097 	}
3098 
3099 	/* Store the transfer length */
3100 	tw->tw_length = length;
3101 
3102 	/* Store a back pointer to the pipe private structure */
3103 	tw->tw_pipe_private = pp;
3104 
3105 	/* Store the transfer type - synchronous or asynchronous */
3106 	tw->tw_flags = usb_flags;
3107 
3108 	/* Get and Store 32bit ID */
3109 	tw->tw_id = EHCI_GET_ID((void *)tw);
3110 
3111 	ASSERT(tw->tw_id != NULL);
3112 
3113 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3114 	    "ehci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
3115 	    (void *)tw, tw->tw_ncookies);
3116 
3117 	return (tw);
3118 }
3119 
3120 
3121 /*
3122  * ehci_start_xfer_timer:
3123  *
3124  * Start the timer for the control, bulk and for one time interrupt
3125  * transfers.
3126  */
3127 /* ARGSUSED */
3128 static void
3129 ehci_start_xfer_timer(
3130 	ehci_state_t		*ehcip,
3131 	ehci_pipe_private_t	*pp,
3132 	ehci_trans_wrapper_t	*tw)
3133 {
3134 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3135 	    "ehci_start_xfer_timer: tw = 0x%p", (void *)tw);
3136 
3137 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3138 
3139 	/*
3140 	 * The timeout handling is done only for control, bulk and for
3141 	 * one time Interrupt transfers.
3142 	 *
3143 	 * NOTE: If timeout is zero; Assume infinite timeout and don't
3144 	 * insert this transfer on the timeout list.
3145 	 */
3146 	if (tw->tw_timeout) {
3147 		/*
3148 		 * Add this transfer wrapper to the head of the pipe's
3149 		 * tw timeout list.
3150 		 */
3151 		if (pp->pp_timeout_list) {
3152 			tw->tw_timeout_next = pp->pp_timeout_list;
3153 		}
3154 
3155 		pp->pp_timeout_list = tw;
3156 		ehci_start_timer(ehcip, pp);
3157 	}
3158 }
3159 
3160 
3161 /*
3162  * ehci_stop_xfer_timer:
3163  *
3164  * Start the timer for the control, bulk and for one time interrupt
3165  * transfers.
3166  */
3167 void
3168 ehci_stop_xfer_timer(
3169 	ehci_state_t		*ehcip,
3170 	ehci_trans_wrapper_t	*tw,
3171 	uint_t			flag)
3172 {
3173 	ehci_pipe_private_t	*pp;
3174 	timeout_id_t		timer_id;
3175 
3176 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3177 	    "ehci_stop_xfer_timer: tw = 0x%p", (void *)tw);
3178 
3179 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3180 
3181 	/* Obtain the pipe private structure */
3182 	pp = tw->tw_pipe_private;
3183 
3184 	/* check if the timeout tw list is empty */
3185 	if (pp->pp_timeout_list == NULL) {
3186 
3187 		return;
3188 	}
3189 
3190 	switch (flag) {
3191 	case EHCI_REMOVE_XFER_IFLAST:
3192 		if (tw->tw_qtd_head != tw->tw_qtd_tail) {
3193 			break;
3194 		}
3195 
3196 		/* FALLTHRU */
3197 	case EHCI_REMOVE_XFER_ALWAYS:
3198 		ehci_remove_tw_from_timeout_list(ehcip, tw);
3199 
3200 		if ((pp->pp_timeout_list == NULL) &&
3201 		    (pp->pp_timer_id)) {
3202 
3203 			timer_id = pp->pp_timer_id;
3204 
3205 			/* Reset the timer id to zero */
3206 			pp->pp_timer_id = 0;
3207 
3208 			mutex_exit(&ehcip->ehci_int_mutex);
3209 
3210 			(void) untimeout(timer_id);
3211 
3212 			mutex_enter(&ehcip->ehci_int_mutex);
3213 		}
3214 		break;
3215 	default:
3216 		break;
3217 	}
3218 }
3219 
3220 
3221 /*
3222  * ehci_xfer_timeout_handler:
3223  *
3224  * Control or bulk transfer timeout handler.
3225  */
3226 static void
3227 ehci_xfer_timeout_handler(void *arg)
3228 {
3229 	usba_pipe_handle_data_t	*ph = (usba_pipe_handle_data_t *)arg;
3230 	ehci_state_t		*ehcip = ehci_obtain_state(
3231 	    ph->p_usba_device->usb_root_hub_dip);
3232 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3233 	ehci_trans_wrapper_t	*tw, *next;
3234 	ehci_trans_wrapper_t	*expire_xfer_list = NULL;
3235 	ehci_qtd_t		*qtd;
3236 
3237 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3238 	    "ehci_xfer_timeout_handler: ehcip = 0x%p, ph = 0x%p",
3239 	    (void *)ehcip, (void *)ph);
3240 
3241 	mutex_enter(&ehcip->ehci_int_mutex);
3242 
3243 	/*
3244 	 * Check whether still timeout handler is valid.
3245 	 */
3246 	if (pp->pp_timer_id != 0) {
3247 
3248 		/* Reset the timer id to zero */
3249 		pp->pp_timer_id = 0;
3250 	} else {
3251 		mutex_exit(&ehcip->ehci_int_mutex);
3252 
3253 		return;
3254 	}
3255 
3256 	/* Get the transfer timeout list head */
3257 	tw = pp->pp_timeout_list;
3258 
3259 	while (tw) {
3260 
3261 		/* Get the transfer on the timeout list */
3262 		next = tw->tw_timeout_next;
3263 
3264 		tw->tw_timeout--;
3265 
3266 		if (tw->tw_timeout <= 0) {
3267 
3268 			/* remove the tw from the timeout list */
3269 			ehci_remove_tw_from_timeout_list(ehcip, tw);
3270 
3271 			/* remove QTDs from active QTD list */
3272 			qtd = tw->tw_qtd_head;
3273 			while (qtd) {
3274 				ehci_remove_qtd_from_active_qtd_list(
3275 				    ehcip, qtd);
3276 
3277 				/* Get the next QTD from the wrapper */
3278 				qtd = ehci_qtd_iommu_to_cpu(ehcip,
3279 				    Get_QTD(qtd->qtd_tw_next_qtd));
3280 			}
3281 
3282 			/*
3283 			 * Preserve the order to the requests
3284 			 * started time sequence.
3285 			 */
3286 			tw->tw_timeout_next = expire_xfer_list;
3287 			expire_xfer_list = tw;
3288 		}
3289 
3290 		tw = next;
3291 	}
3292 
3293 	/*
3294 	 * The timer should be started before the callbacks.
3295 	 * There is always a chance that ehci interrupts come
3296 	 * in when we release the mutex while calling the tw back.
3297 	 * To keep an accurate timeout it should be restarted
3298 	 * as soon as possible.
3299 	 */
3300 	ehci_start_timer(ehcip, pp);
3301 
3302 	/* Get the expired transfer timeout list head */
3303 	tw = expire_xfer_list;
3304 
3305 	while (tw) {
3306 
3307 		/* Get the next tw on the expired transfer timeout list */
3308 		next = tw->tw_timeout_next;
3309 
3310 		/*
3311 		 * The error handle routine will release the mutex when
3312 		 * calling back to USBA. But this will not cause any race.
3313 		 * We do the callback and are relying on ehci_pipe_cleanup()
3314 		 * to halt the queue head and clean up since we should not
3315 		 * block in timeout context.
3316 		 */
3317 		ehci_handle_error(ehcip, tw->tw_qtd_head, USB_CR_TIMEOUT);
3318 
3319 		tw = next;
3320 	}
3321 	mutex_exit(&ehcip->ehci_int_mutex);
3322 }
3323 
3324 
3325 /*
3326  * ehci_remove_tw_from_timeout_list:
3327  *
3328  * Remove Control or bulk transfer from the timeout list.
3329  */
3330 static void
3331 ehci_remove_tw_from_timeout_list(
3332 	ehci_state_t		*ehcip,
3333 	ehci_trans_wrapper_t	*tw)
3334 {
3335 	ehci_pipe_private_t	*pp;
3336 	ehci_trans_wrapper_t	*prev, *next;
3337 
3338 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3339 	    "ehci_remove_tw_from_timeout_list: tw = 0x%p", (void *)tw);
3340 
3341 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3342 
3343 	/* Obtain the pipe private structure */
3344 	pp = tw->tw_pipe_private;
3345 
3346 	if (pp->pp_timeout_list) {
3347 		if (pp->pp_timeout_list == tw) {
3348 			pp->pp_timeout_list = tw->tw_timeout_next;
3349 
3350 			tw->tw_timeout_next = NULL;
3351 		} else {
3352 			prev = pp->pp_timeout_list;
3353 			next = prev->tw_timeout_next;
3354 
3355 			while (next && (next != tw)) {
3356 				prev = next;
3357 				next = next->tw_timeout_next;
3358 			}
3359 
3360 			if (next == tw) {
3361 				prev->tw_timeout_next =
3362 				    next->tw_timeout_next;
3363 				tw->tw_timeout_next = NULL;
3364 			}
3365 		}
3366 	}
3367 }
3368 
3369 
3370 /*
3371  * ehci_start_timer:
3372  *
3373  * Start the pipe's timer
3374  */
3375 static void
3376 ehci_start_timer(
3377 	ehci_state_t		*ehcip,
3378 	ehci_pipe_private_t	*pp)
3379 {
3380 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3381 	    "ehci_start_timer: ehcip = 0x%p, pp = 0x%p",
3382 	    (void *)ehcip, (void *)pp);
3383 
3384 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3385 
3386 	/*
3387 	 * Start the pipe's timer only if currently timer is not
3388 	 * running and if there are any transfers on the timeout
3389 	 * list. This timer will be per pipe.
3390 	 */
3391 	if ((!pp->pp_timer_id) && (pp->pp_timeout_list)) {
3392 		pp->pp_timer_id = timeout(ehci_xfer_timeout_handler,
3393 		    (void *)(pp->pp_pipe_handle), drv_usectohz(1000000));
3394 	}
3395 }
3396 
3397 /*
3398  * ehci_deallocate_tw:
3399  *
3400  * Deallocate of a Transaction Wrapper (TW) and this involves the freeing of
3401  * of DMA resources.
3402  */
3403 void
3404 ehci_deallocate_tw(
3405 	ehci_state_t		*ehcip,
3406 	ehci_pipe_private_t	*pp,
3407 	ehci_trans_wrapper_t	*tw)
3408 {
3409 	ehci_trans_wrapper_t	*prev, *next;
3410 
3411 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3412 	    "ehci_deallocate_tw: tw = 0x%p", (void *)tw);
3413 
3414 	/*
3415 	 * If the transfer wrapper has no Host Controller (HC)
3416 	 * Transfer Descriptors (QTD) associated with it,  then
3417 	 * remove the transfer wrapper.
3418 	 */
3419 	if (tw->tw_qtd_head) {
3420 		ASSERT(tw->tw_qtd_tail != NULL);
3421 
3422 		return;
3423 	}
3424 
3425 	ASSERT(tw->tw_qtd_tail == NULL);
3426 
3427 	/* Make sure we return all the unused qtd's to the pool as well */
3428 	ehci_free_tw_td_resources(ehcip, tw);
3429 
3430 	/*
3431 	 * If pp->pp_tw_head and pp->pp_tw_tail are pointing to
3432 	 * given TW then set the head and  tail  equal to NULL.
3433 	 * Otherwise search for this TW in the linked TW's list
3434 	 * and then remove this TW from the list.
3435 	 */
3436 	if (pp->pp_tw_head == tw) {
3437 		if (pp->pp_tw_tail == tw) {
3438 			pp->pp_tw_head = NULL;
3439 			pp->pp_tw_tail = NULL;
3440 		} else {
3441 			pp->pp_tw_head = tw->tw_next;
3442 		}
3443 	} else {
3444 		prev = pp->pp_tw_head;
3445 		next = prev->tw_next;
3446 
3447 		while (next && (next != tw)) {
3448 			prev = next;
3449 			next = next->tw_next;
3450 		}
3451 
3452 		if (next == tw) {
3453 			prev->tw_next = next->tw_next;
3454 
3455 			if (pp->pp_tw_tail == tw) {
3456 				pp->pp_tw_tail = prev;
3457 			}
3458 		}
3459 	}
3460 
3461 	/*
3462 	 * Make sure that, this TW has been removed
3463 	 * from the timeout list.
3464 	 */
3465 	ehci_remove_tw_from_timeout_list(ehcip, tw);
3466 
3467 	/* Deallocate this TW */
3468 	ehci_free_tw(ehcip, pp, tw);
3469 }
3470 
3471 
3472 /*
3473  * ehci_free_dma_resources:
3474  *
3475  * Free dma resources of a Transfer Wrapper (TW) and also free the TW.
3476  *
3477  * NOTE: This function is also called from POLLED MODE.
3478  */
3479 void
3480 ehci_free_dma_resources(
3481 	ehci_state_t		*ehcip,
3482 	usba_pipe_handle_data_t	*ph)
3483 {
3484 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3485 	ehci_trans_wrapper_t	*head_tw = pp->pp_tw_head;
3486 	ehci_trans_wrapper_t	*next_tw, *tw;
3487 
3488 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3489 	    "ehci_free_dma_resources: ph = 0x%p", (void *)ph);
3490 
3491 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3492 
3493 	/* Process the Transfer Wrappers */
3494 	next_tw = head_tw;
3495 	while (next_tw) {
3496 		tw = next_tw;
3497 		next_tw = tw->tw_next;
3498 
3499 		USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3500 		    "ehci_free_dma_resources: Free TW = 0x%p", (void *)tw);
3501 
3502 		ehci_free_tw(ehcip, pp, tw);
3503 	}
3504 
3505 	/* Adjust the head and tail pointers */
3506 	pp->pp_tw_head = NULL;
3507 	pp->pp_tw_tail = NULL;
3508 }
3509 
3510 
3511 /*
3512  * ehci_free_tw:
3513  *
3514  * Free the Transfer Wrapper (TW).
3515  */
3516 /*ARGSUSED*/
3517 static void
3518 ehci_free_tw(
3519 	ehci_state_t		*ehcip,
3520 	ehci_pipe_private_t	*pp,
3521 	ehci_trans_wrapper_t	*tw)
3522 {
3523 	int	rval;
3524 
3525 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3526 	    "ehci_free_tw: tw = 0x%p", (void *)tw);
3527 
3528 	ASSERT(tw != NULL);
3529 	ASSERT(tw->tw_id != NULL);
3530 
3531 	/* Free 32bit ID */
3532 	EHCI_FREE_ID((uint32_t)tw->tw_id);
3533 
3534 	if (tw->tw_dmahandle != NULL) {
3535 		rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
3536 		ASSERT(rval == DDI_SUCCESS);
3537 
3538 		ddi_dma_mem_free(&tw->tw_accesshandle);
3539 		ddi_dma_free_handle(&tw->tw_dmahandle);
3540 	}
3541 
3542 	/* Free transfer wrapper */
3543 	kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3544 }
3545 
3546 
3547 /*
3548  * Miscellaneous functions
3549  */
3550 
3551 /*
3552  * ehci_allocate_intr_in_resource
3553  *
3554  * Allocate interrupt request structure for the interrupt IN transfer.
3555  */
3556 /*ARGSUSED*/
3557 int
3558 ehci_allocate_intr_in_resource(
3559 	ehci_state_t		*ehcip,
3560 	ehci_pipe_private_t	*pp,
3561 	ehci_trans_wrapper_t	*tw,
3562 	usb_flags_t		flags)
3563 {
3564 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3565 	usb_intr_req_t		*curr_intr_reqp;
3566 	usb_opaque_t		client_periodic_in_reqp;
3567 	size_t			length = 0;
3568 
3569 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3570 	    "ehci_allocate_intr_in_resource:"
3571 	    "pp = 0x%p tw = 0x%p flags = 0x%x", (void *)pp, (void *)tw, flags);
3572 
3573 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3574 	ASSERT(tw->tw_curr_xfer_reqp == NULL);
3575 
3576 	/* Get the client periodic in request pointer */
3577 	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;
3578 
3579 	/*
3580 	 * If it a periodic IN request and periodic request is NULL,
3581 	 * allocate corresponding usb periodic IN request for the
3582 	 * current periodic polling request and copy the information
3583 	 * from the saved periodic request structure.
3584 	 */
3585 	if (client_periodic_in_reqp) {
3586 
3587 		/* Get the interrupt transfer length */
3588 		length = ((usb_intr_req_t *)
3589 		    client_periodic_in_reqp)->intr_len;
3590 
3591 		curr_intr_reqp = usba_hcdi_dup_intr_req(ph->p_dip,
3592 		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
3593 	} else {
3594 		curr_intr_reqp = usb_alloc_intr_req(ph->p_dip, length, flags);
3595 	}
3596 
3597 	if (curr_intr_reqp == NULL) {
3598 
3599 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3600 		    "ehci_allocate_intr_in_resource: Interrupt"
3601 		    "request structure allocation failed");
3602 
3603 		return (USB_NO_RESOURCES);
3604 	}
3605 
3606 	/* For polled mode */
3607 	if (client_periodic_in_reqp == NULL) {
3608 		curr_intr_reqp->intr_attributes = USB_ATTRS_SHORT_XFER_OK;
3609 		curr_intr_reqp->intr_len = ph->p_ep.wMaxPacketSize;
3610 	} else {
3611 		/* Check and save the timeout value */
3612 		tw->tw_timeout = (curr_intr_reqp->intr_attributes &
3613 		    USB_ATTRS_ONE_XFER) ? curr_intr_reqp->intr_timeout: 0;
3614 	}
3615 
3616 	tw->tw_curr_xfer_reqp = (usb_opaque_t)curr_intr_reqp;
3617 	tw->tw_length = curr_intr_reqp->intr_len;
3618 
3619 	mutex_enter(&ph->p_mutex);
3620 	ph->p_req_count++;
3621 	mutex_exit(&ph->p_mutex);
3622 
3623 	pp->pp_state = EHCI_PIPE_STATE_ACTIVE;
3624 
3625 	return (USB_SUCCESS);
3626 }
3627 
3628 /*
3629  * ehci_pipe_cleanup
3630  *
3631  * Cleanup ehci pipe.
3632  */
3633 void
3634 ehci_pipe_cleanup(
3635 	ehci_state_t		*ehcip,
3636 	usba_pipe_handle_data_t	*ph)
3637 {
3638 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3639 	uint_t			pipe_state = pp->pp_state;
3640 	usb_cr_t		completion_reason;
3641 	usb_ep_descr_t		*eptd = &ph->p_ep;
3642 
3643 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3644 	    "ehci_pipe_cleanup: ph = 0x%p", (void *)ph);
3645 
3646 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3647 
3648 	if (EHCI_ISOC_ENDPOINT(eptd)) {
3649 		ehci_isoc_pipe_cleanup(ehcip, ph);
3650 
3651 		return;
3652 	}
3653 
3654 	ASSERT(!servicing_interrupt());
3655 
3656 	/*
3657 	 * Set the QH's status to Halt condition.
3658 	 * If another thread is halting this function will automatically
3659 	 * wait. If a pipe close happens at this time
3660 	 * we will be in lots of trouble.
3661 	 * If we are in an interrupt thread, don't halt, because it may
3662 	 * do a wait_for_sof.
3663 	 */
3664 	ehci_modify_qh_status_bit(ehcip, pp, SET_HALT);
3665 
3666 	/*
3667 	 * Wait for processing all completed transfers and
3668 	 * to send results to upstream.
3669 	 */
3670 	ehci_wait_for_transfers_completion(ehcip, pp);
3671 
3672 	/* Save the data toggle information */
3673 	ehci_save_data_toggle(ehcip, ph);
3674 
3675 	/*
3676 	 * Traverse the list of QTDs for this pipe using transfer
3677 	 * wrapper. Process these QTDs depending on their status.
3678 	 * And stop the timer of this pipe.
3679 	 */
3680 	ehci_traverse_qtds(ehcip, ph);
3681 
3682 	/* Make sure the timer is not running */
3683 	ASSERT(pp->pp_timer_id == 0);
3684 
3685 	/* Do callbacks for all unfinished requests */
3686 	ehci_handle_outstanding_requests(ehcip, pp);
3687 
3688 	/* Free DMA resources */
3689 	ehci_free_dma_resources(ehcip, ph);
3690 
3691 	switch (pipe_state) {
3692 	case EHCI_PIPE_STATE_CLOSE:
3693 		completion_reason = USB_CR_PIPE_CLOSING;
3694 		break;
3695 	case EHCI_PIPE_STATE_RESET:
3696 	case EHCI_PIPE_STATE_STOP_POLLING:
3697 		/* Set completion reason */
3698 		completion_reason = (pipe_state ==
3699 		    EHCI_PIPE_STATE_RESET) ?
3700 		    USB_CR_PIPE_RESET: USB_CR_STOPPED_POLLING;
3701 
3702 		/* Restore the data toggle information */
3703 		ehci_restore_data_toggle(ehcip, ph);
3704 
3705 		/*
3706 		 * Clear the halt bit to restart all the
3707 		 * transactions on this pipe.
3708 		 */
3709 		ehci_modify_qh_status_bit(ehcip, pp, CLEAR_HALT);
3710 
3711 		/* Set pipe state to idle */
3712 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
3713 
3714 		break;
3715 	}
3716 
3717 	/*
3718 	 * Do the callback for the original client
3719 	 * periodic IN request.
3720 	 */
3721 	if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3722 	    ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) ==
3723 	    USB_EP_DIR_IN)) {
3724 
3725 		ehci_do_client_periodic_in_req_callback(
3726 		    ehcip, pp, completion_reason);
3727 	}
3728 }
3729 
3730 
3731 /*
3732  * ehci_wait_for_transfers_completion:
3733  *
3734  * Wait for processing all completed transfers and to send results
3735  * to upstream.
3736  */
3737 static void
3738 ehci_wait_for_transfers_completion(
3739 	ehci_state_t		*ehcip,
3740 	ehci_pipe_private_t	*pp)
3741 {
3742 	ehci_trans_wrapper_t	*next_tw = pp->pp_tw_head;
3743 	clock_t			xfer_cmpl_time_wait;
3744 	ehci_qtd_t		*qtd;
3745 
3746 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3747 	    ehcip->ehci_log_hdl,
3748 	    "ehci_wait_for_transfers_completion: pp = 0x%p", (void *)pp);
3749 
3750 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3751 
3752 	if ((ehci_state_is_operational(ehcip)) != USB_SUCCESS) {
3753 
3754 		return;
3755 	}
3756 
3757 	pp->pp_count_done_qtds = 0;
3758 
3759 	/* Process the transfer wrappers for this pipe */
3760 	while (next_tw) {
3761 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
3762 
3763 		/*
3764 		 * Walk through each QTD for this transfer wrapper.
3765 		 * If a QTD still exists, then it is either on done
3766 		 * list or on the QH's list.
3767 		 */
3768 		while (qtd) {
3769 			if (!(Get_QTD(qtd->qtd_ctrl) &
3770 			    EHCI_QTD_CTRL_ACTIVE_XACT)) {
3771 				pp->pp_count_done_qtds++;
3772 			}
3773 
3774 			qtd = ehci_qtd_iommu_to_cpu(ehcip,
3775 			    Get_QTD(qtd->qtd_tw_next_qtd));
3776 		}
3777 
3778 		next_tw = next_tw->tw_next;
3779 	}
3780 
3781 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3782 	    "ehci_wait_for_transfers_completion: count_done_qtds = 0x%x",
3783 	    pp->pp_count_done_qtds);
3784 
3785 	if (!pp->pp_count_done_qtds) {
3786 
3787 		return;
3788 	}
3789 
3790 	/* Get the number of clock ticks to wait */
3791 	xfer_cmpl_time_wait = drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000);
3792 
3793 	(void) cv_timedwait(&pp->pp_xfer_cmpl_cv,
3794 	    &ehcip->ehci_int_mutex,
3795 	    ddi_get_lbolt() + xfer_cmpl_time_wait);
3796 
3797 	if (pp->pp_count_done_qtds) {
3798 
3799 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3800 		    "ehci_wait_for_transfers_completion:"
3801 		    "No transfers completion confirmation received");
3802 	}
3803 }
3804 
3805 /*
3806  * ehci_check_for_transfers_completion:
3807  *
3808  * Check whether anybody is waiting for transfers completion event. If so, send
3809  * this event and also stop initiating any new transfers on this pipe.
3810  */
3811 void
3812 ehci_check_for_transfers_completion(
3813 	ehci_state_t		*ehcip,
3814 	ehci_pipe_private_t	*pp)
3815 {
3816 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3817 	    ehcip->ehci_log_hdl,
3818 	    "ehci_check_for_transfers_completion: pp = 0x%p", (void *)pp);
3819 
3820 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3821 
3822 	if ((pp->pp_state == EHCI_PIPE_STATE_STOP_POLLING) &&
3823 	    (pp->pp_error == USB_CR_NO_RESOURCES) &&
3824 	    (pp->pp_cur_periodic_req_cnt == 0)) {
3825 
3826 		/* Reset pipe error to zero */
3827 		pp->pp_error = 0;
3828 
3829 		/* Do callback for original request */
3830 		ehci_do_client_periodic_in_req_callback(
3831 		    ehcip, pp, USB_CR_NO_RESOURCES);
3832 	}
3833 
3834 	if (pp->pp_count_done_qtds) {
3835 
3836 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3837 		    "ehci_check_for_transfers_completion:"
3838 		    "count_done_qtds = 0x%x", pp->pp_count_done_qtds);
3839 
3840 		/* Decrement the done qtd count */
3841 		pp->pp_count_done_qtds--;
3842 
3843 		if (!pp->pp_count_done_qtds) {
3844 
3845 			USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3846 			    "ehci_check_for_transfers_completion:"
3847 			    "Sent transfers completion event pp = 0x%p",
3848 			    (void *)pp);
3849 
3850 			/* Send the transfer completion signal */
3851 			cv_signal(&pp->pp_xfer_cmpl_cv);
3852 		}
3853 	}
3854 }
3855 
3856 
3857 /*
3858  * ehci_save_data_toggle:
3859  *
3860  * Save the data toggle information.
3861  */
3862 static void
3863 ehci_save_data_toggle(
3864 	ehci_state_t		*ehcip,
3865 	usba_pipe_handle_data_t	*ph)
3866 {
3867 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3868 	usb_ep_descr_t		*eptd = &ph->p_ep;
3869 	uint_t			data_toggle;
3870 	usb_cr_t		error = pp->pp_error;
3871 	ehci_qh_t		*qh = pp->pp_qh;
3872 
3873 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3874 	    ehcip->ehci_log_hdl,
3875 	    "ehci_save_data_toggle: ph = 0x%p", (void *)ph);
3876 
3877 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3878 
3879 	/* Reset the pipe error value */
3880 	pp->pp_error = USB_CR_OK;
3881 
3882 	/* Return immediately if it is a control pipe */
3883 	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3884 	    USB_EP_ATTR_CONTROL) {
3885 
3886 		return;
3887 	}
3888 
3889 	/* Get the data toggle information from the endpoint (QH) */
3890 	data_toggle = (Get_QH(qh->qh_status) &
3891 	    EHCI_QH_STS_DATA_TOGGLE)? DATA1:DATA0;
3892 
3893 	/*
3894 	 * If error is STALL, then, set
3895 	 * data toggle to zero.
3896 	 */
3897 	if (error == USB_CR_STALL) {
3898 		data_toggle = DATA0;
3899 	}
3900 
3901 	/*
3902 	 * Save the data toggle information
3903 	 * in the usb device structure.
3904 	 */
3905 	mutex_enter(&ph->p_mutex);
3906 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3907 	    data_toggle);
3908 	mutex_exit(&ph->p_mutex);
3909 }
3910 
3911 
3912 /*
3913  * ehci_restore_data_toggle:
3914  *
3915  * Restore the data toggle information.
3916  */
3917 void
3918 ehci_restore_data_toggle(
3919 	ehci_state_t		*ehcip,
3920 	usba_pipe_handle_data_t	*ph)
3921 {
3922 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3923 	usb_ep_descr_t		*eptd = &ph->p_ep;
3924 	uint_t			data_toggle = 0;
3925 
3926 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3927 	    ehcip->ehci_log_hdl,
3928 	    "ehci_restore_data_toggle: ph = 0x%p", (void *)ph);
3929 
3930 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3931 
3932 	/* Return immediately if it is a control pipe */
3933 	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3934 	    USB_EP_ATTR_CONTROL) {
3935 
3936 		return;
3937 	}
3938 
3939 	mutex_enter(&ph->p_mutex);
3940 
3941 	data_toggle = usba_hcdi_get_data_toggle(ph->p_usba_device,
3942 	    ph->p_ep.bEndpointAddress);
3943 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3944 	    0);
3945 
3946 	mutex_exit(&ph->p_mutex);
3947 
3948 	/*
3949 	 * Restore the data toggle bit depending on the
3950 	 * previous data toggle information.
3951 	 */
3952 	if (data_toggle) {
3953 		Set_QH(pp->pp_qh->qh_status,
3954 		    Get_QH(pp->pp_qh->qh_status) | EHCI_QH_STS_DATA_TOGGLE);
3955 	} else {
3956 		Set_QH(pp->pp_qh->qh_status,
3957 		    Get_QH(pp->pp_qh->qh_status) & (~EHCI_QH_STS_DATA_TOGGLE));
3958 	}
3959 }
3960 
3961 
3962 /*
3963  * ehci_handle_outstanding_requests
3964  *
3965  * Deallocate interrupt request structure for the interrupt IN transfer.
3966  * Do the callbacks for all unfinished requests.
3967  *
3968  * NOTE: This function is also called from POLLED MODE.
3969  */
3970 void
3971 ehci_handle_outstanding_requests(
3972 	ehci_state_t		*ehcip,
3973 	ehci_pipe_private_t	*pp)
3974 {
3975 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3976 	usb_ep_descr_t		*eptd = &ph->p_ep;
3977 	ehci_trans_wrapper_t	*curr_tw;
3978 	ehci_trans_wrapper_t	*next_tw;
3979 	usb_opaque_t		curr_xfer_reqp;
3980 
3981 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3982 	    ehcip->ehci_log_hdl,
3983 	    "ehci_handle_outstanding_requests: pp = 0x%p", (void *)pp);
3984 
3985 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3986 
3987 	/* Deallocate all pre-allocated interrupt requests */
3988 	next_tw = pp->pp_tw_head;
3989 
3990 	while (next_tw) {
3991 		curr_tw = next_tw;
3992 		next_tw = curr_tw->tw_next;
3993 
3994 		curr_xfer_reqp = curr_tw->tw_curr_xfer_reqp;
3995 
3996 		/* Deallocate current interrupt request */
3997 		if (curr_xfer_reqp) {
3998 
3999 			if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
4000 			    (curr_tw->tw_direction == EHCI_QTD_CTRL_IN_PID)) {
4001 
4002 				/* Decrement periodic in request count */
4003 				pp->pp_cur_periodic_req_cnt--;
4004 
4005 				ehci_deallocate_intr_in_resource(
4006 				    ehcip, pp, curr_tw);
4007 			} else {
4008 				ehci_hcdi_callback(ph, curr_tw, USB_CR_FLUSHED);
4009 			}
4010 		}
4011 	}
4012 }
4013 
4014 
4015 /*
4016  * ehci_deallocate_intr_in_resource
4017  *
4018  * Deallocate interrupt request structure for the interrupt IN transfer.
4019  */
4020 void
4021 ehci_deallocate_intr_in_resource(
4022 	ehci_state_t		*ehcip,
4023 	ehci_pipe_private_t	*pp,
4024 	ehci_trans_wrapper_t	*tw)
4025 {
4026 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
4027 	uchar_t			ep_attr = ph->p_ep.bmAttributes;
4028 	usb_opaque_t		curr_xfer_reqp;
4029 
4030 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
4031 	    ehcip->ehci_log_hdl,
4032 	    "ehci_deallocate_intr_in_resource: "
4033 	    "pp = 0x%p tw = 0x%p", (void *)pp, (void *)tw);
4034 
4035 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4036 	ASSERT((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR);
4037 
4038 	curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4039 
4040 	/* Check the current periodic in request pointer */
4041 	if (curr_xfer_reqp) {
4042 
4043 		tw->tw_curr_xfer_reqp = NULL;
4044 
4045 		mutex_enter(&ph->p_mutex);
4046 		ph->p_req_count--;
4047 		mutex_exit(&ph->p_mutex);
4048 
4049 		/* Free pre-allocated interrupt requests */
4050 		usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
4051 
4052 		/* Set periodic in pipe state to idle */
4053 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
4054 	}
4055 }
4056 
4057 
4058 /*
4059  * ehci_do_client_periodic_in_req_callback
4060  *
4061  * Do callback for the original client periodic IN request.
4062  */
4063 void
4064 ehci_do_client_periodic_in_req_callback(
4065 	ehci_state_t		*ehcip,
4066 	ehci_pipe_private_t	*pp,
4067 	usb_cr_t		completion_reason)
4068 {
4069 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
4070 	usb_ep_descr_t		*eptd = &ph->p_ep;
4071 
4072 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
4073 	    ehcip->ehci_log_hdl,
4074 	    "ehci_do_client_periodic_in_req_callback: "
4075 	    "pp = 0x%p cc = 0x%x", (void *)pp, completion_reason);
4076 
4077 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4078 
4079 	/*
4080 	 * Check for Interrupt/Isochronous IN, whether we need to do
4081 	 * callback for the original client's periodic IN request.
4082 	 */
4083 	if (pp->pp_client_periodic_in_reqp) {
4084 		ASSERT(pp->pp_cur_periodic_req_cnt == 0);
4085 		if (EHCI_ISOC_ENDPOINT(eptd)) {
4086 			ehci_hcdi_isoc_callback(ph, NULL, completion_reason);
4087 		} else {
4088 			ehci_hcdi_callback(ph, NULL, completion_reason);
4089 		}
4090 	}
4091 }
4092 
4093 
4094 /*
4095  * ehci_hcdi_callback()
4096  *
4097  * Convenience wrapper around usba_hcdi_cb() other than root hub.
4098  */
4099 void
4100 ehci_hcdi_callback(
4101 	usba_pipe_handle_data_t	*ph,
4102 	ehci_trans_wrapper_t	*tw,
4103 	usb_cr_t		completion_reason)
4104 {
4105 	ehci_state_t		*ehcip = ehci_obtain_state(
4106 	    ph->p_usba_device->usb_root_hub_dip);
4107 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
4108 	usb_opaque_t		curr_xfer_reqp;
4109 	uint_t			pipe_state = 0;
4110 
4111 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
4112 	    "ehci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
4113 	    (void *)ph, (void *)tw, completion_reason);
4114 
4115 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4116 
4117 	/* Set the pipe state as per completion reason */
4118 	switch (completion_reason) {
4119 	case USB_CR_OK:
4120 		pipe_state = pp->pp_state;
4121 		break;
4122 	case USB_CR_NO_RESOURCES:
4123 	case USB_CR_NOT_SUPPORTED:
4124 	case USB_CR_PIPE_RESET:
4125 	case USB_CR_STOPPED_POLLING:
4126 		pipe_state = EHCI_PIPE_STATE_IDLE;
4127 		break;
4128 	case USB_CR_PIPE_CLOSING:
4129 		break;
4130 	default:
4131 		/* Set the pipe state to error */
4132 		pipe_state = EHCI_PIPE_STATE_ERROR;
4133 		pp->pp_error = completion_reason;
4134 		break;
4135 
4136 	}
4137 
4138 	pp->pp_state = pipe_state;
4139 
4140 	if (tw && tw->tw_curr_xfer_reqp) {
4141 		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4142 		tw->tw_curr_xfer_reqp = NULL;
4143 	} else {
4144 		ASSERT(pp->pp_client_periodic_in_reqp != NULL);
4145 
4146 		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
4147 		pp->pp_client_periodic_in_reqp = NULL;
4148 	}
4149 
4150 	ASSERT(curr_xfer_reqp != NULL);
4151 
4152 	mutex_exit(&ehcip->ehci_int_mutex);
4153 
4154 	usba_hcdi_cb(ph, curr_xfer_reqp, completion_reason);
4155 
4156 	mutex_enter(&ehcip->ehci_int_mutex);
4157 }
4158