xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/ehci/ehci_xfer.c (revision 8d0c3d29bb99f6521f2dc5058a7e4debebad7899)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * EHCI Host Controller Driver (EHCI)
28  *
29  * The EHCI driver is a software driver which interfaces to the Universal
30  * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
31  * the Host Controller is defined by the EHCI Host Controller Interface.
32  *
33  * This module contains the main EHCI driver code which handles all USB
34  * transfers, bandwidth allocations and other general functionalities.
35  */
36 
37 #include <sys/usb/hcd/ehci/ehcid.h>
38 #include <sys/usb/hcd/ehci/ehci_intr.h>
39 #include <sys/usb/hcd/ehci/ehci_util.h>
40 #include <sys/usb/hcd/ehci/ehci_isoch.h>
41 
42 /* Adjustable variables for the size of the pools */
43 extern int ehci_qh_pool_size;
44 extern int ehci_qtd_pool_size;
45 
46 
47 /* Endpoint Descriptor (QH) related functions */
48 ehci_qh_t	*ehci_alloc_qh(
49 				ehci_state_t		*ehcip,
50 				usba_pipe_handle_data_t	*ph,
51 				uint_t			flag);
52 static void	ehci_unpack_endpoint(
53 				ehci_state_t		*ehcip,
54 				usba_pipe_handle_data_t	*ph,
55 				ehci_qh_t		*qh);
56 void		ehci_insert_qh(
57 				ehci_state_t		*ehcip,
58 				usba_pipe_handle_data_t	*ph);
59 static void	ehci_insert_async_qh(
60 				ehci_state_t		*ehcip,
61 				ehci_pipe_private_t	*pp);
62 static void	ehci_insert_intr_qh(
63 				ehci_state_t		*ehcip,
64 				ehci_pipe_private_t	*pp);
65 static void	ehci_modify_qh_status_bit(
66 				ehci_state_t		*ehcip,
67 				ehci_pipe_private_t	*pp,
68 				halt_bit_t		action);
69 static void	ehci_halt_hs_qh(
70 				ehci_state_t		*ehcip,
71 				ehci_pipe_private_t	*pp,
72 				ehci_qh_t		*qh);
73 static void	ehci_halt_fls_ctrl_and_bulk_qh(
74 				ehci_state_t		*ehcip,
75 				ehci_pipe_private_t	*pp,
76 				ehci_qh_t		*qh);
77 static void	ehci_clear_tt_buffer(
78 				ehci_state_t		*ehcip,
79 				usba_pipe_handle_data_t	*ph,
80 				ehci_qh_t		*qh);
81 static void	ehci_halt_fls_intr_qh(
82 				ehci_state_t		*ehcip,
83 				ehci_qh_t		*qh);
84 void		ehci_remove_qh(
85 				ehci_state_t		*ehcip,
86 				ehci_pipe_private_t	*pp,
87 				boolean_t		reclaim);
88 static void	ehci_remove_async_qh(
89 				ehci_state_t		*ehcip,
90 				ehci_pipe_private_t	*pp,
91 				boolean_t		reclaim);
92 static void	ehci_remove_intr_qh(
93 				ehci_state_t		*ehcip,
94 				ehci_pipe_private_t	*pp,
95 				boolean_t		reclaim);
96 static void	ehci_insert_qh_on_reclaim_list(
97 				ehci_state_t		*ehcip,
98 				ehci_pipe_private_t	*pp);
99 void		ehci_deallocate_qh(
100 				ehci_state_t		*ehcip,
101 				ehci_qh_t		*old_qh);
102 uint32_t	ehci_qh_cpu_to_iommu(
103 				ehci_state_t		*ehcip,
104 				ehci_qh_t		*addr);
105 ehci_qh_t	*ehci_qh_iommu_to_cpu(
106 				ehci_state_t		*ehcip,
107 				uintptr_t		addr);
108 
109 /* Transfer Descriptor (QTD) related functions */
110 static int	ehci_initialize_dummy(
111 				ehci_state_t		*ehcip,
112 				ehci_qh_t		*qh);
113 ehci_trans_wrapper_t *ehci_allocate_ctrl_resources(
114 				ehci_state_t		*ehcip,
115 				ehci_pipe_private_t	*pp,
116 				usb_ctrl_req_t		*ctrl_reqp,
117 				usb_flags_t		usb_flags);
118 void		ehci_insert_ctrl_req(
119 				ehci_state_t		*ehcip,
120 				usba_pipe_handle_data_t	*ph,
121 				usb_ctrl_req_t		*ctrl_reqp,
122 				ehci_trans_wrapper_t	*tw,
123 				usb_flags_t		usb_flags);
124 ehci_trans_wrapper_t *ehci_allocate_bulk_resources(
125 				ehci_state_t		*ehcip,
126 				ehci_pipe_private_t	*pp,
127 				usb_bulk_req_t		*bulk_reqp,
128 				usb_flags_t		usb_flags);
129 void		ehci_insert_bulk_req(
130 				ehci_state_t		*ehcip,
131 				usba_pipe_handle_data_t	*ph,
132 				usb_bulk_req_t		*bulk_reqp,
133 				ehci_trans_wrapper_t	*tw,
134 				usb_flags_t		flags);
135 int		ehci_start_periodic_pipe_polling(
136 				ehci_state_t		*ehcip,
137 				usba_pipe_handle_data_t	*ph,
138 				usb_opaque_t		periodic_in_reqp,
139 				usb_flags_t		flags);
140 static int	ehci_start_pipe_polling(
141 				ehci_state_t		*ehcip,
142 				usba_pipe_handle_data_t	*ph,
143 				usb_flags_t		flags);
144 static int	ehci_start_intr_polling(
145 				ehci_state_t		*ehcip,
146 				usba_pipe_handle_data_t	*ph,
147 				usb_flags_t		flags);
148 static void	ehci_set_periodic_pipe_polling(
149 				ehci_state_t		*ehcip,
150 				usba_pipe_handle_data_t	*ph);
151 ehci_trans_wrapper_t *ehci_allocate_intr_resources(
152 				ehci_state_t		*ehcip,
153 				usba_pipe_handle_data_t	*ph,
154 				usb_intr_req_t		*intr_reqp,
155 				usb_flags_t		usb_flags);
156 void		ehci_insert_intr_req(
157 				ehci_state_t		*ehcip,
158 				ehci_pipe_private_t	*pp,
159 				ehci_trans_wrapper_t	*tw,
160 				usb_flags_t		flags);
161 int		ehci_stop_periodic_pipe_polling(
162 				ehci_state_t		*ehcip,
163 				usba_pipe_handle_data_t	*ph,
164 				usb_flags_t		flags);
165 int		ehci_insert_qtd(
166 				ehci_state_t		*ehcip,
167 				uint32_t		qtd_ctrl,
168 				size_t			qtd_dma_offs,
169 				size_t			qtd_length,
170 				uint32_t		qtd_ctrl_phase,
171 				ehci_pipe_private_t	*pp,
172 				ehci_trans_wrapper_t	*tw);
173 static ehci_qtd_t *ehci_allocate_qtd_from_pool(
174 				ehci_state_t		*ehcip);
175 static void	ehci_fill_in_qtd(
176 				ehci_state_t		*ehcip,
177 				ehci_qtd_t		*qtd,
178 				uint32_t		qtd_ctrl,
179 				size_t			qtd_dma_offs,
180 				size_t			qtd_length,
181 				uint32_t		qtd_ctrl_phase,
182 				ehci_pipe_private_t	*pp,
183 				ehci_trans_wrapper_t	*tw);
184 static void	ehci_insert_qtd_on_tw(
185 				ehci_state_t		*ehcip,
186 				ehci_trans_wrapper_t	*tw,
187 				ehci_qtd_t		*qtd);
188 static void	ehci_insert_qtd_into_active_qtd_list(
189 				ehci_state_t		*ehcip,
190 				ehci_qtd_t		*curr_qtd);
191 void		ehci_remove_qtd_from_active_qtd_list(
192 				ehci_state_t		*ehcip,
193 				ehci_qtd_t		*curr_qtd);
194 static void	ehci_traverse_qtds(
195 				ehci_state_t		*ehcip,
196 				usba_pipe_handle_data_t	*ph);
197 void		ehci_deallocate_qtd(
198 				ehci_state_t		*ehcip,
199 				ehci_qtd_t		*old_qtd);
200 uint32_t	ehci_qtd_cpu_to_iommu(
201 				ehci_state_t		*ehcip,
202 				ehci_qtd_t		*addr);
203 ehci_qtd_t	*ehci_qtd_iommu_to_cpu(
204 				ehci_state_t		*ehcip,
205 				uintptr_t		addr);
206 
207 /* Transfer Wrapper (TW) functions */
208 static ehci_trans_wrapper_t  *ehci_create_transfer_wrapper(
209 				ehci_state_t		*ehcip,
210 				ehci_pipe_private_t	*pp,
211 				size_t			length,
212 				uint_t			usb_flags);
213 int		ehci_allocate_tds_for_tw(
214 				ehci_state_t		*ehcip,
215 				ehci_pipe_private_t	*pp,
216 				ehci_trans_wrapper_t	*tw,
217 				size_t			qtd_count);
218 static ehci_trans_wrapper_t  *ehci_allocate_tw_resources(
219 				ehci_state_t		*ehcip,
220 				ehci_pipe_private_t	*pp,
221 				size_t			length,
222 				usb_flags_t		usb_flags,
223 				size_t			td_count);
224 static void	ehci_free_tw_td_resources(
225 				ehci_state_t		*ehcip,
226 				ehci_trans_wrapper_t	*tw);
227 static void	ehci_start_xfer_timer(
228 				ehci_state_t		*ehcip,
229 				ehci_pipe_private_t	*pp,
230 				ehci_trans_wrapper_t	*tw);
231 void		ehci_stop_xfer_timer(
232 				ehci_state_t		*ehcip,
233 				ehci_trans_wrapper_t	*tw,
234 				uint_t			flag);
235 static void	ehci_xfer_timeout_handler(void		*arg);
236 static void	ehci_remove_tw_from_timeout_list(
237 				ehci_state_t		*ehcip,
238 				ehci_trans_wrapper_t	*tw);
239 static void	ehci_start_timer(ehci_state_t		*ehcip,
240 				ehci_pipe_private_t	*pp);
241 void		ehci_deallocate_tw(
242 				ehci_state_t		*ehcip,
243 				ehci_pipe_private_t	*pp,
244 				ehci_trans_wrapper_t	*tw);
245 void		ehci_free_dma_resources(
246 				ehci_state_t		*ehcip,
247 				usba_pipe_handle_data_t	*ph);
248 static void	ehci_free_tw(
249 				ehci_state_t		*ehcip,
250 				ehci_pipe_private_t	*pp,
251 				ehci_trans_wrapper_t	*tw);
252 
253 /* Miscellaneous functions */
254 int		ehci_allocate_intr_in_resource(
255 				ehci_state_t		*ehcip,
256 				ehci_pipe_private_t	*pp,
257 				ehci_trans_wrapper_t	*tw,
258 				usb_flags_t		flags);
259 void		ehci_pipe_cleanup(
260 				ehci_state_t		*ehcip,
261 				usba_pipe_handle_data_t	*ph);
262 static void	ehci_wait_for_transfers_completion(
263 				ehci_state_t		*ehcip,
264 				ehci_pipe_private_t	*pp);
265 void		ehci_check_for_transfers_completion(
266 				ehci_state_t		*ehcip,
267 				ehci_pipe_private_t	*pp);
268 static void	ehci_save_data_toggle(
269 				ehci_state_t		*ehcip,
270 				usba_pipe_handle_data_t	*ph);
271 void		ehci_restore_data_toggle(
272 				ehci_state_t		*ehcip,
273 				usba_pipe_handle_data_t	*ph);
274 void		ehci_handle_outstanding_requests(
275 				ehci_state_t		*ehcip,
276 				ehci_pipe_private_t	*pp);
277 void		ehci_deallocate_intr_in_resource(
278 				ehci_state_t		*ehcip,
279 				ehci_pipe_private_t	*pp,
280 				ehci_trans_wrapper_t	*tw);
281 void		ehci_do_client_periodic_in_req_callback(
282 				ehci_state_t		*ehcip,
283 				ehci_pipe_private_t	*pp,
284 				usb_cr_t		completion_reason);
285 void		ehci_hcdi_callback(
286 				usba_pipe_handle_data_t	*ph,
287 				ehci_trans_wrapper_t	*tw,
288 				usb_cr_t		completion_reason);
289 
290 
291 /*
292  * Endpoint Descriptor (QH) manipulation functions
293  */
294 
295 /*
296  * ehci_alloc_qh:
297  *
298  * Allocate an endpoint descriptor (QH)
299  *
300  * NOTE: This function is also called from POLLED MODE.
301  */
302 ehci_qh_t *
303 ehci_alloc_qh(
304 	ehci_state_t		*ehcip,
305 	usba_pipe_handle_data_t	*ph,
306 	uint_t			flag)
307 {
308 	int			i, state;
309 	ehci_qh_t		*qh;
310 
311 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
312 	    "ehci_alloc_qh: ph = 0x%p flag = 0x%x", (void *)ph, flag);
313 
314 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
315 
316 	/*
317 	 * If this is for an ISOC endpoint, return NULL.
318 	 * Isochronous transfers use ITDs placed directly onto the PFL.
319 	 */
320 	if (ph) {
321 		if (EHCI_ISOC_ENDPOINT((&ph->p_ep))) {
322 
323 			return (NULL);
324 		}
325 	}
326 
327 	/*
328 	 * The first 63 endpoints in the Endpoint Descriptor (QH)
329 	 * buffer pool are reserved for building the interrupt lattice
330 	 * tree. Search for a blank endpoint descriptor in the QH
331 	 * buffer pool.
332 	 */
333 	for (i = EHCI_NUM_STATIC_NODES; i < ehci_qh_pool_size; i ++) {
334 		state = Get_QH(ehcip->ehci_qh_pool_addr[i].qh_state);
335 
336 		if (state == EHCI_QH_FREE) {
337 			break;
338 		}
339 	}
340 
341 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
342 	    "ehci_alloc_qh: Allocated %d", i);
343 
344 	if (i == ehci_qh_pool_size) {
345 		USB_DPRINTF_L2(PRINT_MASK_ALLOC,  ehcip->ehci_log_hdl,
346 		    "ehci_alloc_qh: QH exhausted");
347 
348 		return (NULL);
349 	} else {
350 		qh = &ehcip->ehci_qh_pool_addr[i];
351 		bzero((void *)qh, sizeof (ehci_qh_t));
352 
353 		USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
354 		    "ehci_alloc_qh: Allocated address 0x%p", (void *)qh);
355 
356 		/* Check polled mode flag */
357 		if (flag == EHCI_POLLED_MODE_FLAG) {
358 			Set_QH(qh->qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
359 			Set_QH(qh->qh_ctrl, EHCI_QH_CTRL_ED_INACTIVATE);
360 		}
361 
362 		/* Unpack the endpoint descriptor into a control field */
363 		if (ph) {
364 			if ((ehci_initialize_dummy(ehcip,
365 			    qh)) == USB_NO_RESOURCES) {
366 
367 				Set_QH(qh->qh_state, EHCI_QH_FREE);
368 
369 				return (NULL);
370 			}
371 
372 			ehci_unpack_endpoint(ehcip, ph, qh);
373 
374 			Set_QH(qh->qh_curr_qtd, NULL);
375 			Set_QH(qh->qh_alt_next_qtd,
376 			    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
377 
378 			/* Change QH's state Active */
379 			Set_QH(qh->qh_state, EHCI_QH_ACTIVE);
380 		} else {
381 			Set_QH(qh->qh_status, EHCI_QH_STS_HALTED);
382 
383 			/* Change QH's state Static */
384 			Set_QH(qh->qh_state, EHCI_QH_STATIC);
385 		}
386 
387 		ehci_print_qh(ehcip, qh);
388 
389 		return (qh);
390 	}
391 }
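
/*
 * Illustration (not part of the original source): layout of the QH pool
 * that ehci_alloc_qh() scans.  The first EHCI_NUM_STATIC_NODES entries
 * hold the static interrupt lattice tree and are never handed out here;
 * the allocation loop above looks for a remaining entry whose qh_state
 * is EHCI_QH_FREE.
 *
 *	ehci_qh_pool_addr[0 .. EHCI_NUM_STATIC_NODES - 1]	static lattice QHs
 *	ehci_qh_pool_addr[EHCI_NUM_STATIC_NODES .. N - 1]	dynamically allocated
 *								(N == ehci_qh_pool_size)
 */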
392 
393 
394 /*
395  * ehci_unpack_endpoint:
396  *
397  * Unpack the information in the pipe handle and build the control fields
398  * of the Host Controller's (HC) Endpoint Descriptor (QH).
399  */
400 static void
401 ehci_unpack_endpoint(
402 	ehci_state_t		*ehcip,
403 	usba_pipe_handle_data_t	*ph,
404 	ehci_qh_t		*qh)
405 {
406 	usb_ep_descr_t		*endpoint = &ph->p_ep;
407 	uint_t			maxpacketsize, addr, xactions;
408 	uint_t			ctrl = 0, status = 0, split_ctrl = 0;
409 	usb_port_status_t	usb_port_status;
410 	usba_device_t		*usba_device = ph->p_usba_device;
411 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
412 
413 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
414 	    "ehci_unpack_endpoint:");
415 
416 	mutex_enter(&usba_device->usb_mutex);
417 	ctrl = usba_device->usb_addr;
418 	usb_port_status = usba_device->usb_port_status;
419 	mutex_exit(&usba_device->usb_mutex);
420 
421 	addr = endpoint->bEndpointAddress;
422 
423 	/* Assign the endpoint's address */
424 	ctrl |= ((addr & USB_EP_NUM_MASK) << EHCI_QH_CTRL_ED_NUMBER_SHIFT);
425 
426 	/* Assign the speed */
427 	switch (usb_port_status) {
428 	case USBA_LOW_SPEED_DEV:
429 		ctrl |= EHCI_QH_CTRL_ED_LOW_SPEED;
430 		break;
431 	case USBA_FULL_SPEED_DEV:
432 		ctrl |= EHCI_QH_CTRL_ED_FULL_SPEED;
433 		break;
434 	case USBA_HIGH_SPEED_DEV:
435 		ctrl |= EHCI_QH_CTRL_ED_HIGH_SPEED;
436 		break;
437 	}
438 
439 	switch (endpoint->bmAttributes & USB_EP_ATTR_MASK) {
440 	case USB_EP_ATTR_CONTROL:
441 		/* Assign data toggle information */
442 		ctrl |= EHCI_QH_CTRL_DATA_TOGGLE;
443 
444 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
445 			ctrl |= EHCI_QH_CTRL_CONTROL_ED_FLAG;
446 		}
447 		/* FALLTHRU */
448 	case USB_EP_ATTR_BULK:
449 		/* Maximum nak counter */
450 		ctrl |= EHCI_QH_CTRL_MAX_NC;
451 
452 		if (usb_port_status == USBA_HIGH_SPEED_DEV) {
453 			/*
454 			 * Perform ping before executing control
455 			 * and bulk transactions.
456 			 */
457 			status = EHCI_QH_STS_DO_PING;
458 		}
459 		break;
460 	case USB_EP_ATTR_INTR:
461 		/* Set start split mask */
462 		split_ctrl = (pp->pp_smask & EHCI_QH_SPLIT_CTRL_INTR_MASK);
463 
464 		/*
465 		 * Set complete split mask for low/full speed
466 		 * usb devices.
467 		 */
468 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
469 			split_ctrl |= ((pp->pp_cmask <<
470 			    EHCI_QH_SPLIT_CTRL_COMP_SHIFT) &
471 			    EHCI_QH_SPLIT_CTRL_COMP_MASK);
472 		}
473 		break;
474 	}
475 
476 	/* Get the max transactions per microframe */
477 	xactions = (endpoint->wMaxPacketSize &
478 	    USB_EP_MAX_XACTS_MASK) >>  USB_EP_MAX_XACTS_SHIFT;
479 
480 	switch (xactions) {
481 	case 0:
482 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
483 		break;
484 	case 1:
485 		split_ctrl |= EHCI_QH_SPLIT_CTRL_2_XACTS;
486 		break;
487 	case 2:
488 		split_ctrl |= EHCI_QH_SPLIT_CTRL_3_XACTS;
489 		break;
490 	default:
491 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
492 		break;
493 	}
494 
495 	/*
496 	 * For low/full speed devices, program high speed hub
497 	 * address and port number.
498 	 */
499 	if (usb_port_status != USBA_HIGH_SPEED_DEV) {
500 		mutex_enter(&usba_device->usb_mutex);
501 		split_ctrl |= ((usba_device->usb_hs_hub_addr
502 		    << EHCI_QH_SPLIT_CTRL_HUB_ADDR_SHIFT) &
503 		    EHCI_QH_SPLIT_CTRL_HUB_ADDR);
504 
505 		split_ctrl |= ((usba_device->usb_hs_hub_port
506 		    << EHCI_QH_SPLIT_CTRL_HUB_PORT_SHIFT) &
507 		    EHCI_QH_SPLIT_CTRL_HUB_PORT);
508 
509 		mutex_exit(&usba_device->usb_mutex);
510 
511 		/* Set start split transaction state */
512 		status = EHCI_QH_STS_DO_START_SPLIT;
513 	}
514 
515 	/* Assign endpoint's maxpacketsize */
516 	maxpacketsize = endpoint->wMaxPacketSize & USB_EP_MAX_PKTSZ_MASK;
517 	maxpacketsize = maxpacketsize << EHCI_QH_CTRL_MAXPKTSZ_SHIFT;
518 	ctrl |= (maxpacketsize & EHCI_QH_CTRL_MAXPKTSZ);
519 
520 	Set_QH(qh->qh_ctrl, ctrl);
521 	Set_QH(qh->qh_split_ctrl, split_ctrl);
522 	Set_QH(qh->qh_status, status);
523 }
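
/*
 * Worked example (illustrative only, values assumed): for a high speed
 * bulk endpoint 0x81 (endpoint 1, IN) on device address 2 with a
 * wMaxPacketSize of 512, the code above would build roughly:
 *
 *	ctrl   = 2					device address
 *	       | (1 << EHCI_QH_CTRL_ED_NUMBER_SHIFT)	endpoint number
 *	       | EHCI_QH_CTRL_ED_HIGH_SPEED
 *	       | EHCI_QH_CTRL_MAX_NC
 *	       | (512 << EHCI_QH_CTRL_MAXPKTSZ_SHIFT);
 *	status = EHCI_QH_STS_DO_PING;			high speed ctrl/bulk
 *
 * split_ctrl would carry only the transaction count (one per microframe
 * here); the hub address and port fields are programmed for full/low
 * speed devices only.
 */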
524 
525 
526 /*
527  * ehci_insert_qh:
528  *
529  * Add the Endpoint Descriptor (QH) into the Host Controller's
530  * (HC) appropriate endpoint list.
531  */
532 void
533 ehci_insert_qh(
534 	ehci_state_t		*ehcip,
535 	usba_pipe_handle_data_t	*ph)
536 {
537 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
538 
539 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
540 	    "ehci_insert_qh: qh=0x%p", (void *)pp->pp_qh);
541 
542 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
543 
544 	switch (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK) {
545 	case USB_EP_ATTR_CONTROL:
546 	case USB_EP_ATTR_BULK:
547 		ehci_insert_async_qh(ehcip, pp);
548 		ehcip->ehci_open_async_count++;
549 		break;
550 	case USB_EP_ATTR_INTR:
551 		ehci_insert_intr_qh(ehcip, pp);
552 		ehcip->ehci_open_periodic_count++;
553 		break;
554 	case USB_EP_ATTR_ISOCH:
555 		/* ISOCH does not use QH, don't do anything but update count */
556 		ehcip->ehci_open_periodic_count++;
557 		break;
558 	}
559 }
560 
561 
562 /*
563  * ehci_insert_async_qh:
564  *
565  * Insert a control/bulk endpoint into the Host Controller's (HC)
566  * Asynchronous schedule endpoint list.
567  */
568 static void
569 ehci_insert_async_qh(
570 	ehci_state_t		*ehcip,
571 	ehci_pipe_private_t	*pp)
572 {
573 	ehci_qh_t		*qh = pp->pp_qh;
574 	ehci_qh_t		*async_head_qh;
575 	ehci_qh_t		*next_qh;
576 	uintptr_t		qh_addr;
577 
578 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
579 	    "ehci_insert_async_qh:");
580 
581 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
582 
583 	/* Make sure this QH is not already in the list */
584 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
585 
586 	qh_addr = ehci_qh_cpu_to_iommu(ehcip, qh);
587 
588 	/* Obtain a ptr to the head of the Async schedule list */
589 	async_head_qh = ehcip->ehci_head_of_async_sched_list;
590 
591 	if (async_head_qh == NULL) {
592 		/* Set this QH to be the "head" of the circular list */
593 		Set_QH(qh->qh_ctrl,
594 		    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_RECLAIM_HEAD));
595 
596 		/* Set new QH's link and previous pointer to itself */
597 		Set_QH(qh->qh_link_ptr, qh_addr | EHCI_QH_LINK_REF_QH);
598 		Set_QH(qh->qh_prev, qh_addr);
599 
600 		ehcip->ehci_head_of_async_sched_list = qh;
601 
602 		/* Set the head ptr to the new endpoint */
603 		Set_OpReg(ehci_async_list_addr, qh_addr);
604 
605 		/*
606 		 * For some reason this register might get nulled out by
607 		 * the Uli M1575 South Bridge. To workaround the hardware
608 		 * problem, check the value after write and retry if the
609 		 * last write fails.
610 		 *
611 		 * If the ASYNCLISTADDR remains "stuck" after
612 		 * EHCI_MAX_RETRY retries, then the M1575 is broken
613 		 * and is stuck in an inconsistent state and is about
614 		 * to crash the machine with a trn_oor panic when it
615 		 * does a DMA read from 0x0.  It is better to panic
616 		 * now rather than wait for the trn_oor crash; this
617 		 * way Customer Service will have a clean signature
618 		 * that indicts the M1575 chip rather than a
619 		 * mysterious and hard-to-diagnose trn_oor panic.
620 		 */
621 		if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
622 		    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
623 		    (qh_addr != Get_OpReg(ehci_async_list_addr))) {
624 			int retry = 0;
625 
626 			Set_OpRegRetry(ehci_async_list_addr, qh_addr, retry);
627 			if (retry >= EHCI_MAX_RETRY)
628 				cmn_err(CE_PANIC, "ehci_insert_async_qh:"
629 				    " ASYNCLISTADDR write failed.");
630 
631 			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
632 			    "ehci_insert_async_qh: ASYNCLISTADDR "
633 			    "write failed, retry=%d", retry);
634 		}
635 	} else {
636 		ASSERT(Get_QH(async_head_qh->qh_ctrl) &
637 		    EHCI_QH_CTRL_RECLAIM_HEAD);
638 
639 		/* Ensure this QH's "H" bit is not set */
640 		Set_QH(qh->qh_ctrl,
641 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_RECLAIM_HEAD));
642 
643 		next_qh = ehci_qh_iommu_to_cpu(ehcip,
644 		    Get_QH(async_head_qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
645 
646 		/* Set new QH's link and previous pointers */
647 		Set_QH(qh->qh_link_ptr,
648 		    Get_QH(async_head_qh->qh_link_ptr) | EHCI_QH_LINK_REF_QH);
649 		Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, async_head_qh));
650 
651 		/* Set next QH's prev pointer */
652 		Set_QH(next_qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, qh));
653 
654 		/* Set QH Head's link pointer points to new QH */
655 		Set_QH(async_head_qh->qh_link_ptr,
656 		    qh_addr | EHCI_QH_LINK_REF_QH);
657 	}
658 }
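
/*
 * Illustration (not part of the original source): the asynchronous
 * schedule is a circular list of QHs.  One QH carries the
 * EHCI_QH_CTRL_RECLAIM_HEAD ("H") bit and acts as the head of the ring;
 * ASYNCLISTADDR is written when the first QH is inserted, and later QHs
 * are linked in directly behind the head:
 *
 *	[head QH, H=1] --> [new QH] --> [next QH] --> ... ---+
 *	       ^                                             |
 *	       +---------------------------------------------+
 *
 * The qh_prev field is maintained by the driver only (the hardware
 * follows qh_link_ptr), so a QH can be unlinked without walking the ring.
 */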
659 
660 
661 /*
662  * ehci_insert_intr_qh:
663  *
664  * Insert an interrupt endpoint into the Host Controller's (HC) interrupt
665  * lattice tree.
666  */
667 static void
668 ehci_insert_intr_qh(
669 	ehci_state_t		*ehcip,
670 	ehci_pipe_private_t	*pp)
671 {
672 	ehci_qh_t		*qh = pp->pp_qh;
673 	ehci_qh_t		*next_lattice_qh, *lattice_qh;
674 	uint_t			hnode;
675 
676 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
677 	    "ehci_insert_intr_qh:");
678 
679 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
680 
681 	/* Make sure this QH is not already in the list */
682 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
683 
684 	/*
685 	 * The appropriate high speed node was found
686 	 * during the opening of the pipe.
687 	 */
688 	hnode = pp->pp_pnode;
689 
690 	/* Find the lattice endpoint */
691 	lattice_qh = &ehcip->ehci_qh_pool_addr[hnode];
692 
693 	/* Find the next lattice endpoint */
694 	next_lattice_qh = ehci_qh_iommu_to_cpu(
695 	    ehcip, (Get_QH(lattice_qh->qh_link_ptr) & EHCI_QH_LINK_PTR));
696 
697 	/* Update the previous pointer */
698 	Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, lattice_qh));
699 
700 	/* Check next_lattice_qh value */
701 	if (next_lattice_qh) {
702 		/* Update this qh to point to the next one in the lattice */
703 		Set_QH(qh->qh_link_ptr, Get_QH(lattice_qh->qh_link_ptr));
704 
705 		/* Update the previous pointer of qh->qh_link_ptr */
706 		if (Get_QH(next_lattice_qh->qh_state) != EHCI_QH_STATIC) {
707 			Set_QH(next_lattice_qh->qh_prev,
708 			    ehci_qh_cpu_to_iommu(ehcip, qh));
709 		}
710 	} else {
711 		/* Update qh's link pointer to terminate periodic list */
712 		Set_QH(qh->qh_link_ptr,
713 		    (Get_QH(lattice_qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
714 	}
715 
716 	/* Insert this endpoint into the lattice */
717 	Set_QH(lattice_qh->qh_link_ptr,
718 	    (ehci_qh_cpu_to_iommu(ehcip, qh) | EHCI_QH_LINK_REF_QH));
719 }
720 
721 
722 /*
723  * ehci_modify_qh_status_bit:
724  *
725  * Modify the halt bit on the Host Controller (HC) Endpoint Descriptor (QH).
726  *
727  * If several threads try to halt the same pipe, they will need to wait on
728  * a condition variable.  Only one thread is allowed to halt or unhalt the
729  * pipe at a time.
730  *
731  * Usually after a halt pipe, an unhalt pipe will follow soon after.  There
732  * is an assumption that an Unhalt pipe will never occur without a halt pipe.
733  */
734 static void
735 ehci_modify_qh_status_bit(
736 	ehci_state_t		*ehcip,
737 	ehci_pipe_private_t	*pp,
738 	halt_bit_t		action)
739 {
740 	ehci_qh_t		*qh = pp->pp_qh;
741 	uint_t			smask, eps, split_intr_qh;
742 	uint_t			status;
743 
744 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
745 	    "ehci_modify_qh_status_bit: action=0x%x qh=0x%p",
746 	    action, (void *)qh);
747 
748 	ehci_print_qh(ehcip, qh);
749 
750 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
751 
752 	/*
753 	 * If this pipe is in the middle of halting don't allow another
754 	 * thread to come in and modify the same pipe.
755 	 */
756 	while (pp->pp_halt_state & EHCI_HALT_STATE_HALTING) {
757 
758 		cv_wait(&pp->pp_halt_cmpl_cv,
759 		    &ehcip->ehci_int_mutex);
760 	}
761 
762 	/* Sync the QH QTD pool to get up to date information */
763 	Sync_QH_QTD_Pool(ehcip);
764 
765 
766 	if (action == CLEAR_HALT) {
767 		/*
768 		 * If the halt bit is to be cleared, just clear it.
769 		 * There shouldn't be any race condition problems.
770 		 * If the host controller reads the bit before the
771 		 * driver has a chance to set the bit, the bit will
772 		 * be reread on the next frame.
773 		 */
774 		Set_QH(qh->qh_ctrl,
775 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_ED_INACTIVATE));
776 		Set_QH(qh->qh_status,
777 		    Get_QH(qh->qh_status) & ~(EHCI_QH_STS_XACT_STATUS));
778 
779 		goto success;
780 	}
781 
782 	/* Halt the QH, but first check to see if it is already halted */
783 	status = Get_QH(qh->qh_status);
784 	if (!(status & EHCI_QH_STS_HALTED)) {
785 		/* Indicate that this pipe is in the middle of halting. */
786 		pp->pp_halt_state |= EHCI_HALT_STATE_HALTING;
787 
788 		/*
789 		 * Find out if this is an full/low speed interrupt endpoint.
790 		 * A non-zero Cmask indicates that this QH is an interrupt
791 		 * endpoint.  Check the endpoint speed to see if it is either
792 		 * FULL or LOW .
793 		 */
794 		smask = Get_QH(qh->qh_split_ctrl) &
795 		    EHCI_QH_SPLIT_CTRL_INTR_MASK;
796 		eps = Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_SPEED;
797 		split_intr_qh = ((smask != 0) &&
798 		    (eps != EHCI_QH_CTRL_ED_HIGH_SPEED));
799 
800 		if (eps == EHCI_QH_CTRL_ED_HIGH_SPEED) {
801 			ehci_halt_hs_qh(ehcip, pp, qh);
802 		} else {
803 			if (split_intr_qh) {
804 				ehci_halt_fls_intr_qh(ehcip, qh);
805 			} else {
806 				ehci_halt_fls_ctrl_and_bulk_qh(ehcip, pp, qh);
807 			}
808 		}
809 
810 		/* Indicate that this pipe is not in the middle of halting. */
811 		pp->pp_halt_state &= ~EHCI_HALT_STATE_HALTING;
812 	}
813 
814 	/* Sync the QH QTD pool again to get the most up to date information */
815 	Sync_QH_QTD_Pool(ehcip);
816 
817 	ehci_print_qh(ehcip, qh);
818 
819 	status = Get_QH(qh->qh_status);
820 	if (!(status & EHCI_QH_STS_HALTED)) {
821 		USB_DPRINTF_L1(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
822 		    "ehci_modify_qh_status_bit: Failed to halt qh=0x%p",
823 		    (void *)qh);
824 
825 		ehci_print_qh(ehcip, qh);
826 
827 		/* Set host controller soft state to error */
828 		ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
829 
830 		ASSERT(status & EHCI_QH_STS_HALTED);
831 	}
832 
833 success:
834 	/* Wake up threads waiting for this pipe to be halted. */
835 	cv_signal(&pp->pp_halt_cmpl_cv);
836 }
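
/*
 * Illustration (not part of the original source): halt requests are
 * serialized through pp_halt_state and pp_halt_cmpl_cv, so only one
 * thread at a time modifies a pipe's QH status.  CLEAR_HALT simply clears
 * the inactivate and transaction status bits, while a halt request picks
 * one of the three routines below according to endpoint speed and type
 * (high speed, full/low speed split interrupt, or full/low speed
 * control/bulk).
 */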
837 
838 
839 /*
840  * ehci_halt_hs_qh:
841  *
842  * Halts all types of HIGH SPEED QHs.
843  */
844 static void
845 ehci_halt_hs_qh(
846 	ehci_state_t		*ehcip,
847 	ehci_pipe_private_t	*pp,
848 	ehci_qh_t		*qh)
849 {
850 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
851 
852 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
853 	    "ehci_halt_hs_qh:");
854 
855 	/* Remove this qh from the HCD's view, but do not reclaim it */
856 	ehci_remove_qh(ehcip, pp, B_FALSE);
857 
858 	/*
859 	 * Wait for at least one SOF, just in case the HC is in the
860 	 * middle of accessing this QH.
861 	 */
862 	(void) ehci_wait_for_sof(ehcip);
863 
864 	/* Sync the QH QTD pool to get up to date information */
865 	Sync_QH_QTD_Pool(ehcip);
866 
867 	/* Modify the status bit and halt this QH. */
868 	Set_QH(qh->qh_status,
869 	    ((Get_QH(qh->qh_status) &
870 	    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
871 
872 	/* Insert this QH back into the HCD's view */
873 	ehci_insert_qh(ehcip, ph);
874 }
875 
876 
877 /*
878  * ehci_halt_fls_ctrl_and_bulk_qh:
879  *
880  * Halts FULL/LOW Ctrl and Bulk QHs only.
881  */
882 static void
883 ehci_halt_fls_ctrl_and_bulk_qh(
884 	ehci_state_t		*ehcip,
885 	ehci_pipe_private_t	*pp,
886 	ehci_qh_t		*qh)
887 {
888 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
889 	uint_t			status, split_status, bytes_left;
890 
891 
892 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
893 	    "ehci_halt_fls_ctrl_and_bulk_qh:");
894 
895 	/* Remove this qh from the HCD's view, but do not reclaim it */
896 	ehci_remove_qh(ehcip, pp, B_FALSE);
897 
898 	/*
899 	 * Wait for at least one SOF, just in case the HC is in the
900 	 * middle of accessing this QH.
901 	 */
902 	(void) ehci_wait_for_sof(ehcip);
903 
904 	/* Sync the QH QTD pool to get up to date information */
905 	Sync_QH_QTD_Pool(ehcip);
906 
907 	/* Modify the status bit and halt this QH. */
908 	Set_QH(qh->qh_status,
909 	    ((Get_QH(qh->qh_status) &
910 	    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
911 
912 	/* Check to see if the QH was in the middle of a transaction */
913 	status = Get_QH(qh->qh_status);
914 	split_status = status & EHCI_QH_STS_SPLIT_XSTATE;
915 	bytes_left = status & EHCI_QH_STS_BYTES_TO_XFER;
916 	if ((split_status == EHCI_QH_STS_DO_COMPLETE_SPLIT) &&
917 	    (bytes_left != 0)) {
918 		/* send ClearTTBuffer to this device's parent 2.0 hub */
919 		ehci_clear_tt_buffer(ehcip, ph, qh);
920 	}
921 
922 	/* Insert this QH back into the HCD's view */
923 	ehci_insert_qh(ehcip, ph);
924 }
925 
926 
927 /*
928  * ehci_clear_tt_buffer
929  *
930  * This function will send a Clear_TT_Buffer request to the pipe's
931  * parent 2.0 hub.
932  */
933 static void
934 ehci_clear_tt_buffer(
935 	ehci_state_t		*ehcip,
936 	usba_pipe_handle_data_t	*ph,
937 	ehci_qh_t		*qh)
938 {
939 	usba_device_t		*usba_device;
940 	usba_device_t		*hub_usba_device;
941 	usb_pipe_handle_t	hub_def_ph;
942 	usb_ep_descr_t		*eptd;
943 	uchar_t			attributes;
944 	uint16_t		wValue;
945 	usb_ctrl_setup_t	setup;
946 	usb_cr_t		completion_reason;
947 	usb_cb_flags_t		cb_flags;
948 	int			retry;
949 
950 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
951 	    "ehci_clear_tt_buffer: ");
952 
953 	/* Get some information about the current pipe */
954 	usba_device = ph->p_usba_device;
955 	eptd = &ph->p_ep;
956 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
957 
958 	/*
959 	 * Create the wValue for this request (usb spec 11.24.2.3)
960 	 * 3..0		Endpoint Number
961 	 * 10..4	Device Address
962 	 * 12..11	Endpoint Type
963 	 * 14..13	Reserved (must be 0)
964 	 * 15		Direction 1 = IN, 0 = OUT
965 	 */
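	/*
	 * Worked example (illustrative only, values assumed): for an
	 * interrupt IN endpoint 1 on a full speed device at address 2
	 * behind this TT, the encoding below would yield
	 *
	 *	wValue = 1			endpoint number
	 *	       | (2 << 4)		device address
	 *	       | (3 << 11)		endpoint type (interrupt)
	 *	       | 0x8000			direction IN
	 *	       = 0x9821
	 */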
966 	wValue = 0;
967 	if ((eptd->bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
968 		wValue |= 0x8000;
969 	}
970 	wValue |= attributes << 11;
971 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_DEVICE_ADDRESS) << 4;
972 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_HIGH_SPEED) >>
973 	    EHCI_QH_CTRL_ED_NUMBER_SHIFT;
974 
975 	mutex_exit(&ehcip->ehci_int_mutex);
976 
977 	/* Manually fill in the request. */
978 	setup.bmRequestType = EHCI_CLEAR_TT_BUFFER_REQTYPE;
979 	setup.bRequest = EHCI_CLEAR_TT_BUFFER_BREQ;
980 	setup.wValue = wValue;
981 	setup.wIndex = 1;
982 	setup.wLength = 0;
983 	setup.attrs = USB_ATTRS_NONE;
984 
985 	/* Get the usba_device of the parent 2.0 hub. */
986 	mutex_enter(&usba_device->usb_mutex);
987 	hub_usba_device = usba_device->usb_hs_hub_usba_dev;
988 	mutex_exit(&usba_device->usb_mutex);
989 
990 	/* Get the default ctrl pipe for the parent 2.0 hub */
991 	mutex_enter(&hub_usba_device->usb_mutex);
992 	hub_def_ph = (usb_pipe_handle_t)&hub_usba_device->usb_ph_list[0];
993 	mutex_exit(&hub_usba_device->usb_mutex);
994 
995 	for (retry = 0; retry < 3; retry++) {
996 
997 		/* sync send the request to the default pipe */
998 		if (usb_pipe_ctrl_xfer_wait(
999 		    hub_def_ph,
1000 		    &setup,
1001 		    NULL,
1002 		    &completion_reason, &cb_flags, 0) == USB_SUCCESS) {
1003 
1004 			break;
1005 		}
1006 
1007 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1008 		    "ehci_clear_tt_buffer: Failed to clear tt buffer,"
1009 		    "retry = %d, cr = %d, cb_flags = 0x%x\n",
1010 		    retry, completion_reason, cb_flags);
1011 	}
1012 
1013 	if (retry >= 3) {
1014 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1015 		dev_info_t *dip = hub_usba_device->usb_dip;
1016 
1017 		/*
1018 		 * Ask the user to hotplug the 2.0 hub, to make sure that
1019 		 * the TT buffer state is back in sync since this command failed.
1020 		 */
1021 		USB_DPRINTF_L0(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1022 		    "Error recovery failure: Please hotplug the 2.0 hub at"
1023 		    "%s", ddi_pathname(dip, path));
1024 
1025 		kmem_free(path, MAXPATHLEN);
1026 	}
1027 
1028 	mutex_enter(&ehcip->ehci_int_mutex);
1029 }
1030 
1031 /*
1032  * ehci_halt_fls_intr_qh:
1033  *
1034  * Halts FULL/LOW speed Intr QHs.
1035  */
1036 static void
1037 ehci_halt_fls_intr_qh(
1038 	ehci_state_t		*ehcip,
1039 	ehci_qh_t		*qh)
1040 {
1041 	usb_frame_number_t	starting_frame;
1042 	usb_frame_number_t	frames_past;
1043 	uint_t			status, i;
1044 
1045 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1046 	    "ehci_halt_fls_intr_qh:");
1047 
1048 	/*
1049 	 * Ask the HC to deactivate this full/low speed
1050 	 * periodic QH.
1051 	 */
1052 	Set_QH(qh->qh_ctrl,
1053 	    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_ED_INACTIVATE));
1054 
1055 	starting_frame = ehci_get_current_frame_number(ehcip);
1056 
1057 	/*
1058 	 * Wait at least EHCI_NUM_INTR_QH_LISTS+2 frames or until
1059 	 * the QH has been halted.
1060 	 */
1061 	Sync_QH_QTD_Pool(ehcip);
1062 	frames_past = 0;
1063 	status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1064 
1065 	while ((frames_past <= (EHCI_NUM_INTR_QH_LISTS + 2)) &&
1066 	    (status != 0)) {
1067 
1068 		(void) ehci_wait_for_sof(ehcip);
1069 
1070 		Sync_QH_QTD_Pool(ehcip);
1071 		status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1072 		frames_past = ehci_get_current_frame_number(ehcip) -
1073 		    starting_frame;
1074 	}
1075 
1076 	/* Modify the status bit and halt this QH. */
1077 	Sync_QH_QTD_Pool(ehcip);
1078 
1079 	status = Get_QH(qh->qh_status);
1080 
1081 	for (i = 0; i < EHCI_NUM_INTR_QH_LISTS; i++) {
1082 		Set_QH(qh->qh_status,
1083 		    ((Get_QH(qh->qh_status) &
1084 		    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
1085 
1086 		Sync_QH_QTD_Pool(ehcip);
1087 
1088 		(void) ehci_wait_for_sof(ehcip);
1089 		Sync_QH_QTD_Pool(ehcip);
1090 
1091 		if (Get_QH(qh->qh_status) & EHCI_QH_STS_HALTED) {
1092 
1093 			break;
1094 		}
1095 	}
1096 
1097 	Sync_QH_QTD_Pool(ehcip);
1098 
1099 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1100 	    "ehci_halt_fls_intr_qh: qh=0x%p frames past=%llu,"
1101 	    " status=0x%x, 0x%x", (void *)qh,
1102 	    (unsigned long long)(ehci_get_current_frame_number(ehcip) -
1103 	    starting_frame), status, Get_QH(qh->qh_status));
1104 }
1105 
1106 
1107 /*
1108  * ehci_remove_qh:
1109  *
1110  * Remove the Endpoint Descriptor (QH) from the Host Controller's appropriate
1111  * endpoint list.
1112  */
1113 void
1114 ehci_remove_qh(
1115 	ehci_state_t		*ehcip,
1116 	ehci_pipe_private_t	*pp,
1117 	boolean_t		reclaim)
1118 {
1119 	uchar_t			attributes;
1120 
1121 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1122 
1123 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1124 	    "ehci_remove_qh: qh=0x%p", (void *)pp->pp_qh);
1125 
1126 	attributes = pp->pp_pipe_handle->p_ep.bmAttributes & USB_EP_ATTR_MASK;
1127 
1128 	switch (attributes) {
1129 	case USB_EP_ATTR_CONTROL:
1130 	case USB_EP_ATTR_BULK:
1131 		ehci_remove_async_qh(ehcip, pp, reclaim);
1132 		ehcip->ehci_open_async_count--;
1133 		break;
1134 	case USB_EP_ATTR_INTR:
1135 		ehci_remove_intr_qh(ehcip, pp, reclaim);
1136 		ehcip->ehci_open_periodic_count--;
1137 		break;
1138 	case USB_EP_ATTR_ISOCH:
1139 		/* ISOCH does not use QH, don't do anything but update count */
1140 		ehcip->ehci_open_periodic_count--;
1141 		break;
1142 	}
1143 }
1144 
1145 
1146 /*
1147  * ehci_remove_async_qh:
1148  *
1149  * Remove a control/bulk endpoint from the Host Controller's (HC)
1150  * Asynchronous schedule endpoint list.
1151  */
1152 static void
1153 ehci_remove_async_qh(
1154 	ehci_state_t		*ehcip,
1155 	ehci_pipe_private_t	*pp,
1156 	boolean_t		reclaim)
1157 {
1158 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1159 	ehci_qh_t		*prev_qh, *next_qh;
1160 
1161 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1162 	    "ehci_remove_async_qh:");
1163 
1164 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1165 
1166 	prev_qh = ehci_qh_iommu_to_cpu(ehcip,
1167 	    Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR);
1168 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1169 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1170 
1171 	/* Make sure this QH is in the list */
1172 	ASSERT(prev_qh != NULL);
1173 
1174 	/*
1175 	 * If next QH and current QH are the same, then this is the last
1176 	 * QH on the Asynchronous Schedule list.
1177 	 */
1178 	if (qh == next_qh) {
1179 		ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1180 		/*
1181 		 * Null our pointer to the async sched list, but do not
1182 		 * touch the host controller's list_addr.
1183 		 */
1184 		ehcip->ehci_head_of_async_sched_list = NULL;
1185 		ASSERT(ehcip->ehci_open_async_count == 1);
1186 	} else {
1187 		/* If this QH is the HEAD then find another one to replace it */
1188 		if (ehcip->ehci_head_of_async_sched_list == qh) {
1189 
1190 			ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1191 			ehcip->ehci_head_of_async_sched_list = next_qh;
1192 			Set_QH(next_qh->qh_ctrl,
1193 			    Get_QH(next_qh->qh_ctrl) |
1194 			    EHCI_QH_CTRL_RECLAIM_HEAD);
1195 		}
1196 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1197 		Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1198 	}
1199 
1200 	/* qh_prev to indicate it is no longer in the circular list */
1201 	Set_QH(qh->qh_prev, NULL);
1202 
1203 	if (reclaim) {
1204 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1205 	}
1206 }
1207 
1208 
1209 /*
1210  * ehci_remove_intr_qh:
1211  *
1212  * Set up an interrupt endpoint to be removed from the Host Controller's (HC)
1213  * interrupt lattice tree. The Endpoint Descriptor (QH) will be freed in the
1214  * interrupt handler.
1215  */
1216 static void
1217 ehci_remove_intr_qh(
1218 	ehci_state_t		*ehcip,
1219 	ehci_pipe_private_t	*pp,
1220 	boolean_t		reclaim)
1221 {
1222 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1223 	ehci_qh_t		*prev_qh, *next_qh;
1224 
1225 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1226 	    "ehci_remove_intr_qh:");
1227 
1228 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1229 
1230 	prev_qh = ehci_qh_iommu_to_cpu(ehcip, Get_QH(qh->qh_prev));
1231 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1232 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1233 
1234 	/* Make sure this QH is in the list */
1235 	ASSERT(prev_qh != NULL);
1236 
1237 	if (next_qh) {
1238 		/* Update previous qh's link pointer */
1239 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1240 
1241 		if (Get_QH(next_qh->qh_state) != EHCI_QH_STATIC) {
1242 			/* Set the previous pointer of the next one */
1243 			Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1244 		}
1245 	} else {
1246 		/* Update previous qh's link pointer */
1247 		Set_QH(prev_qh->qh_link_ptr,
1248 		    (Get_QH(qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
1249 	}
1250 
1251 	/* qh_prev to indicate it is no longer in the circular list */
1252 	Set_QH(qh->qh_prev, NULL);
1253 
1254 	if (reclaim) {
1255 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1256 	}
1257 }
1258 
1259 
1260 /*
1261  * ehci_insert_qh_on_reclaim_list:
1262  *
1263  * Insert Endpoint onto the reclaim list
1264  */
1265 static void
1266 ehci_insert_qh_on_reclaim_list(
1267 	ehci_state_t		*ehcip,
1268 	ehci_pipe_private_t	*pp)
1269 {
1270 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1271 	ehci_qh_t		*next_qh, *prev_qh;
1272 	usb_frame_number_t	frame_number;
1273 
1274 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1275 
1276 	/*
1277 	 * Read the current usb frame number and add the appropriate number
1278 	 * of usb frames to wait before reclaiming the current endpoint.
1279 	 */
1280 	frame_number =
1281 	    ehci_get_current_frame_number(ehcip) + MAX_SOF_WAIT_COUNT;
1282 
1283 	/* Store 32-bit ID */
1284 	Set_QH(qh->qh_reclaim_frame,
1285 	    ((uint32_t)(EHCI_GET_ID((void *)(uintptr_t)frame_number))));
1286 
1287 	/* Insert the endpoint onto the reclamation list */
1288 	if (ehcip->ehci_reclaim_list) {
1289 		next_qh = ehcip->ehci_reclaim_list;
1290 
1291 		while (next_qh) {
1292 			prev_qh = next_qh;
1293 			next_qh = ehci_qh_iommu_to_cpu(ehcip,
1294 			    Get_QH(next_qh->qh_reclaim_next));
1295 		}
1296 
1297 		Set_QH(prev_qh->qh_reclaim_next,
1298 		    ehci_qh_cpu_to_iommu(ehcip, qh));
1299 	} else {
1300 		ehcip->ehci_reclaim_list = qh;
1301 	}
1302 
1303 	ASSERT(Get_QH(qh->qh_reclaim_next) == NULL);
1304 }
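
/*
 * Illustration (not part of the original source): a QH on the reclaim
 * list is stamped with the usb frame number after which it may safely be
 * freed (current frame + MAX_SOF_WAIT_COUNT).  The QH is expected to be
 * deallocated later, once that frame has passed and the host controller
 * can no longer be referencing it.
 */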
1305 
1306 
1307 /*
1308  * ehci_deallocate_qh:
1309  *
1310  * Deallocate a Host Controller's (HC) Endpoint Descriptor (QH).
1311  *
1312  * NOTE: This function is also called from POLLED MODE.
1313  */
1314 void
1315 ehci_deallocate_qh(
1316 	ehci_state_t	*ehcip,
1317 	ehci_qh_t	*old_qh)
1318 {
1319 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1320 
1321 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1322 	    "ehci_deallocate_qh:");
1323 
1324 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1325 
1326 	first_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1327 	    (Get_QH(old_qh->qh_next_qtd) & EHCI_QH_NEXT_QTD_PTR));
1328 
1329 	if (first_dummy_qtd) {
1330 		ASSERT(Get_QTD(first_dummy_qtd->qtd_state) == EHCI_QTD_DUMMY);
1331 
1332 		second_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1333 		    Get_QTD(first_dummy_qtd->qtd_next_qtd));
1334 
1335 		if (second_dummy_qtd) {
1336 			ASSERT(Get_QTD(second_dummy_qtd->qtd_state) ==
1337 			    EHCI_QTD_DUMMY);
1338 
1339 			ehci_deallocate_qtd(ehcip, second_dummy_qtd);
1340 		}
1341 
1342 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1343 	}
1344 
1345 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1346 	    "ehci_deallocate_qh: Deallocated 0x%p", (void *)old_qh);
1347 
1348 	Set_QH(old_qh->qh_state, EHCI_QH_FREE);
1349 }
1350 
1351 
1352 /*
1353  * ehci_qh_cpu_to_iommu:
1354  *
1355  * This function converts the given Endpoint Descriptor (QH) CPU address
1356  * to an IO address.
1357  *
1358  * NOTE: This function is also called from POLLED MODE.
1359  */
1360 uint32_t
1361 ehci_qh_cpu_to_iommu(
1362 	ehci_state_t	*ehcip,
1363 	ehci_qh_t	*addr)
1364 {
1365 	uint32_t	qh;
1366 
1367 	qh = (uint32_t)ehcip->ehci_qh_pool_cookie.dmac_address +
1368 	    (uint32_t)((uintptr_t)addr - (uintptr_t)(ehcip->ehci_qh_pool_addr));
1369 
1370 	ASSERT(qh >= ehcip->ehci_qh_pool_cookie.dmac_address);
1371 	ASSERT(qh <= ehcip->ehci_qh_pool_cookie.dmac_address +
1372 	    sizeof (ehci_qh_t) * ehci_qh_pool_size);
1373 
1374 	return (qh);
1375 }
1376 
1377 
1378 /*
1379  * ehci_qh_iommu_to_cpu:
1380  *
1381  * This function converts the given Endpoint Descriptor (QH) IO address
1382  * to a CPU address.
1383  */
1384 ehci_qh_t *
1385 ehci_qh_iommu_to_cpu(
1386 	ehci_state_t	*ehcip,
1387 	uintptr_t	addr)
1388 {
1389 	ehci_qh_t	*qh;
1390 
1391 	if (addr == NULL) {
1392 
1393 		return (NULL);
1394 	}
1395 
1396 	qh = (ehci_qh_t *)((uintptr_t)
1397 	    (addr - ehcip->ehci_qh_pool_cookie.dmac_address) +
1398 	    (uintptr_t)ehcip->ehci_qh_pool_addr);
1399 
1400 	ASSERT(qh >= ehcip->ehci_qh_pool_addr);
1401 	ASSERT((uintptr_t)qh <= (uintptr_t)ehcip->ehci_qh_pool_addr +
1402 	    (uintptr_t)(sizeof (ehci_qh_t) * ehci_qh_pool_size));
1403 
1404 	return (qh);
1405 }
1406 
1407 
1408 /*
1409  * Transfer Descriptor manipulation functions
1410  */
1411 
1412 /*
1413  * ehci_initialize_dummy:
1414  *
1415  * An Endpoint Descriptor (QH) has a dummy Transfer Descriptor (QTD) on the
1416  * end of its QTD list. Initially, both the head and tail pointers of the QH
1417  * point to the dummy QTD.
1418  */
1419 static int
1420 ehci_initialize_dummy(
1421 	ehci_state_t	*ehcip,
1422 	ehci_qh_t	*qh)
1423 {
1424 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1425 
1426 	/* Allocate first dummy QTD */
1427 	first_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1428 
1429 	if (first_dummy_qtd == NULL) {
1430 		return (USB_NO_RESOURCES);
1431 	}
1432 
1433 	/* Allocate second dummy QTD */
1434 	second_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1435 
1436 	if (second_dummy_qtd == NULL) {
1437 		/* Deallocate first dummy QTD */
1438 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1439 
1440 		return (USB_NO_RESOURCES);
1441 	}
1442 
1443 	/* Next QTD pointer of an QH point to this new dummy QTD */
1444 	Set_QH(qh->qh_next_qtd, ehci_qtd_cpu_to_iommu(ehcip,
1445 	    first_dummy_qtd) & EHCI_QH_NEXT_QTD_PTR);
1446 
1447 	/* Set qh's dummy qtd field */
1448 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, first_dummy_qtd));
1449 
1450 	/* Set first_dummy's next qtd pointer */
1451 	Set_QTD(first_dummy_qtd->qtd_next_qtd,
1452 	    ehci_qtd_cpu_to_iommu(ehcip, second_dummy_qtd));
1453 
1454 	return (USB_SUCCESS);
1455 }
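
/*
 * Illustration (not part of the original source): after
 * ehci_initialize_dummy() the QH's QTD list looks like
 *
 *	qh_next_qtd  --> [first dummy QTD] --> [second dummy QTD]
 *	qh_dummy_qtd --------^
 *
 * Keeping a dummy QTD at the tail lets a new QTD be appended by filling
 * in the current dummy and linking a fresh dummy behind it, so the host
 * controller never follows a dangling next pointer.
 */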
1456 
1457 /*
1458  * ehci_allocate_ctrl_resources:
1459  *
1460  * Calculates the number of tds necessary for a ctrl transfer, and allocates
1461  * all the resources necessary.
1462  *
1463  * Returns NULL if there are insufficient resources, otherwise the TW.
1464  */
1465 ehci_trans_wrapper_t *
1466 ehci_allocate_ctrl_resources(
1467 	ehci_state_t		*ehcip,
1468 	ehci_pipe_private_t	*pp,
1469 	usb_ctrl_req_t		*ctrl_reqp,
1470 	usb_flags_t		usb_flags)
1471 {
1472 	size_t			qtd_count = 2;
1473 	size_t			ctrl_buf_size;
1474 	ehci_trans_wrapper_t	*tw;
1475 
1476 	/* Add one more td for data phase */
1477 	if (ctrl_reqp->ctrl_wLength) {
1478 		qtd_count += 1;
1479 	}
1480 
1481 	/*
1482 	 * If we have a control data phase, the data buffer starts
1483 	 * on the next 4K page boundary. So the TW buffer is allocated
1484 	 * to be larger than required. The buffer in the range of
1485 	 * [SETUP_SIZE, EHCI_MAX_QTD_BUF_SIZE) is just for padding
1486 	 * and not to be transferred.
1487 	 */
1488 	if (ctrl_reqp->ctrl_wLength) {
1489 		ctrl_buf_size = EHCI_MAX_QTD_BUF_SIZE +
1490 		    ctrl_reqp->ctrl_wLength;
1491 	} else {
1492 		ctrl_buf_size = SETUP_SIZE;
1493 	}
1494 
1495 	tw = ehci_allocate_tw_resources(ehcip, pp, ctrl_buf_size,
1496 	    usb_flags, qtd_count);
1497 
1498 	return (tw);
1499 }
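
/*
 * Worked example (illustrative only): a control transfer with a
 * ctrl_wLength of 64 bytes needs qtd_count = 3 (setup, data and status
 * phases) and a TW buffer of EHCI_MAX_QTD_BUF_SIZE + 64 bytes; the
 * SETUP_SIZE byte setup packet is placed at offset 0 and the data phase
 * starts at the 4K aligned offset EHCI_MAX_QTD_BUF_SIZE, as described in
 * the comment above.
 */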
1500 
1501 /*
1502  * ehci_insert_ctrl_req:
1503  *
1504  * Create a Transfer Descriptor (QTD) and a data buffer for a control endpoint.
1505  */
1506 /* ARGSUSED */
1507 void
1508 ehci_insert_ctrl_req(
1509 	ehci_state_t		*ehcip,
1510 	usba_pipe_handle_data_t	*ph,
1511 	usb_ctrl_req_t		*ctrl_reqp,
1512 	ehci_trans_wrapper_t	*tw,
1513 	usb_flags_t		usb_flags)
1514 {
1515 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1516 	uchar_t			bmRequestType = ctrl_reqp->ctrl_bmRequestType;
1517 	uchar_t			bRequest = ctrl_reqp->ctrl_bRequest;
1518 	uint16_t		wValue = ctrl_reqp->ctrl_wValue;
1519 	uint16_t		wIndex = ctrl_reqp->ctrl_wIndex;
1520 	uint16_t		wLength = ctrl_reqp->ctrl_wLength;
1521 	mblk_t			*data = ctrl_reqp->ctrl_data;
1522 	uint32_t		ctrl = 0;
1523 	uint8_t			setup_packet[8];
1524 
1525 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1526 	    "ehci_insert_ctrl_req:");
1527 
1528 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1529 
1530 	/*
1531 	 * Save current control request pointer and timeout values
1532 	 * in transfer wrapper.
1533 	 */
1534 	tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
1535 	tw->tw_timeout = ctrl_reqp->ctrl_timeout ?
1536 	    ctrl_reqp->ctrl_timeout : EHCI_DEFAULT_XFER_TIMEOUT;
1537 
1538 	/*
1539 	 * Initialize the callback and any callback data for when
1540 	 * the qtd completes.
1541 	 */
1542 	tw->tw_handle_qtd = ehci_handle_ctrl_qtd;
1543 	tw->tw_handle_callback_value = NULL;
1544 
1545 	/*
1546 	 * swap the setup bytes where necessary since we specified
1547 	 * NEVERSWAP
1548 	 */
1549 	setup_packet[0] = bmRequestType;
1550 	setup_packet[1] = bRequest;
1551 	setup_packet[2] = (uint8_t)wValue;
1552 	setup_packet[3] = wValue >> 8;
1553 	setup_packet[4] = (uint8_t)wIndex;
1554 	setup_packet[5] = wIndex >> 8;
1555 	setup_packet[6] = (uint8_t)wLength;
1556 	setup_packet[7] = wLength >> 8;
1557 
1558 	bcopy(setup_packet, tw->tw_buf, SETUP_SIZE);
1559 
1560 	Sync_IO_Buffer_for_device(tw->tw_dmahandle, SETUP_SIZE);
1561 
1562 	ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_0 | EHCI_QTD_CTRL_SETUP_PID);
1563 
1564 	/*
1565 	 * The QTD's are placed on the QH one at a time.
1566 	 * Once this QTD is placed on the done list, the
1567 	 * data or status phase QTD will be enqueued.
1568 	 */
1569 	(void) ehci_insert_qtd(ehcip, ctrl, 0, SETUP_SIZE,
1570 	    EHCI_CTRL_SETUP_PHASE, pp, tw);
1571 
1572 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1573 	    "ehci_insert_ctrl_req: pp 0x%p", (void *)pp);
1574 
1575 	/*
1576 	 * If this control transfer has a data phase, record the
1577 	 * direction. If the data phase is an OUT transaction,
1578 	 * copy the data into the buffer of the transfer wrapper.
1579 	 */
1580 	if (wLength != 0) {
1581 		/* There is a data stage.  Find the direction */
1582 		if (bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
1583 			tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
1584 		} else {
1585 			tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
1586 
1587 			/* Copy the data into the message */
1588 			bcopy(data->b_rptr, tw->tw_buf + EHCI_MAX_QTD_BUF_SIZE,
1589 			    wLength);
1590 
1591 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1592 			    wLength + EHCI_MAX_QTD_BUF_SIZE);
1593 		}
1594 
1595 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 | tw->tw_direction);
1596 
1597 		/*
1598 		 * Create the QTD.  If this is an OUT transaction,
1599 		 * the data is already in the buffer of the TW.
1600 		 * The transfer should start from EHCI_MAX_QTD_BUF_SIZE
1601 		 * which is 4K aligned, though the ctrl phase only
1602 		 * transfers a length of SETUP_SIZE. The padding data
1603 		 * in the TW buffer are discarded.
1604 		 */
1605 		(void) ehci_insert_qtd(ehcip, ctrl, EHCI_MAX_QTD_BUF_SIZE,
1606 		    tw->tw_length - EHCI_MAX_QTD_BUF_SIZE,
1607 		    EHCI_CTRL_DATA_PHASE, pp, tw);
1608 
1609 		/*
1610 		 * The direction of the STATUS QTD depends on
1611 		 * the direction of the transfer.
1612 		 */
1613 		if (tw->tw_direction == EHCI_QTD_CTRL_IN_PID) {
1614 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1615 			    EHCI_QTD_CTRL_OUT_PID |
1616 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1617 		} else {
1618 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1619 			    EHCI_QTD_CTRL_IN_PID |
1620 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1621 		}
1622 	} else {
1623 		/*
1624 		 * There is no data stage, so initiate the
1625 		 * status phase from the host.
1626 		 */
1627 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 |
1628 		    EHCI_QTD_CTRL_IN_PID |
1629 		    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1630 	}
1631 
1632 
1633 	(void) ehci_insert_qtd(ehcip, ctrl, 0, 0,
1634 	    EHCI_CTRL_STATUS_PHASE, pp,  tw);
1635 
1636 	/* Start the timer for this control transfer */
1637 	ehci_start_xfer_timer(ehcip, pp, tw);
1638 }
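
/*
 * Illustration (not part of the original source): the QTD sequence built
 * above follows the usual three stage control transfer pattern:
 *
 *	SETUP  - DATA0 toggle, SETUP PID, setup packet at offset 0
 *	DATA   - DATA1 toggle, IN or OUT PID, buffer at EHCI_MAX_QTD_BUF_SIZE
 *	         (present only when wLength != 0)
 *	STATUS - DATA1 toggle, opposite direction of the data stage (IN when
 *	         there is no data stage), with interrupt-on-complete set
 */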
1639 
1640 
1641 /*
1642  * ehci_allocate_bulk_resources:
1643  *
1644  * Calculates the number of tds necessary for a bulk transfer, and allocates
1645  * all the resources necessary.
1646  *
1647  * Returns NULL if there are insufficient resources, otherwise the TW.
1648  */
1649 ehci_trans_wrapper_t *
1650 ehci_allocate_bulk_resources(
1651 	ehci_state_t		*ehcip,
1652 	ehci_pipe_private_t	*pp,
1653 	usb_bulk_req_t		*bulk_reqp,
1654 	usb_flags_t		usb_flags)
1655 {
1656 	size_t			qtd_count = 0;
1657 	ehci_trans_wrapper_t	*tw;
1658 
1659 	/* Check the size of bulk request */
1660 	if (bulk_reqp->bulk_len > EHCI_MAX_BULK_XFER_SIZE) {
1661 
1662 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1663 		    "ehci_allocate_bulk_resources: Bulk request size 0x%x is "
1664 		    "more than 0x%x", bulk_reqp->bulk_len,
1665 		    EHCI_MAX_BULK_XFER_SIZE);
1666 
1667 		return (NULL);
1668 	}
1669 
1670 	/* Get the required bulk packet size */
1671 	qtd_count = bulk_reqp->bulk_len / EHCI_MAX_QTD_XFER_SIZE;
1672 	if (bulk_reqp->bulk_len % EHCI_MAX_QTD_XFER_SIZE ||
1673 	    bulk_reqp->bulk_len == 0) {
1674 		qtd_count += 1;
1675 	}
1676 
1677 	tw = ehci_allocate_tw_resources(ehcip, pp, bulk_reqp->bulk_len,
1678 	    usb_flags, qtd_count);
1679 
1680 	return (tw);
1681 }
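
/*
 * Worked example (illustrative only): when bulk_len is an exact multiple
 * of EHCI_MAX_QTD_XFER_SIZE, qtd_count is simply bulk_len /
 * EHCI_MAX_QTD_XFER_SIZE; any remainder (or a zero length request) adds
 * one more QTD.  For instance, a transfer of 2.5 * EHCI_MAX_QTD_XFER_SIZE
 * bytes is split across three QTDs, the last one carrying the residue.
 */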
1682 
1683 /*
1684  * ehci_insert_bulk_req:
1685  *
1686  * Create a Transfer Descriptor (QTD) and a data buffer for a bulk
1687  * endpoint.
1688  */
1689 /* ARGSUSED */
1690 void
1691 ehci_insert_bulk_req(
1692 	ehci_state_t		*ehcip,
1693 	usba_pipe_handle_data_t	*ph,
1694 	usb_bulk_req_t		*bulk_reqp,
1695 	ehci_trans_wrapper_t	*tw,
1696 	usb_flags_t		flags)
1697 {
1698 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1699 	uint_t			bulk_pkt_size, count;
1700 	size_t			residue = 0, len = 0;
1701 	uint32_t		ctrl = 0;
1702 	int			pipe_dir;
1703 
1704 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1705 	    "ehci_insert_bulk_req: bulk_reqp = 0x%p flags = 0x%x",
1706 	    (void *)bulk_reqp, flags);
1707 
1708 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1709 
1710 	/* Get the bulk pipe direction */
1711 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
1712 
1713 	/* Get the required bulk packet size */
1714 	bulk_pkt_size = min(bulk_reqp->bulk_len, EHCI_MAX_QTD_XFER_SIZE);
1715 
1716 	if (bulk_pkt_size) {
1717 		residue = tw->tw_length % bulk_pkt_size;
1718 	}
1719 
1720 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1721 	    "ehci_insert_bulk_req: bulk_pkt_size = %d", bulk_pkt_size);
1722 
1723 	/*
1724 	 * Save current bulk request pointer and timeout values
1725 	 * in transfer wrapper.
1726 	 */
1727 	tw->tw_curr_xfer_reqp = (usb_opaque_t)bulk_reqp;
1728 	tw->tw_timeout = bulk_reqp->bulk_timeout;
1729 
1730 	/*
1731 	 * Initialize the callback and any callback
1732 	 * data required when the qtd completes.
1733 	 */
1734 	tw->tw_handle_qtd = ehci_handle_bulk_qtd;
1735 	tw->tw_handle_callback_value = NULL;
1736 
1737 	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
1738 	    EHCI_QTD_CTRL_OUT_PID : EHCI_QTD_CTRL_IN_PID;
1739 
1740 	if (tw->tw_direction == EHCI_QTD_CTRL_OUT_PID) {
1741 
1742 		if (bulk_reqp->bulk_len) {
1743 			ASSERT(bulk_reqp->bulk_data != NULL);
1744 
1745 			bcopy(bulk_reqp->bulk_data->b_rptr, tw->tw_buf,
1746 			    bulk_reqp->bulk_len);
1747 
1748 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1749 			    bulk_reqp->bulk_len);
1750 		}
1751 	}
1752 
1753 	ctrl = tw->tw_direction;
1754 
1755 	/* Insert all the bulk QTDs */
1756 	for (count = 0; count < tw->tw_num_qtds; count++) {
1757 
1758 		/* Check for last qtd */
1759 		if (count == (tw->tw_num_qtds - 1)) {
1760 
1761 			ctrl |= EHCI_QTD_CTRL_INTR_ON_COMPLETE;
1762 
1763 			/* Check for inserting residue data */
1764 			if (residue) {
1765 				bulk_pkt_size = (uint_t)residue;
1766 			}
1767 		}
1768 
1769 		/* Insert the QTD onto the endpoint */
1770 		(void) ehci_insert_qtd(ehcip, ctrl, len, bulk_pkt_size,
1771 		    0, pp, tw);
1772 
1773 		len = len + bulk_pkt_size;
1774 	}
1775 
1776 	/* Start the timer for this bulk transfer */
1777 	ehci_start_xfer_timer(ehcip, pp, tw);
1778 }
1779 
1780 
1781 /*
1782  * ehci_start_periodic_pipe_polling:
1783  *
1784  * NOTE: This function is also called from POLLED MODE.
1785  */
1786 int
1787 ehci_start_periodic_pipe_polling(
1788 	ehci_state_t		*ehcip,
1789 	usba_pipe_handle_data_t	*ph,
1790 	usb_opaque_t		periodic_in_reqp,
1791 	usb_flags_t		flags)
1792 {
1793 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1794 	usb_ep_descr_t		*eptd = &ph->p_ep;
1795 	int			error = USB_SUCCESS;
1796 
1797 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
1798 	    "ehci_start_periodic_pipe_polling: ep%d",
1799 	    ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK);
1800 
1801 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1802 
1803 	/*
1804 	 * Check and handle start polling on root hub interrupt pipe.
1805 	 */
1806 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
1807 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
1808 	    USB_EP_ATTR_INTR)) {
1809 
1810 		error = ehci_handle_root_hub_pipe_start_intr_polling(ph,
1811 		    (usb_intr_req_t *)periodic_in_reqp, flags);
1812 
1813 		return (error);
1814 	}
1815 
1816 	switch (pp->pp_state) {
1817 	case EHCI_PIPE_STATE_IDLE:
1818 		/* Save the Original client's Periodic IN request */
1819 		pp->pp_client_periodic_in_reqp = periodic_in_reqp;
1820 
1821 		/*
1822 		 * If this pipe is uninitialized, or a valid QTD is
1823 		 * not found, then insert a QTD on the interrupt IN
1824 		 * endpoint.
1825 		 */
1826 		error = ehci_start_pipe_polling(ehcip, ph, flags);
1827 
1828 		if (error != USB_SUCCESS) {
1829 			USB_DPRINTF_L2(PRINT_MASK_INTR,
1830 			    ehcip->ehci_log_hdl,
1831 			    "ehci_start_periodic_pipe_polling: "
1832 			    "Start polling failed");
1833 
1834 			pp->pp_client_periodic_in_reqp = NULL;
1835 
1836 			return (error);
1837 		}
1838 
1839 		USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
1840 		    "ehci_start_periodic_pipe_polling: PP = 0x%p", (void *)pp);
1841 
1842 #ifdef DEBUG
1843 		switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1844 		case USB_EP_ATTR_INTR:
1845 			ASSERT((pp->pp_tw_head != NULL) &&
1846 			    (pp->pp_tw_tail != NULL));
1847 			break;
1848 		case USB_EP_ATTR_ISOCH:
1849 			ASSERT((pp->pp_itw_head != NULL) &&
1850 			    (pp->pp_itw_tail != NULL));
1851 			break;
1852 		}
1853 #endif
1854 
1855 		break;
1856 	case EHCI_PIPE_STATE_ACTIVE:
1857 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1858 		    ehcip->ehci_log_hdl,
1859 		    "ehci_start_periodic_pipe_polling: "
1860 		    "Polling is already in progress");
1861 
1862 		error = USB_FAILURE;
1863 		break;
1864 	case EHCI_PIPE_STATE_ERROR:
1865 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1866 		    ehcip->ehci_log_hdl,
1867 		    "ehci_start_periodic_pipe_polling: "
1868 		    "Pipe is halted; perform reset "
1869 		    "before restarting polling");
1870 
1871 		error = USB_FAILURE;
1872 		break;
1873 	default:
1874 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1875 		    ehcip->ehci_log_hdl,
1876 		    "ehci_start_periodic_pipe_polling: "
1877 		    "Undefined state");
1878 
1879 		error = USB_FAILURE;
1880 		break;
1881 	}
1882 
1883 	return (error);
1884 }
1885 
1886 
1887 /*
1888  * ehci_start_pipe_polling:
1889  *
1890  * Insert the number of periodic requests corresponding to polling
1891  * interval as calculated during pipe open.
1892  */
1893 static int
1894 ehci_start_pipe_polling(
1895 	ehci_state_t		*ehcip,
1896 	usba_pipe_handle_data_t	*ph,
1897 	usb_flags_t		flags)
1898 {
1899 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1900 	usb_ep_descr_t		*eptd = &ph->p_ep;
1901 	int			error = USB_FAILURE;
1902 
1903 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1904 	    "ehci_start_pipe_polling:");
1905 
1906 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1907 
1908 	/*
1909 	 * For a start polling request, pp_max_periodic_req_cnt will be
1910 	 * zero; for a restart polling request it will be non-zero.
1911 	 *
1912 	 * In the start polling case, find out the number of requests
1913 	 * required for an interrupt IN endpoint based on its polling
1914 	 * interval. For isochronous IN endpoints the count is always
1915 	 * fixed, since their polling interval is one ms.
1916 	 */
1917 	if (pp->pp_max_periodic_req_cnt == 0) {
1918 
1919 		ehci_set_periodic_pipe_polling(ehcip, ph);
1920 	}
1921 
1922 	ASSERT(pp->pp_max_periodic_req_cnt != 0);
1923 
1924 	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1925 	case USB_EP_ATTR_INTR:
1926 		error = ehci_start_intr_polling(ehcip, ph, flags);
1927 		break;
1928 	case USB_EP_ATTR_ISOCH:
1929 		error = ehci_start_isoc_polling(ehcip, ph, flags);
1930 		break;
1931 	}
1932 
1933 	return (error);
1934 }
1935 
1936 static int
1937 ehci_start_intr_polling(
1938 	ehci_state_t		*ehcip,
1939 	usba_pipe_handle_data_t	*ph,
1940 	usb_flags_t		flags)
1941 {
1942 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1943 	ehci_trans_wrapper_t	*tw_list, *tw;
1944 	int			i, total_tws;
1945 	int			error = USB_SUCCESS;
1946 
1947 	/* Allocate all the necessary resources for the IN transfer */
1948 	tw_list = NULL;
1949 	total_tws = pp->pp_max_periodic_req_cnt - pp->pp_cur_periodic_req_cnt;
1950 	for (i = 0; i < total_tws; i += 1) {
1951 		tw = ehci_allocate_intr_resources(ehcip, ph, NULL, flags);
1952 		if (tw == NULL) {
1953 			error = USB_NO_RESOURCES;
1954 			/* There are not enough resources, deallocate the TWs */
1955 			tw = tw_list;
1956 			while (tw != NULL) {
1957 				tw_list = tw->tw_next;
1958 				ehci_deallocate_intr_in_resource(
1959 				    ehcip, pp, tw);
1960 				ehci_deallocate_tw(ehcip, pp, tw);
1961 				tw = tw_list;
1962 			}
1963 
1964 			return (error);
1965 		} else {
1966 			if (tw_list == NULL) {
1967 				tw_list = tw;
1968 			}
1969 		}
1970 	}
1971 
1972 	while (pp->pp_cur_periodic_req_cnt < pp->pp_max_periodic_req_cnt) {
1973 
1974 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1975 		    "ehci_start_pipe_polling: max = %d curr = %d tw = %p:",
1976 		    pp->pp_max_periodic_req_cnt, pp->pp_cur_periodic_req_cnt,
1977 		    (void *)tw_list);
1978 
1979 		tw = tw_list;
1980 		tw_list = tw->tw_next;
1981 
1982 		ehci_insert_intr_req(ehcip, pp, tw, flags);
1983 
1984 		pp->pp_cur_periodic_req_cnt++;
1985 	}
1986 
1987 	return (error);
1988 }
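
/*
 * Editor's sketch (illustration only, not part of the original source):
 * ehci_start_intr_polling() uses an all-or-nothing pre-allocation pattern:
 * every transfer wrapper is allocated before any request is handed to the
 * hardware, so a mid-stream allocation failure never leaves the pipe
 * partially started.  In the driver the new TWs end up chained through the
 * pipe's TW list by the allocator itself; a generic sketch of the pattern,
 * with hypothetical helper names, looks like:
 *
 *	list = NULL;
 *	for (i = 0; i < needed; i++) {
 *		if ((item = alloc_item()) == NULL) {
 *			while (list != NULL) {		(roll back)
 *				next = list->next;
 *				free_item(list);
 *				list = next;
 *			}
 *			return (USB_NO_RESOURCES);
 *		}
 *		if (list == NULL)
 *			list = item;
 *	}
 *	for (item = list; item != NULL; item = item->next)
 *		submit(item);
 */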
1989 
1990 
1991 /*
1992  * ehci_set_periodic_pipe_polling:
1993  *
1994  * Calculate the number of periodic requests needed for an interrupt IN
1995  * endpoint's polling interval. The table below gives the number of
1996  * periodic requests needed for interrupt IN endpoints according to the
1997  * endpoint polling interval.
1998  *
1999  * Polling interval		Number of periodic requests
2000  *
2001  * 1ms				4
2002  * 2ms				2
2003  * 4ms to 32ms			1
2004  */
2005 static void
2006 ehci_set_periodic_pipe_polling(
2007 	ehci_state_t		*ehcip,
2008 	usba_pipe_handle_data_t	*ph)
2009 {
2010 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2011 	usb_ep_descr_t		*endpoint = &ph->p_ep;
2012 	uchar_t			ep_attr = endpoint->bmAttributes;
2013 	uint_t			interval;
2014 
2015 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2016 	    "ehci_set_periodic_pipe_polling:");
2017 
2018 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2019 
2020 	pp->pp_cur_periodic_req_cnt = 0;
2021 
2022 	/*
2023 	 * If the client's interrupt IN request has the USB_ATTRS_ONE_XFER
2024 	 * attribute set, limit pp->pp_max_periodic_req_cnt to EHCI_INTR_XMS_REQS.
2025 	 */
2026 	if (((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) &&
2027 	    (pp->pp_client_periodic_in_reqp)) {
2028 		usb_intr_req_t *intr_reqp = (usb_intr_req_t *)
2029 		    pp->pp_client_periodic_in_reqp;
2030 
2031 		if (intr_reqp->intr_attributes &
2032 		    USB_ATTRS_ONE_XFER) {
2033 
2034 			pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2035 
2036 			return;
2037 		}
2038 	}
2039 
2040 	mutex_enter(&ph->p_usba_device->usb_mutex);
2041 
2042 	/*
2043 	 * The ehci_adjust_polling_interval function will not fail
2044 	 * at this instance since bandwidth allocation is already
2045 	 * done. Here we are getting only the periodic interval.
2046 	 */
2047 	interval = ehci_adjust_polling_interval(ehcip, endpoint,
2048 	    ph->p_usba_device->usb_port_status);
2049 
2050 	mutex_exit(&ph->p_usba_device->usb_mutex);
2051 
2052 	switch (interval) {
2053 	case EHCI_INTR_1MS_POLL:
2054 		pp->pp_max_periodic_req_cnt = EHCI_INTR_1MS_REQS;
2055 		break;
2056 	case EHCI_INTR_2MS_POLL:
2057 		pp->pp_max_periodic_req_cnt = EHCI_INTR_2MS_REQS;
2058 		break;
2059 	default:
2060 		pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2061 		break;
2062 	}
2063 
2064 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2065 	    "ehci_set_periodic_pipe_polling: Max periodic requests = %d",
2066 	    pp->pp_max_periodic_req_cnt);
2067 }
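
/*
 * Editor's note (illustration only, not part of the original source):
 * the mapping above keeps roughly 4 ms worth of interrupt IN requests
 * queued at all times; per the table in the function header this works
 * out to:
 *
 *	reqs = (interval == EHCI_INTR_1MS_POLL) ? 4 :
 *	    (interval == EHCI_INTR_2MS_POLL) ? 2 : 1;
 *
 * where 4, 2 and 1 correspond to EHCI_INTR_1MS_REQS, EHCI_INTR_2MS_REQS
 * and EHCI_INTR_XMS_REQS respectively (values quoted from the table above,
 * not re-checked against the header files).
 */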
2068 
2069 /*
2070  * ehci_allocate_intr_resources:
2071  *
2072  * Calculates the number of QTDs necessary for an interrupt transfer, and
2073  * allocates all the necessary resources.
2074  *
2075  * Returns NULL if there are insufficient resources, otherwise the TW.
2076  */
2077 ehci_trans_wrapper_t *
2078 ehci_allocate_intr_resources(
2079 	ehci_state_t		*ehcip,
2080 	usba_pipe_handle_data_t	*ph,
2081 	usb_intr_req_t		*intr_reqp,
2082 	usb_flags_t		flags)
2083 {
2084 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2085 	int			pipe_dir;
2086 	size_t			qtd_count = 1;
2087 	size_t			tw_length;
2088 	ehci_trans_wrapper_t	*tw;
2089 
2090 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2091 	    "ehci_allocate_intr_resources:");
2092 
2093 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2094 
2095 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
2096 
2097 	/* Get the length of interrupt transfer & alloc data */
2098 	if (intr_reqp) {
2099 		tw_length = intr_reqp->intr_len;
2100 	} else {
2101 		ASSERT(pipe_dir == USB_EP_DIR_IN);
2102 		tw_length = (pp->pp_client_periodic_in_reqp) ?
2103 		    (((usb_intr_req_t *)pp->
2104 		    pp_client_periodic_in_reqp)->intr_len) :
2105 		    ph->p_ep.wMaxPacketSize;
2106 	}
2107 
2108 	/* Check the size of interrupt request */
2109 	if (tw_length > EHCI_MAX_QTD_XFER_SIZE) {
2110 
2111 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2112 		    "ehci_allocate_intr_resources: Intr request size 0x%lx is "
2113 		    "more than 0x%x", tw_length, EHCI_MAX_QTD_XFER_SIZE);
2114 
2115 		return (NULL);
2116 	}
2117 
2118 	if ((tw = ehci_allocate_tw_resources(ehcip, pp, tw_length, flags,
2119 	    qtd_count)) == NULL) {
2120 
2121 		return (NULL);
2122 	}
2123 
2124 	if (pipe_dir == USB_EP_DIR_IN) {
2125 		if (ehci_allocate_intr_in_resource(ehcip, pp, tw, flags) !=
2126 		    USB_SUCCESS) {
2127 			ehci_deallocate_tw(ehcip, pp, tw);

			return (NULL);
2128 		}
2129 		tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
2130 	} else {
2131 		if (tw_length) {
2132 			ASSERT(intr_reqp->intr_data != NULL);
2133 
2134 			/* Copy the data into the buffer */
2135 			bcopy(intr_reqp->intr_data->b_rptr, tw->tw_buf,
2136 			    intr_reqp->intr_len);
2137 
2138 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
2139 			    intr_reqp->intr_len);
2140 		}
2141 
2142 		tw->tw_curr_xfer_reqp = (usb_opaque_t)intr_reqp;
2143 		tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
2144 	}
2145 
2146 	if (intr_reqp) {
2147 		tw->tw_timeout = intr_reqp->intr_timeout;
2148 	}
2149 
2150 	/*
2151 	 * Initialize the callback and any callback
2152 	 * data required when the qtd completes.
2153 	 */
2154 	tw->tw_handle_qtd = ehci_handle_intr_qtd;
2155 	tw->tw_handle_callback_value = NULL;
2156 
2157 	return (tw);
2158 }
2159 
2160 
2161 /*
2162  * ehci_insert_intr_req:
2163  *
2164  * Insert an Interrupt request into the Host Controller's periodic list.
2165  */
2166 /* ARGSUSED */
2167 void
2168 ehci_insert_intr_req(
2169 	ehci_state_t		*ehcip,
2170 	ehci_pipe_private_t	*pp,
2171 	ehci_trans_wrapper_t	*tw,
2172 	usb_flags_t		flags)
2173 {
2174 	uint_t			ctrl = 0;
2175 
2176 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2177 
2178 	ASSERT(tw->tw_curr_xfer_reqp != NULL);
2179 
2180 	ctrl = (tw->tw_direction | EHCI_QTD_CTRL_INTR_ON_COMPLETE);
2181 
2182 	/* Insert another interrupt QTD */
2183 	(void) ehci_insert_qtd(ehcip, ctrl, 0, tw->tw_length, 0, pp, tw);
2184 
2185 	/* Start the timer for this Interrupt transfer */
2186 	ehci_start_xfer_timer(ehcip, pp, tw);
2187 }
2188 
2189 
2190 /*
2191  * ehci_stop_periodic_pipe_polling:
2192  */
2193 /* ARGSUSED */
2194 int
2195 ehci_stop_periodic_pipe_polling(
2196 	ehci_state_t		*ehcip,
2197 	usba_pipe_handle_data_t	*ph,
2198 	usb_flags_t		flags)
2199 {
2200 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2201 	usb_ep_descr_t		*eptd = &ph->p_ep;
2202 
2203 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2204 	    "ehci_stop_periodic_pipe_polling: Flags = 0x%x", flags);
2205 
2206 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2207 
2208 	/*
2209 	 * Check and handle stop polling on root hub interrupt pipe.
2210 	 */
2211 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
2212 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
2213 	    USB_EP_ATTR_INTR)) {
2214 
2215 		ehci_handle_root_hub_pipe_stop_intr_polling(ph, flags);
2216 
2217 		return (USB_SUCCESS);
2218 	}
2219 
2220 	if (pp->pp_state != EHCI_PIPE_STATE_ACTIVE) {
2221 
2222 		USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2223 		    "ehci_stop_periodic_pipe_polling: "
2224 		    "Polling already stopped");
2225 
2226 		return (USB_SUCCESS);
2227 	}
2228 
2229 	/* Set pipe state to pipe stop polling */
2230 	pp->pp_state = EHCI_PIPE_STATE_STOP_POLLING;
2231 
2232 	ehci_pipe_cleanup(ehcip, ph);
2233 
2234 	return (USB_SUCCESS);
2235 }
2236 
2237 
2238 /*
2239  * ehci_insert_qtd:
2240  *
2241  * Insert a Transfer Descriptor (QTD) on an Endpoint Descriptor (QH).
2242  * Always returns USB_SUCCESS for now.	Once Isoch has been implemented,
2243  * it may return USB_FAILURE.
2244  */
2245 int
2246 ehci_insert_qtd(
2247 	ehci_state_t		*ehcip,
2248 	uint32_t		qtd_ctrl,
2249 	size_t			qtd_dma_offs,
2250 	size_t			qtd_length,
2251 	uint32_t		qtd_ctrl_phase,
2252 	ehci_pipe_private_t	*pp,
2253 	ehci_trans_wrapper_t	*tw)
2254 {
2255 	ehci_qtd_t		*curr_dummy_qtd, *next_dummy_qtd;
2256 	ehci_qtd_t		*new_dummy_qtd;
2257 	ehci_qh_t		*qh = pp->pp_qh;
2258 	int			error = USB_SUCCESS;
2259 
2260 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2261 
2262 	/* Allocate new dummy QTD */
2263 	new_dummy_qtd = tw->tw_qtd_free_list;
2264 
2265 	ASSERT(new_dummy_qtd != NULL);
2266 	tw->tw_qtd_free_list = ehci_qtd_iommu_to_cpu(ehcip,
2267 	    Get_QTD(new_dummy_qtd->qtd_tw_next_qtd));
2268 	Set_QTD(new_dummy_qtd->qtd_tw_next_qtd, NULL);
2269 
2270 	/* Get the current and next dummy QTDs */
2271 	curr_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2272 	    Get_QH(qh->qh_dummy_qtd));
2273 	next_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2274 	    Get_QTD(curr_dummy_qtd->qtd_next_qtd));
2275 
2276 	/* Update QH's dummy qtd field */
2277 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, next_dummy_qtd));
2278 
2279 	/* Update next dummy's next qtd pointer */
2280 	Set_QTD(next_dummy_qtd->qtd_next_qtd,
2281 	    ehci_qtd_cpu_to_iommu(ehcip, new_dummy_qtd));
2282 
2283 	/*
2284 	 * Fill in the current dummy qtd and
2285 	 * add the new dummy to the end.
2286 	 */
2287 	ehci_fill_in_qtd(ehcip, curr_dummy_qtd, qtd_ctrl,
2288 	    qtd_dma_offs, qtd_length, qtd_ctrl_phase, pp, tw);
2289 
2290 	/* Insert this qtd onto the tw */
2291 	ehci_insert_qtd_on_tw(ehcip, tw, curr_dummy_qtd);
2292 
2293 	/*
2294 	 * Insert this qtd onto active qtd list.
2295 	 * Don't insert polled mode qtd here.
2296 	 */
2297 	if (pp->pp_flag != EHCI_POLLED_MODE_FLAG) {
2298 		/* Insert this qtd onto active qtd list */
2299 		ehci_insert_qtd_into_active_qtd_list(ehcip, curr_dummy_qtd);
2300 	}
2301 
2302 	/* Print qh and qtd */
2303 	ehci_print_qh(ehcip, qh);
2304 	ehci_print_qtd(ehcip, curr_dummy_qtd);
2305 
2306 	return (error);
2307 }
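
/*
 * Editor's sketch (illustration only, not part of the original source):
 * ehci_insert_qtd() relies on the usual "dummy QTD" technique -- the QH's
 * queue always ends in one or more inactive dummies, so new work is queued
 * by filling in the current dummy and appending a fresh one, never by
 * editing a list element the controller might be walking.  Simplified to a
 * single dummy (the driver actually keeps a short chain of them):
 *
 *	before:  QH -> ... -> qtdA -> dummy        (dummy is inactive)
 *	step 1:  link a brand new dummy behind the existing tail
 *	step 2:  fill the old dummy with the transfer, then set its
 *		 EHCI_QTD_CTRL_ACTIVE_XACT bit
 *	after:   QH -> ... -> qtdA -> qtdB -> dummy'
 *
 * The controller only ever sees a fully formed QTD become active.
 */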
2308 
2309 
2310 /*
2311  * ehci_allocate_qtd_from_pool:
2312  *
2313  * Allocate a Transfer Descriptor (QTD) from the QTD buffer pool.
2314  */
2315 static ehci_qtd_t *
2316 ehci_allocate_qtd_from_pool(ehci_state_t	*ehcip)
2317 {
2318 	int		i, ctrl;
2319 	ehci_qtd_t	*qtd;
2320 
2321 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2322 
2323 	/*
2324 	 * Search for a blank Transfer Descriptor (QTD)
2325 	 * in the QTD buffer pool.
2326 	 */
2327 	for (i = 0; i < ehci_qtd_pool_size; i ++) {
2328 		ctrl = Get_QTD(ehcip->ehci_qtd_pool_addr[i].qtd_state);
2329 		if (ctrl == EHCI_QTD_FREE) {
2330 			break;
2331 		}
2332 	}
2333 
2334 	if (i >= ehci_qtd_pool_size) {
2335 		USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2336 		    "ehci_allocate_qtd_from_pool: QTD exhausted");
2337 
2338 		return (NULL);
2339 	}
2340 
2341 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2342 	    "ehci_allocate_qtd_from_pool: Allocated %d", i);
2343 
2344 	/* Create a new dummy for the end of the QTD list */
2345 	qtd = &ehcip->ehci_qtd_pool_addr[i];
2346 
2347 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2348 	    "ehci_allocate_qtd_from_pool: qtd 0x%p", (void *)qtd);
2349 
2350 	/* Mark the newly allocated QTD as a dummy */
2351 	Set_QTD(qtd->qtd_state, EHCI_QTD_DUMMY);
2352 
2353 	/* Mark the status of this new QTD to halted state */
2354 	Set_QTD(qtd->qtd_ctrl, EHCI_QTD_CTRL_HALTED_XACT);
2355 
2356 	/* Disable dummy QTD's next and alternate next pointers */
2357 	Set_QTD(qtd->qtd_next_qtd, EHCI_QTD_NEXT_QTD_PTR_VALID);
2358 	Set_QTD(qtd->qtd_alt_next_qtd, EHCI_QTD_ALT_NEXT_QTD_PTR_VALID);
2359 
2360 	return (qtd);
2361 }
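
/*
 * Editor's note (illustration only, not part of the original source):
 * QTD allocation is a simple linear scan of the fixed-size pool for the
 * first entry whose qtd_state is EHCI_QTD_FREE; there is no separate free
 * list for the pool.  The equivalent shape:
 *
 *	for (i = 0; i < ehci_qtd_pool_size; i++)
 *		if (state of pool[i] == EHCI_QTD_FREE)
 *			break;
 *	if (i >= ehci_qtd_pool_size)
 *		return (NULL);		(pool exhausted)
 */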
2362 
2363 
2364 /*
2365  * ehci_fill_in_qtd:
2366  *
2367  * Fill in the fields of a Transfer Descriptor (QTD).
2368  * The "Buffer Pointer" fields of a QTD are retrieved from the TW
2369  * it is associated with.
2370  *
2371  * Note:
2372  * qtd_dma_offs - the starting offset into the TW buffer, where the QTD
2373  *		  should transfer from. It should be 4K aligned, and when
2374  *		  a TW has more than one QTD, the QTDs must be filled in
2375  *		  increasing order.
2376  * qtd_length - the total bytes to transfer.
2377  */
2378 /*ARGSUSED*/
2379 static void
2380 ehci_fill_in_qtd(
2381 	ehci_state_t		*ehcip,
2382 	ehci_qtd_t		*qtd,
2383 	uint32_t		qtd_ctrl,
2384 	size_t			qtd_dma_offs,
2385 	size_t			qtd_length,
2386 	uint32_t		qtd_ctrl_phase,
2387 	ehci_pipe_private_t	*pp,
2388 	ehci_trans_wrapper_t	*tw)
2389 {
2390 	uint32_t		buf_addr;
2391 	size_t			buf_len = qtd_length;
2392 	uint32_t		ctrl = qtd_ctrl;
2393 	uint_t			i = 0;
2394 	int			rem_len;
2395 
2396 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2397 	    "ehci_fill_in_qtd: qtd 0x%p ctrl 0x%x bufoffs 0x%lx "
2398 	    "len 0x%lx", (void *)qtd, qtd_ctrl, qtd_dma_offs, qtd_length);
2399 
2400 	/* Assert that the qtd to be filled in is a dummy */
2401 	ASSERT(Get_QTD(qtd->qtd_state) == EHCI_QTD_DUMMY);
2402 
2403 	/* Change QTD's state Active */
2404 	Set_QTD(qtd->qtd_state, EHCI_QTD_ACTIVE);
2405 
2406 	/* Set the total length data transfer */
2407 	ctrl |= (((qtd_length << EHCI_QTD_CTRL_BYTES_TO_XFER_SHIFT)
2408 	    & EHCI_QTD_CTRL_BYTES_TO_XFER) | EHCI_QTD_CTRL_MAX_ERR_COUNTS);
2409 
2410 	/*
2411 	 * QTDs must be filled in increasing DMA offset order.
2412 	 * tw_dma_offs is initialized to be 0 at TW creation and
2413 	 * is only increased in this function.
2414 	 */
2415 	ASSERT(buf_len == 0 || qtd_dma_offs >= tw->tw_dma_offs);
2416 
2417 	/*
2418 	 * Save the starting dma buffer offset used and
2419 	 * length of data that will be transferred in
2420 	 * the current QTD.
2421 	 */
2422 	Set_QTD(qtd->qtd_xfer_offs, qtd_dma_offs);
2423 	Set_QTD(qtd->qtd_xfer_len, buf_len);
2424 
2425 	while (buf_len) {
2426 		/*
2427 		 * Advance to the next DMA cookie until finding the cookie
2428 		 * that qtd_dma_offs falls in.
2429 		 * It is very likely this loop will never repeat more than
2430 		 * once. It is here just to accommodate the case qtd_dma_offs
2431 		 * is increased by multiple cookies during two consecutive
2432 		 * calls into this function. In that case, the interim DMA
2433 		 * buffer is allowed to be skipped.
2434 		 */
2435 		while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2436 		    qtd_dma_offs) {
2437 			/*
2438 			 * tw_dma_offs always points to the starting offset
2439 			 * of a cookie
2440 			 */
2441 			tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2442 			ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2443 			tw->tw_cookie_idx++;
2444 			ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2445 		}
2446 
2447 		/*
2448 		 * Count the remaining buffer length to be filled into
2449 		 * the QTD for the current DMA cookie.
2450 		 */
2451 		rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2452 		    qtd_dma_offs;
2453 
2454 		/* Update the beginning of the buffer */
2455 		buf_addr = (qtd_dma_offs - tw->tw_dma_offs) +
2456 		    tw->tw_cookie.dmac_address;
2457 		ASSERT((buf_addr % EHCI_4K_ALIGN) == 0);
2458 		Set_QTD(qtd->qtd_buf[i], buf_addr);
2459 
2460 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2461 		    "ehci_fill_in_qtd: dmac_addr 0x%x dmac_size "
2462 		    "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2463 		    tw->tw_cookie_idx);
2464 
2465 		if (buf_len <= EHCI_MAX_QTD_BUF_SIZE) {
2466 			ASSERT(buf_len <= rem_len);
2467 			break;
2468 		} else {
2469 			ASSERT(rem_len >= EHCI_MAX_QTD_BUF_SIZE);
2470 			buf_len -= EHCI_MAX_QTD_BUF_SIZE;
2471 			qtd_dma_offs += EHCI_MAX_QTD_BUF_SIZE;
2472 		}
2473 
2474 		i++;
2475 	}
2476 
2477 	/*
2478 	 * Set up the alternate next qTD pointer if appropriate.  The alternate
2479 	 * qtd currently points to a QTD that is not yet linked, but will
2480 	 * be in the very near future.  If a short_xfer occurs in this
2481 	 * situation, the HC will automatically skip this QH.  Eventually
2482 	 * everything will be placed and the alternate_qtd will be a valid QTD.
2483 	 * For more information on alternate qtds look at section 3.5.2 in the
2484 	 * EHCI spec.
2485 	 */
2486 	if (tw->tw_alt_qtd != NULL) {
2487 		Set_QTD(qtd->qtd_alt_next_qtd,
2488 		    (ehci_qtd_cpu_to_iommu(ehcip, tw->tw_alt_qtd) &
2489 		    EHCI_QTD_ALT_NEXT_QTD_PTR));
2490 	}
2491 
2492 	/*
2493 	 * For control, bulk and interrupt QTD, now
2494 	 * enable current QTD by setting active bit.
2495 	 */
2496 	Set_QTD(qtd->qtd_ctrl, (ctrl | EHCI_QTD_CTRL_ACTIVE_XACT));
2497 
2498 	/*
2499 	 * For a Control Xfer, qtd_ctrl_phase is a valid field.
2500 	 */
2501 	if (qtd_ctrl_phase) {
2502 		Set_QTD(qtd->qtd_ctrl_phase, qtd_ctrl_phase);
2503 	}
2504 
2505 	/* Set the transfer wrapper */
2506 	ASSERT(tw != NULL);
2507 	ASSERT(tw->tw_id != NULL);
2508 
2509 	Set_QTD(qtd->qtd_trans_wrapper, (uint32_t)tw->tw_id);
2510 }
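
/*
 * Editor's sketch (illustration only, not part of the original source):
 * the cookie walk above maps a logical offset within the TW onto the TW's
 * scatter/gather list.  With a hypothetical TW whose first DMA cookie is
 * 8 KB at device address 0x10000000 and whose second cookie is 16 KB at
 * 0x20000000, a QTD starting at qtd_dma_offs 0x3000 resolves as:
 *
 *	tw_dma_offs = 0x0000, cookie 0 covers [0x0000, 0x2000)  -> advance
 *	tw_dma_offs = 0x2000, cookie 1 covers [0x2000, 0x6000)  -> stop
 *	buf_addr = (0x3000 - 0x2000) + 0x20000000 = 0x20001000
 *
 * Each qtd_buf[] slot then covers at most EHCI_MAX_QTD_BUF_SIZE bytes
 * (a 4 KB page, judging from the EHCI_4K_ALIGN assertion) before the loop
 * moves on to the next slot.
 */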
2511 
2512 
2513 /*
2514  * ehci_insert_qtd_on_tw:
2515  *
2516  * The transfer wrapper keeps a list of all Transfer Descriptors (QTD) that
2517  * are allocated for this transfer. Insert a QTD onto this list. The list
2518  * of QTDs does not include the dummy QTD that is at the end of the list of
2519  * QTDs for the endpoint.
2520  */
2521 static void
2522 ehci_insert_qtd_on_tw(
2523 	ehci_state_t		*ehcip,
2524 	ehci_trans_wrapper_t	*tw,
2525 	ehci_qtd_t		*qtd)
2526 {
2527 	/*
2528 	 * Set the next pointer to NULL because
2529 	 * this is the last QTD on the list.
2530 	 */
2531 	Set_QTD(qtd->qtd_tw_next_qtd, NULL);
2532 
2533 	if (tw->tw_qtd_head == NULL) {
2534 		ASSERT(tw->tw_qtd_tail == NULL);
2535 		tw->tw_qtd_head = qtd;
2536 		tw->tw_qtd_tail = qtd;
2537 	} else {
2538 		ehci_qtd_t *dummy = (ehci_qtd_t *)tw->tw_qtd_tail;
2539 
2540 		ASSERT(dummy != NULL);
2541 		ASSERT(dummy != qtd);
2542 		ASSERT(Get_QTD(qtd->qtd_state) != EHCI_QTD_DUMMY);
2543 
2544 		/* Add the qtd to the end of the list */
2545 		Set_QTD(dummy->qtd_tw_next_qtd,
2546 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2547 
2548 		tw->tw_qtd_tail = qtd;
2549 
2550 		ASSERT(Get_QTD(qtd->qtd_tw_next_qtd) == NULL);
2551 	}
2552 }
2553 
2554 
2555 /*
2556  * ehci_insert_qtd_into_active_qtd_list:
2557  *
2558  * Insert current QTD into active QTD list.
2559  */
2560 static void
2561 ehci_insert_qtd_into_active_qtd_list(
2562 	ehci_state_t		*ehcip,
2563 	ehci_qtd_t		*qtd)
2564 {
2565 	ehci_qtd_t		*curr_qtd, *next_qtd;
2566 
2567 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2568 
2569 	curr_qtd = ehcip->ehci_active_qtd_list;
2570 
2571 	/* Insert this QTD into QTD Active List */
2572 	if (curr_qtd) {
2573 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2574 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2575 
2576 		while (next_qtd) {
2577 			curr_qtd = next_qtd;
2578 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2579 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2580 		}
2581 
2582 		Set_QTD(qtd->qtd_active_qtd_prev,
2583 		    ehci_qtd_cpu_to_iommu(ehcip, curr_qtd));
2584 
2585 		Set_QTD(curr_qtd->qtd_active_qtd_next,
2586 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2587 	} else {
2588 		ehcip->ehci_active_qtd_list = qtd;
2589 		Set_QTD(qtd->qtd_active_qtd_next, NULL);
2590 		Set_QTD(qtd->qtd_active_qtd_prev, NULL);
2591 	}
2592 }
2593 
2594 
2595 /*
2596  * ehci_remove_qtd_from_active_qtd_list:
2597  *
2598  * Remove current QTD from the active QTD list.
2599  *
2600  * NOTE: This function is also called from POLLED MODE.
2601  */
2602 void
2603 ehci_remove_qtd_from_active_qtd_list(
2604 	ehci_state_t		*ehcip,
2605 	ehci_qtd_t		*qtd)
2606 {
2607 	ehci_qtd_t		*curr_qtd, *prev_qtd, *next_qtd;
2608 
2609 	ASSERT(qtd != NULL);
2610 
2611 	curr_qtd = ehcip->ehci_active_qtd_list;
2612 
2613 	while ((curr_qtd) && (curr_qtd != qtd)) {
2614 		curr_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2615 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2616 	}
2617 
2618 	if ((curr_qtd) && (curr_qtd == qtd)) {
2619 		prev_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2620 		    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2621 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2622 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2623 
2624 		if (prev_qtd) {
2625 			Set_QTD(prev_qtd->qtd_active_qtd_next,
2626 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2627 		} else {
2628 			ehcip->ehci_active_qtd_list = next_qtd;
2629 		}
2630 
2631 		if (next_qtd) {
2632 			Set_QTD(next_qtd->qtd_active_qtd_prev,
2633 			    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2634 		}
2635 	} else {
2636 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2637 		    "ehci_remove_qtd_from_active_qtd_list: "
2638 		    "Unable to find QTD in active_qtd_list");
2639 	}
2640 }
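
/*
 * Editor's note (illustration only, not part of the original source):
 * the active QTD list is a conventional doubly linked list, except that the
 * prev/next links are stored as 32-bit IOMMU addresses inside the QTDs, so
 * every hop goes through ehci_qtd_iommu_to_cpu() and every link update
 * through ehci_qtd_cpu_to_iommu().  The unlink above is the usual pattern:
 *
 *	if (prev != NULL)
 *		prev->next = curr->next;
 *	else
 *		head = curr->next;
 *	if (next != NULL)
 *		next->prev = curr->prev;
 */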
2641 
2642 
2643 /*
2644  * ehci_traverse_qtds:
2645  *
2646  * Traverse the list of QTDs for given pipe using transfer wrapper.  Since
2647  * the endpoint is marked as Halted, the Host Controller (HC) is no longer
2648  * accessing these QTDs. Remove all the QTDs that are attached to endpoint.
2649  */
2650 static void
2651 ehci_traverse_qtds(
2652 	ehci_state_t		*ehcip,
2653 	usba_pipe_handle_data_t	*ph)
2654 {
2655 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2656 	ehci_trans_wrapper_t	*next_tw;
2657 	ehci_qtd_t		*qtd;
2658 	ehci_qtd_t		*next_qtd;
2659 
2660 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2661 
2662 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2663 	    "ehci_traverse_qtds:");
2664 
2665 	/* Process the transfer wrappers for this pipe */
2666 	next_tw = pp->pp_tw_head;
2667 
2668 	while (next_tw) {
2669 		/* Stop the transfer timer */
2670 		ehci_stop_xfer_timer(ehcip, next_tw, EHCI_REMOVE_XFER_ALWAYS);
2671 
2672 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
2673 
2674 		/* Walk through each QTD for this transfer wrapper */
2675 		while (qtd) {
2676 			/* Remove this QTD from active QTD list */
2677 			ehci_remove_qtd_from_active_qtd_list(ehcip, qtd);
2678 
2679 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2680 			    Get_QTD(qtd->qtd_tw_next_qtd));
2681 
2682 			/* Deallocate this QTD */
2683 			ehci_deallocate_qtd(ehcip, qtd);
2684 
2685 			qtd = next_qtd;
2686 		}
2687 
2688 		next_tw = next_tw->tw_next;
2689 	}
2690 
2691 	/* Clear current qtd pointer */
2692 	Set_QH(pp->pp_qh->qh_curr_qtd, (uint32_t)0x00000000);
2693 
2694 	/* Update the next qtd pointer in the QH */
2695 	Set_QH(pp->pp_qh->qh_next_qtd, Get_QH(pp->pp_qh->qh_dummy_qtd));
2696 }
2697 
2698 
2699 /*
2700  * ehci_deallocate_qtd:
2701  *
2702  * Deallocate a Host Controller's (HC) Transfer Descriptor (QTD).
2703  *
2704  * NOTE: This function is also called from POLLED MODE.
2705  */
2706 void
2707 ehci_deallocate_qtd(
2708 	ehci_state_t		*ehcip,
2709 	ehci_qtd_t		*old_qtd)
2710 {
2711 	ehci_trans_wrapper_t	*tw = NULL;
2712 
2713 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2714 	    "ehci_deallocate_qtd: old_qtd = 0x%p", (void *)old_qtd);
2715 
2716 	/*
2717 	 * Obtain the transfer wrapper; tw will be
2718 	 * NULL for the dummy QTDs.
2719 	 */
2720 	if (Get_QTD(old_qtd->qtd_state) != EHCI_QTD_DUMMY) {
2721 		tw = (ehci_trans_wrapper_t *)
2722 		    EHCI_LOOKUP_ID((uint32_t)
2723 		    Get_QTD(old_qtd->qtd_trans_wrapper));
2724 
2725 		ASSERT(tw != NULL);
2726 	}
2727 
2728 	/*
2729 	 * If QTD's transfer wrapper is NULL, don't access its TW.
2730 	 * Just free the QTD.
2731 	 */
2732 	if (tw) {
2733 		ehci_qtd_t	*qtd, *next_qtd;
2734 
2735 		qtd = tw->tw_qtd_head;
2736 
2737 		if (old_qtd != qtd) {
2738 			next_qtd = ehci_qtd_iommu_to_cpu(
2739 			    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2740 
2741 			while (next_qtd != old_qtd) {
2742 				qtd = next_qtd;
2743 				next_qtd = ehci_qtd_iommu_to_cpu(
2744 				    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2745 			}
2746 
2747 			Set_QTD(qtd->qtd_tw_next_qtd, old_qtd->qtd_tw_next_qtd);
2748 
2749 			if (qtd->qtd_tw_next_qtd == NULL) {
2750 				tw->tw_qtd_tail = qtd;
2751 			}
2752 		} else {
2753 			tw->tw_qtd_head = ehci_qtd_iommu_to_cpu(
2754 			    ehcip, Get_QTD(old_qtd->qtd_tw_next_qtd));
2755 
2756 			if (tw->tw_qtd_head == NULL) {
2757 				tw->tw_qtd_tail = NULL;
2758 			}
2759 		}
2760 	}
2761 
2762 	bzero((void *)old_qtd, sizeof (ehci_qtd_t));
2763 	Set_QTD(old_qtd->qtd_state, EHCI_QTD_FREE);
2764 
2765 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2766 	    "Dealloc_qtd: qtd 0x%p", (void *)old_qtd);
2767 }
2768 
2769 
2770 /*
2771  * ehci_qtd_cpu_to_iommu:
2772  *
2773  * This function converts the given Transfer Descriptor (QTD) CPU address
2774  * to an IO address.
2775  *
2776  * NOTE: This function is also called from POLLED MODE.
2777  */
2778 uint32_t
2779 ehci_qtd_cpu_to_iommu(
2780 	ehci_state_t	*ehcip,
2781 	ehci_qtd_t	*addr)
2782 {
2783 	uint32_t	td;
2784 
2785 	td  = (uint32_t)ehcip->ehci_qtd_pool_cookie.dmac_address +
2786 	    (uint32_t)((uintptr_t)addr -
2787 	    (uintptr_t)(ehcip->ehci_qtd_pool_addr));
2788 
2789 	ASSERT((ehcip->ehci_qtd_pool_cookie.dmac_address +
2790 	    (uint32_t) (sizeof (ehci_qtd_t) *
2791 	    (addr - ehcip->ehci_qtd_pool_addr))) ==
2792 	    (ehcip->ehci_qtd_pool_cookie.dmac_address +
2793 	    (uint32_t)((uintptr_t)addr - (uintptr_t)
2794 	    (ehcip->ehci_qtd_pool_addr))));
2795 
2796 	ASSERT(td >= ehcip->ehci_qtd_pool_cookie.dmac_address);
2797 	ASSERT(td <= ehcip->ehci_qtd_pool_cookie.dmac_address +
2798 	    sizeof (ehci_qtd_t) * ehci_qtd_pool_size);
2799 
2800 	return (td);
2801 }
2802 
2803 
2804 /*
2805  * ehci_qtd_iommu_to_cpu:
2806  *
2807  * This function converts the given Transfer Descriptor (QTD) IO address
2808  * to a CPU address.
2809  *
2810  * NOTE: This function is also called from POLLED MODE.
2811  */
2812 ehci_qtd_t *
2813 ehci_qtd_iommu_to_cpu(
2814 	ehci_state_t	*ehcip,
2815 	uintptr_t	addr)
2816 {
2817 	ehci_qtd_t	*qtd;
2818 
2819 	if (addr == NULL) {
2820 
2821 		return (NULL);
2822 	}
2823 
2824 	qtd = (ehci_qtd_t *)((uintptr_t)
2825 	    (addr - ehcip->ehci_qtd_pool_cookie.dmac_address) +
2826 	    (uintptr_t)ehcip->ehci_qtd_pool_addr);
2827 
2828 	ASSERT(qtd >= ehcip->ehci_qtd_pool_addr);
2829 	ASSERT((uintptr_t)qtd <= (uintptr_t)ehcip->ehci_qtd_pool_addr +
2830 	    (uintptr_t)(sizeof (ehci_qtd_t) * ehci_qtd_pool_size));
2831 
2832 	return (qtd);
2833 }
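
/*
 * Editor's sketch (illustration only, not part of the original source):
 * both converters above are plain base-plus-offset arithmetic over the
 * single QTD pool.  If, hypothetically, the pool's dmac_address were
 * 0x40000000, its kernel virtual base 0xfffff000 and sizeof (ehci_qtd_t)
 * 64 bytes, then the QTD at index 3 converts as:
 *
 *	cpu  = 0xfffff000 + 3 * 64                    = 0xfffff0c0
 *	io   = 0x40000000 + (0xfffff0c0 - 0xfffff000) = 0x400000c0
 *	back = 0xfffff000 + (0x400000c0 - 0x40000000) = 0xfffff0c0
 *
 * The ASSERTs merely bound the result to the pool.
 */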
2834 
2835 /*
2836  * ehci_allocate_tds_for_tw:
2837  *
2838  * Allocate n Transfer Descriptors (QTD) from the QTD buffer pool and place
2839  * them into the TW.  Also choose the correct alternate qtd when required.
2840  * It is used for hardware short transfer support.  For more information on
2841  * alternate qtds look at section 3.5.2 in the EHCI spec.
2842  * Here is how the alternate qtds are used:
2843  *
2844  * Bulk: used fully.
2845  * Intr: xfers only require 1 QTD, so alternate qtds are never used.
2846  * Ctrl: should not use an alternate QTD.
2847  * Isoch: doesn't support short_xfer nor does it use QTDs.
2848  *
2849  * Returns USB_NO_RESOURCES if it was not able to allocate all the requested
2850  * QTDs, otherwise USB_SUCCESS.
2851  */
2852 int
2853 ehci_allocate_tds_for_tw(
2854 	ehci_state_t		*ehcip,
2855 	ehci_pipe_private_t	*pp,
2856 	ehci_trans_wrapper_t	*tw,
2857 	size_t			qtd_count)
2858 {
2859 	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
2860 	uchar_t			attributes;
2861 	ehci_qtd_t		*qtd;
2862 	uint32_t		qtd_addr;
2863 	int			i;
2864 	int			error = USB_SUCCESS;
2865 
2866 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
2867 
2868 	for (i = 0; i < qtd_count; i += 1) {
2869 		qtd = ehci_allocate_qtd_from_pool(ehcip);
2870 		if (qtd == NULL) {
2871 			error = USB_NO_RESOURCES;
2872 			USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2873 			    "ehci_allocate_tds_for_tw: "
2874 			    "Unable to allocate %lu QTDs",
2875 			    qtd_count);
2876 			break;
2877 		}
2878 		if (i > 0) {
2879 			qtd_addr = ehci_qtd_cpu_to_iommu(ehcip,
2880 			    tw->tw_qtd_free_list);
2881 			Set_QTD(qtd->qtd_tw_next_qtd, qtd_addr);
2882 		}
2883 		tw->tw_qtd_free_list = qtd;
2884 
2885 		/*
2886 		 * Save the second one as a pointer to the new dummy 1.
2887 		 * It is used later for the alt_qtd_ptr.  Xfers with only
2888 		 * one qtd do not need alt_qtd_ptr.
2889 		 * The QTDs are allocated and put onto a stack, that is
2890 		 * why the second qtd allocated will turn out to be the
2891 		 * new dummy 1.
2892 		 */
2893 		if ((i == 1) && (attributes == USB_EP_ATTR_BULK)) {
2894 			tw->tw_alt_qtd = qtd;
2895 		}
2896 	}
2897 
2898 	return (error);
2899 }
2900 
2901 /*
2902  * ehci_allocate_tw_resources:
2903  *
2904  * Allocate a Transaction Wrapper (TW) and n Transfer Descriptors (QTD)
2905  * from the QTD buffer pool and places them into the TW.  It is an all
2906  * or nothing transaction.
2907  *
2908  * Returns NULL if there are insufficient resources, otherwise the TW.
2909  */
2910 static ehci_trans_wrapper_t *
2911 ehci_allocate_tw_resources(
2912 	ehci_state_t		*ehcip,
2913 	ehci_pipe_private_t	*pp,
2914 	size_t			tw_length,
2915 	usb_flags_t		usb_flags,
2916 	size_t			qtd_count)
2917 {
2918 	ehci_trans_wrapper_t	*tw;
2919 
2920 	tw = ehci_create_transfer_wrapper(ehcip, pp, tw_length, usb_flags);
2921 
2922 	if (tw == NULL) {
2923 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2924 		    "ehci_allocate_tw_resources: Unable to allocate TW");
2925 	} else {
2926 		if (ehci_allocate_tds_for_tw(ehcip, pp, tw, qtd_count) ==
2927 		    USB_SUCCESS) {
2928 			tw->tw_num_qtds = (uint_t)qtd_count;
2929 		} else {
2930 			ehci_deallocate_tw(ehcip, pp, tw);
2931 			tw = NULL;
2932 		}
2933 	}
2934 
2935 	return (tw);
2936 }
2937 
2938 
2939 /*
2940  * ehci_free_tw_td_resources:
2941  *
2942  * Free all allocated resources for Transaction Wrapper (TW).
2943  * Does not free the TW itself.
2944  *
2945  * The unused QTDs on the TW's free list are returned to the QTD pool.
2946  */
2947 static void
2948 ehci_free_tw_td_resources(
2949 	ehci_state_t		*ehcip,
2950 	ehci_trans_wrapper_t	*tw)
2951 {
2952 	ehci_qtd_t		*qtd = NULL;
2953 	ehci_qtd_t		*temp_qtd = NULL;
2954 
2955 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2956 	    "ehci_free_tw_td_resources: tw = 0x%p", (void *)tw);
2957 
2958 	qtd = tw->tw_qtd_free_list;
2959 	while (qtd != NULL) {
2960 		/* Save the pointer to the next qtd before destroying it */
2961 		temp_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2962 		    Get_QTD(qtd->qtd_tw_next_qtd));
2963 		ehci_deallocate_qtd(ehcip, qtd);
2964 		qtd = temp_qtd;
2965 	}
2966 	tw->tw_qtd_free_list = NULL;
2967 }
2968 
2969 /*
2970  * Transfer Wrapper functions
2971  *
2972  * ehci_create_transfer_wrapper:
2973  *
2974  * Create a Transaction Wrapper (TW) and this involves the allocating of DMA
2975  * resources.
2976  */
2977 static ehci_trans_wrapper_t *
2978 ehci_create_transfer_wrapper(
2979 	ehci_state_t		*ehcip,
2980 	ehci_pipe_private_t	*pp,
2981 	size_t			length,
2982 	uint_t			usb_flags)
2983 {
2984 	ddi_device_acc_attr_t	dev_attr;
2985 	ddi_dma_attr_t		dma_attr;
2986 	int			result;
2987 	size_t			real_length;
2988 	ehci_trans_wrapper_t	*tw;
2989 	int			kmem_flag;
2990 	int			(*dmamem_wait)(caddr_t);
2991 	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
2992 
2993 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2994 	    "ehci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
2995 	    length, usb_flags);
2996 
2997 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2998 
2999 	/* SLEEP flag should not be used while holding mutex */
3000 	kmem_flag = KM_NOSLEEP;
3001 	dmamem_wait = DDI_DMA_DONTWAIT;
3002 
3003 	/* Allocate space for the transfer wrapper */
3004 	tw = kmem_zalloc(sizeof (ehci_trans_wrapper_t), kmem_flag);
3005 
3006 	if (tw == NULL) {
3007 		USB_DPRINTF_L2(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3008 		    "ehci_create_transfer_wrapper: kmem_zalloc failed");
3009 
3010 		return (NULL);
3011 	}
3012 
3013 	/* zero-length packet doesn't need to allocate dma memory */
3014 	if (length == 0) {
3015 
3016 		goto dmadone;
3017 	}
3018 
3019 	/* allow sg lists for transfer wrapper dma memory */
3020 	bcopy(&ehcip->ehci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
3021 	dma_attr.dma_attr_sgllen = EHCI_DMA_ATTR_TW_SGLLEN;
3022 	dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
3023 
3024 	/* Allocate the DMA handle */
3025 	result = ddi_dma_alloc_handle(ehcip->ehci_dip,
3026 	    &dma_attr, dmamem_wait, 0, &tw->tw_dmahandle);
3027 
3028 	if (result != DDI_SUCCESS) {
3029 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3030 		    "ehci_create_transfer_wrapper: Alloc handle failed");
3031 
3032 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3033 
3034 		return (NULL);
3035 	}
3036 
3037 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
3038 
3039 	/* no need for swapping the raw data */
3040 	dev_attr.devacc_attr_endian_flags  = DDI_NEVERSWAP_ACC;
3041 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
3042 
3043 	/* Allocate the memory */
3044 	result = ddi_dma_mem_alloc(tw->tw_dmahandle, length,
3045 	    &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait, NULL,
3046 	    (caddr_t *)&tw->tw_buf, &real_length, &tw->tw_accesshandle);
3047 
3048 	if (result != DDI_SUCCESS) {
3049 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3050 		    "ehci_create_transfer_wrapper: dma_mem_alloc fail");
3051 
3052 		ddi_dma_free_handle(&tw->tw_dmahandle);
3053 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3054 
3055 		return (NULL);
3056 	}
3057 
3058 	ASSERT(real_length >= length);
3059 
3060 	/* Bind the handle */
3061 	result = ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
3062 	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
3063 	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies);
3064 
3065 	if (result != DDI_DMA_MAPPED) {
3066 		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
3067 
3068 		ddi_dma_mem_free(&tw->tw_accesshandle);
3069 		ddi_dma_free_handle(&tw->tw_dmahandle);
3070 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3071 
3072 		return (NULL);
3073 	}
3074 
3075 	tw->tw_cookie_idx = 0;
3076 	tw->tw_dma_offs = 0;
3077 
3078 dmadone:
3079 	/*
3080 	 * Only allow one wrapper to be added at a time. Insert the
3081 	 * new transaction wrapper into the list for this pipe.
3082 	 */
3083 	if (pp->pp_tw_head == NULL) {
3084 		pp->pp_tw_head = tw;
3085 		pp->pp_tw_tail = tw;
3086 	} else {
3087 		pp->pp_tw_tail->tw_next = tw;
3088 		pp->pp_tw_tail = tw;
3089 	}
3090 
3091 	/* Store the transfer length */
3092 	tw->tw_length = length;
3093 
3094 	/* Store a back pointer to the pipe private structure */
3095 	tw->tw_pipe_private = pp;
3096 
3097 	/* Store the transfer type - synchronous or asynchronous */
3098 	tw->tw_flags = usb_flags;
3099 
3100 	/* Get and Store 32bit ID */
3101 	tw->tw_id = EHCI_GET_ID((void *)tw);
3102 
3103 	ASSERT(tw->tw_id != NULL);
3104 
3105 	/* isoc ep will not come here */
3106 	if (EHCI_INTR_ENDPOINT(eptd)) {
3107 		ehcip->ehci_periodic_req_count++;
3108 	} else {
3109 		ehcip->ehci_async_req_count++;
3110 	}
3111 	ehci_toggle_scheduler(ehcip);
3112 
3113 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3114 	    "ehci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
3115 	    (void *)tw, tw->tw_ncookies);
3116 
3117 	return (tw);
3118 }
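
/*
 * Editor's note (illustration only, not part of the original source):
 * ehci_create_transfer_wrapper() is the canonical DDI DMA setup sequence,
 * ddi_dma_alloc_handle() -> ddi_dma_mem_alloc() -> ddi_dma_addr_bind_handle(),
 * with each failure path unwinding exactly the steps that succeeded, in
 * reverse order:
 *
 *	alloc_handle fails:  free the TW only
 *	mem_alloc fails:     ddi_dma_free_handle(), free the TW
 *	bind_handle fails:   ddi_dma_mem_free(), ddi_dma_free_handle(),
 *			     free the TW
 *
 * A successful bind is later undone in ehci_free_tw(), which calls
 * ddi_dma_unbind_handle() before freeing the memory and the handle.
 */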
3119 
3120 
3121 /*
3122  * ehci_start_xfer_timer:
3123  *
3124  * Start the timer for the control, bulk and for one time interrupt
3125  * transfers.
3126  */
3127 /* ARGSUSED */
3128 static void
3129 ehci_start_xfer_timer(
3130 	ehci_state_t		*ehcip,
3131 	ehci_pipe_private_t	*pp,
3132 	ehci_trans_wrapper_t	*tw)
3133 {
3134 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3135 	    "ehci_start_xfer_timer: tw = 0x%p", (void *)tw);
3136 
3137 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3138 
3139 	/*
3140 	 * The timeout handling is done only for control, bulk and for
3141 	 * one time Interrupt transfers.
3142 	 *
3143 	 * NOTE: If the timeout is zero, assume an infinite timeout and don't
3144 	 * insert this transfer on the timeout list.
3145 	 */
3146 	if (tw->tw_timeout) {
3147 		/*
3148 		 * Add this transfer wrapper to the head of the pipe's
3149 		 * tw timeout list.
3150 		 */
3151 		if (pp->pp_timeout_list) {
3152 			tw->tw_timeout_next = pp->pp_timeout_list;
3153 		}
3154 
3155 		pp->pp_timeout_list = tw;
3156 		ehci_start_timer(ehcip, pp);
3157 	}
3158 }
3159 
3160 
3161 /*
3162  * ehci_stop_xfer_timer:
3163  *
3164  * Stop the timer for control, bulk and one time interrupt
3165  * transfers.
3166  */
3167 void
3168 ehci_stop_xfer_timer(
3169 	ehci_state_t		*ehcip,
3170 	ehci_trans_wrapper_t	*tw,
3171 	uint_t			flag)
3172 {
3173 	ehci_pipe_private_t	*pp;
3174 	timeout_id_t		timer_id;
3175 
3176 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3177 	    "ehci_stop_xfer_timer: tw = 0x%p", (void *)tw);
3178 
3179 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3180 
3181 	/* Obtain the pipe private structure */
3182 	pp = tw->tw_pipe_private;
3183 
3184 	/* check if the timeout tw list is empty */
3185 	if (pp->pp_timeout_list == NULL) {
3186 
3187 		return;
3188 	}
3189 
3190 	switch (flag) {
3191 	case EHCI_REMOVE_XFER_IFLAST:
3192 		if (tw->tw_qtd_head != tw->tw_qtd_tail) {
3193 			break;
3194 		}
3195 
3196 		/* FALLTHRU */
3197 	case EHCI_REMOVE_XFER_ALWAYS:
3198 		ehci_remove_tw_from_timeout_list(ehcip, tw);
3199 
3200 		if ((pp->pp_timeout_list == NULL) &&
3201 		    (pp->pp_timer_id)) {
3202 
3203 			timer_id = pp->pp_timer_id;
3204 
3205 			/* Reset the timer id to zero */
3206 			pp->pp_timer_id = 0;
3207 
3208 			mutex_exit(&ehcip->ehci_int_mutex);
3209 
3210 			(void) untimeout(timer_id);
3211 
3212 			mutex_enter(&ehcip->ehci_int_mutex);
3213 		}
3214 		break;
3215 	default:
3216 		break;
3217 	}
3218 }
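
/*
 * Editor's sketch (illustration only, not part of the original source):
 * the untimeout() call above follows the standard "clear the id, drop the
 * lock, then cancel" recipe for callouts whose handler grabs the same mutex:
 *
 *	timer_id = pp->pp_timer_id;
 *	pp->pp_timer_id = 0;		(handler sees 0 and bails out)
 *	mutex_exit(&ehcip->ehci_int_mutex);
 *	(void) untimeout(timer_id);	(may wait for a running handler)
 *	mutex_enter(&ehcip->ehci_int_mutex);
 *
 * Calling untimeout() with the mutex held could deadlock, because
 * ehci_xfer_timeout_handler() begins by taking that same mutex.
 */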
3219 
3220 
3221 /*
3222  * ehci_xfer_timeout_handler:
3223  *
3224  * Control or bulk transfer timeout handler.
3225  */
3226 static void
3227 ehci_xfer_timeout_handler(void *arg)
3228 {
3229 	usba_pipe_handle_data_t	*ph = (usba_pipe_handle_data_t *)arg;
3230 	ehci_state_t		*ehcip = ehci_obtain_state(
3231 	    ph->p_usba_device->usb_root_hub_dip);
3232 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3233 	ehci_trans_wrapper_t	*tw, *next;
3234 	ehci_trans_wrapper_t	*expire_xfer_list = NULL;
3235 	ehci_qtd_t		*qtd;
3236 
3237 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3238 	    "ehci_xfer_timeout_handler: ehcip = 0x%p, ph = 0x%p",
3239 	    (void *)ehcip, (void *)ph);
3240 
3241 	mutex_enter(&ehcip->ehci_int_mutex);
3242 
3243 	/*
3244 	 * Check whether the timeout handler is still valid.
3245 	 */
3246 	if (pp->pp_timer_id != 0) {
3247 
3248 		/* Reset the timer id to zero */
3249 		pp->pp_timer_id = 0;
3250 	} else {
3251 		mutex_exit(&ehcip->ehci_int_mutex);
3252 
3253 		return;
3254 	}
3255 
3256 	/* Get the transfer timeout list head */
3257 	tw = pp->pp_timeout_list;
3258 
3259 	while (tw) {
3260 
3261 		/* Get the transfer on the timeout list */
3262 		next = tw->tw_timeout_next;
3263 
3264 		tw->tw_timeout--;
3265 
3266 		if (tw->tw_timeout <= 0) {
3267 
3268 			/* remove the tw from the timeout list */
3269 			ehci_remove_tw_from_timeout_list(ehcip, tw);
3270 
3271 			/* remove QTDs from active QTD list */
3272 			qtd = tw->tw_qtd_head;
3273 			while (qtd) {
3274 				ehci_remove_qtd_from_active_qtd_list(
3275 				    ehcip, qtd);
3276 
3277 				/* Get the next QTD from the wrapper */
3278 				qtd = ehci_qtd_iommu_to_cpu(ehcip,
3279 				    Get_QTD(qtd->qtd_tw_next_qtd));
3280 			}
3281 
3282 			/*
3283 			 * Preserve the order in which the requests
3284 			 * were started.
3285 			 */
3286 			tw->tw_timeout_next = expire_xfer_list;
3287 			expire_xfer_list = tw;
3288 		}
3289 
3290 		tw = next;
3291 	}
3292 
3293 	/*
3294 	 * The timer should be started before the callbacks.
3295 	 * There is always a chance that ehci interrupts come
3296 	 * in when we release the mutex while calling the tw back.
3297 	 * To keep an accurate timeout it should be restarted
3298 	 * as soon as possible.
3299 	 */
3300 	ehci_start_timer(ehcip, pp);
3301 
3302 	/* Get the expired transfer timeout list head */
3303 	tw = expire_xfer_list;
3304 
3305 	while (tw) {
3306 
3307 		/* Get the next tw on the expired transfer timeout list */
3308 		next = tw->tw_timeout_next;
3309 
3310 		/*
3311 		 * The error handling routine will release the mutex when
3312 		 * calling back to USBA, but this will not cause any race.
3313 		 * We do the callback and are relying on ehci_pipe_cleanup()
3314 		 * to halt the queue head and clean up since we should not
3315 		 * block in timeout context.
3316 		 */
3317 		ehci_handle_error(ehcip, tw->tw_qtd_head, USB_CR_TIMEOUT);
3318 
3319 		tw = next;
3320 	}
3321 	mutex_exit(&ehcip->ehci_int_mutex);
3322 }
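
/*
 * Editor's note (illustration only, not part of the original source):
 * transfer timeouts use one one-second callout per pipe rather than one
 * callout per transfer; tw_timeout is a countdown in seconds.  Each tick
 * the handler above:
 *
 *	1. bails out if pp_timer_id was already cleared (cancelled race),
 *	2. decrements every queued transfer's countdown,
 *	3. moves expired transfers onto a private expire list and pulls
 *	   their QTDs off the active list,
 *	4. re-arms the one-second timer via ehci_start_timer(), and
 *	5. only then runs the USB_CR_TIMEOUT callbacks, which may drop
 *	   the mutex.
 *
 * Re-arming before the callbacks keeps the tick close to one second even
 * when callbacks are slow.
 */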
3323 
3324 
3325 /*
3326  * ehci_remove_tw_from_timeout_list:
3327  *
3328  * Remove Control or bulk transfer from the timeout list.
3329  */
3330 static void
3331 ehci_remove_tw_from_timeout_list(
3332 	ehci_state_t		*ehcip,
3333 	ehci_trans_wrapper_t	*tw)
3334 {
3335 	ehci_pipe_private_t	*pp;
3336 	ehci_trans_wrapper_t	*prev, *next;
3337 
3338 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3339 	    "ehci_remove_tw_from_timeout_list: tw = 0x%p", (void *)tw);
3340 
3341 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3342 
3343 	/* Obtain the pipe private structure */
3344 	pp = tw->tw_pipe_private;
3345 
3346 	if (pp->pp_timeout_list) {
3347 		if (pp->pp_timeout_list == tw) {
3348 			pp->pp_timeout_list = tw->tw_timeout_next;
3349 
3350 			tw->tw_timeout_next = NULL;
3351 		} else {
3352 			prev = pp->pp_timeout_list;
3353 			next = prev->tw_timeout_next;
3354 
3355 			while (next && (next != tw)) {
3356 				prev = next;
3357 				next = next->tw_timeout_next;
3358 			}
3359 
3360 			if (next == tw) {
3361 				prev->tw_timeout_next =
3362 				    next->tw_timeout_next;
3363 				tw->tw_timeout_next = NULL;
3364 			}
3365 		}
3366 	}
3367 }
3368 
3369 
3370 /*
3371  * ehci_start_timer:
3372  *
3373  * Start the pipe's timer
3374  */
3375 static void
3376 ehci_start_timer(
3377 	ehci_state_t		*ehcip,
3378 	ehci_pipe_private_t	*pp)
3379 {
3380 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3381 	    "ehci_start_timer: ehcip = 0x%p, pp = 0x%p",
3382 	    (void *)ehcip, (void *)pp);
3383 
3384 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3385 
3386 	/*
3387 	 * Start the pipe's timer only if the timer is not currently
3388 	 * running and there are transfers on the timeout
3389 	 * list. This timer is per pipe.
3390 	 */
3391 	if ((!pp->pp_timer_id) && (pp->pp_timeout_list)) {
3392 		pp->pp_timer_id = timeout(ehci_xfer_timeout_handler,
3393 		    (void *)(pp->pp_pipe_handle), drv_usectohz(1000000));
3394 	}
3395 }
3396 
3397 /*
3398  * ehci_deallocate_tw:
3399  *
3400  * Deallocate a Transaction Wrapper (TW); this involves the freeing of
3401  * its DMA resources.
3402  */
3403 void
3404 ehci_deallocate_tw(
3405 	ehci_state_t		*ehcip,
3406 	ehci_pipe_private_t	*pp,
3407 	ehci_trans_wrapper_t	*tw)
3408 {
3409 	ehci_trans_wrapper_t	*prev, *next;
3410 
3411 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3412 	    "ehci_deallocate_tw: tw = 0x%p", (void *)tw);
3413 
3414 	/*
3415 	 * If the transfer wrapper has no Host Controller (HC)
3416 	 * Transfer Descriptors (QTD) associated with it,  then
3417 	 * remove the transfer wrapper.
3418 	 */
3419 	if (tw->tw_qtd_head) {
3420 		ASSERT(tw->tw_qtd_tail != NULL);
3421 
3422 		return;
3423 	}
3424 
3425 	ASSERT(tw->tw_qtd_tail == NULL);
3426 
3427 	/* Make sure we return all the unused qtd's to the pool as well */
3428 	ehci_free_tw_td_resources(ehcip, tw);
3429 
3430 	/*
3431 	 * If pp->pp_tw_head and pp->pp_tw_tail are pointing to the
3432 	 * given TW then set the head and tail equal to NULL.
3433 	 * Otherwise search for this TW in the linked TW list
3434 	 * and then remove this TW from the list.
3435 	 */
3436 	if (pp->pp_tw_head == tw) {
3437 		if (pp->pp_tw_tail == tw) {
3438 			pp->pp_tw_head = NULL;
3439 			pp->pp_tw_tail = NULL;
3440 		} else {
3441 			pp->pp_tw_head = tw->tw_next;
3442 		}
3443 	} else {
3444 		prev = pp->pp_tw_head;
3445 		next = prev->tw_next;
3446 
3447 		while (next && (next != tw)) {
3448 			prev = next;
3449 			next = next->tw_next;
3450 		}
3451 
3452 		if (next == tw) {
3453 			prev->tw_next = next->tw_next;
3454 
3455 			if (pp->pp_tw_tail == tw) {
3456 				pp->pp_tw_tail = prev;
3457 			}
3458 		}
3459 	}
3460 
3461 	/*
3462 	 * Make sure that this TW has been removed
3463 	 * from the timeout list.
3464 	 */
3465 	ehci_remove_tw_from_timeout_list(ehcip, tw);
3466 
3467 	/* Deallocate this TW */
3468 	ehci_free_tw(ehcip, pp, tw);
3469 }
3470 
3471 
3472 /*
3473  * ehci_free_dma_resources:
3474  *
3475  * Free dma resources of a Transfer Wrapper (TW) and also free the TW.
3476  *
3477  * NOTE: This function is also called from POLLED MODE.
3478  */
3479 void
3480 ehci_free_dma_resources(
3481 	ehci_state_t		*ehcip,
3482 	usba_pipe_handle_data_t	*ph)
3483 {
3484 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3485 	ehci_trans_wrapper_t	*head_tw = pp->pp_tw_head;
3486 	ehci_trans_wrapper_t	*next_tw, *tw;
3487 
3488 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3489 	    "ehci_free_dma_resources: ph = 0x%p", (void *)ph);
3490 
3491 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3492 
3493 	/* Process the Transfer Wrappers */
3494 	next_tw = head_tw;
3495 	while (next_tw) {
3496 		tw = next_tw;
3497 		next_tw = tw->tw_next;
3498 
3499 		USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3500 		    "ehci_free_dma_resources: Free TW = 0x%p", (void *)tw);
3501 
3502 		ehci_free_tw(ehcip, pp, tw);
3503 	}
3504 
3505 	/* Adjust the head and tail pointers */
3506 	pp->pp_tw_head = NULL;
3507 	pp->pp_tw_tail = NULL;
3508 }
3509 
3510 
3511 /*
3512  * ehci_free_tw:
3513  *
3514  * Free the Transfer Wrapper (TW).
3515  */
3516 /*ARGSUSED*/
3517 static void
3518 ehci_free_tw(
3519 	ehci_state_t		*ehcip,
3520 	ehci_pipe_private_t	*pp,
3521 	ehci_trans_wrapper_t	*tw)
3522 {
3523 	int	rval;
3524 	usb_ep_descr_t	*eptd = &pp->pp_pipe_handle->p_ep;
3525 
3526 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3527 	    "ehci_free_tw: tw = 0x%p", (void *)tw);
3528 
3529 	ASSERT(tw != NULL);
3530 	ASSERT(tw->tw_id != NULL);
3531 
3532 	/* Free 32bit ID */
3533 	EHCI_FREE_ID((uint32_t)tw->tw_id);
3534 
3535 	if (tw->tw_dmahandle != NULL) {
3536 		rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
3537 		ASSERT(rval == DDI_SUCCESS);
3538 
3539 		ddi_dma_mem_free(&tw->tw_accesshandle);
3540 		ddi_dma_free_handle(&tw->tw_dmahandle);
3541 	}
3542 
3543 	/* interrupt ep will come to this point */
3544 	if (EHCI_INTR_ENDPOINT(eptd)) {
3545 		ehcip->ehci_periodic_req_count--;
3546 	} else {
3547 		ehcip->ehci_async_req_count--;
3548 	}
3549 	ehci_toggle_scheduler(ehcip);
3550 
3551 	/* Free transfer wrapper */
3552 	kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3553 }
3554 
3555 
3556 /*
3557  * Miscellaneous functions
3558  */
3559 
3560 /*
3561  * ehci_allocate_intr_in_resource
3562  *
3563  * Allocate interrupt request structure for the interrupt IN transfer.
3564  */
3565 /*ARGSUSED*/
3566 int
3567 ehci_allocate_intr_in_resource(
3568 	ehci_state_t		*ehcip,
3569 	ehci_pipe_private_t	*pp,
3570 	ehci_trans_wrapper_t	*tw,
3571 	usb_flags_t		flags)
3572 {
3573 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3574 	usb_intr_req_t		*curr_intr_reqp;
3575 	usb_opaque_t		client_periodic_in_reqp;
3576 	size_t			length = 0;
3577 
3578 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3579 	    "ehci_allocate_intr_in_resource: "
3580 	    "pp = 0x%p tw = 0x%p flags = 0x%x", (void *)pp, (void *)tw, flags);
3581 
3582 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3583 	ASSERT(tw->tw_curr_xfer_reqp == NULL);
3584 
3585 	/* Get the client periodic in request pointer */
3586 	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;
3587 
3588 	/*
3589 	 * If a client periodic IN request was saved at start-polling
3590 	 * time, allocate a corresponding usb periodic IN request for
3591 	 * the current periodic polling request by duplicating the
3592 	 * saved periodic request structure.
3593 	 */
3594 	if (client_periodic_in_reqp) {
3595 
3596 		/* Get the interrupt transfer length */
3597 		length = ((usb_intr_req_t *)
3598 		    client_periodic_in_reqp)->intr_len;
3599 
3600 		curr_intr_reqp = usba_hcdi_dup_intr_req(ph->p_dip,
3601 		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
3602 	} else {
3603 		curr_intr_reqp = usb_alloc_intr_req(ph->p_dip, length, flags);
3604 	}
3605 
3606 	if (curr_intr_reqp == NULL) {
3607 
3608 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3609 		    "ehci_allocate_intr_in_resource: Interrupt "
3610 		    "request structure allocation failed");
3611 
3612 		return (USB_NO_RESOURCES);
3613 	}
3614 
3615 	/* For polled mode */
3616 	if (client_periodic_in_reqp == NULL) {
3617 		curr_intr_reqp->intr_attributes = USB_ATTRS_SHORT_XFER_OK;
3618 		curr_intr_reqp->intr_len = ph->p_ep.wMaxPacketSize;
3619 	} else {
3620 		/* Check and save the timeout value */
3621 		tw->tw_timeout = (curr_intr_reqp->intr_attributes &
3622 		    USB_ATTRS_ONE_XFER) ? curr_intr_reqp->intr_timeout: 0;
3623 	}
3624 
3625 	tw->tw_curr_xfer_reqp = (usb_opaque_t)curr_intr_reqp;
3626 	tw->tw_length = curr_intr_reqp->intr_len;
3627 
3628 	mutex_enter(&ph->p_mutex);
3629 	ph->p_req_count++;
3630 	mutex_exit(&ph->p_mutex);
3631 
3632 	pp->pp_state = EHCI_PIPE_STATE_ACTIVE;
3633 
3634 	return (USB_SUCCESS);
3635 }
3636 
3637 /*
3638  * ehci_pipe_cleanup
3639  *
3640  * Cleanup ehci pipe.
3641  */
3642 void
3643 ehci_pipe_cleanup(
3644 	ehci_state_t		*ehcip,
3645 	usba_pipe_handle_data_t	*ph)
3646 {
3647 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3648 	uint_t			pipe_state = pp->pp_state;
3649 	usb_cr_t		completion_reason;
3650 	usb_ep_descr_t		*eptd = &ph->p_ep;
3651 
3652 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3653 	    "ehci_pipe_cleanup: ph = 0x%p", (void *)ph);
3654 
3655 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3656 
3657 	if (EHCI_ISOC_ENDPOINT(eptd)) {
3658 		ehci_isoc_pipe_cleanup(ehcip, ph);
3659 
3660 		return;
3661 	}
3662 
3663 	ASSERT(!servicing_interrupt());
3664 
3665 	/*
3666 	 * Set the QH's status to the Halt condition.
3667 	 * If another thread is already halting, this function will
3668 	 * automatically wait. If a pipe close happens at this time
3669 	 * we will be in lots of trouble.
3670 	 * If we are in an interrupt thread, don't halt, because it may
3671 	 * do a wait_for_sof.
3672 	 */
3673 	ehci_modify_qh_status_bit(ehcip, pp, SET_HALT);
3674 
3675 	/*
3676 	 * Wait for all completed transfers to be processed and
3677 	 * their results sent upstream.
3678 	 */
3679 	ehci_wait_for_transfers_completion(ehcip, pp);
3680 
3681 	/* Save the data toggle information */
3682 	ehci_save_data_toggle(ehcip, ph);
3683 
3684 	/*
3685 	 * Traverse the list of QTDs for this pipe using transfer
3686 	 * wrapper. Process these QTDs depending on their status.
3687 	 * And stop the timer of this pipe.
3688 	 */
3689 	ehci_traverse_qtds(ehcip, ph);
3690 
3691 	/* Make sure the timer is not running */
3692 	ASSERT(pp->pp_timer_id == 0);
3693 
3694 	/* Do callbacks for all unfinished requests */
3695 	ehci_handle_outstanding_requests(ehcip, pp);
3696 
3697 	/* Free DMA resources */
3698 	ehci_free_dma_resources(ehcip, ph);
3699 
3700 	switch (pipe_state) {
3701 	case EHCI_PIPE_STATE_CLOSE:
3702 		completion_reason = USB_CR_PIPE_CLOSING;
3703 		break;
3704 	case EHCI_PIPE_STATE_RESET:
3705 	case EHCI_PIPE_STATE_STOP_POLLING:
3706 		/* Set completion reason */
3707 		completion_reason = (pipe_state ==
3708 		    EHCI_PIPE_STATE_RESET) ?
3709 		    USB_CR_PIPE_RESET: USB_CR_STOPPED_POLLING;
3710 
3711 		/* Restore the data toggle information */
3712 		ehci_restore_data_toggle(ehcip, ph);
3713 
3714 		/*
3715 		 * Clear the halt bit to restart all the
3716 		 * transactions on this pipe.
3717 		 */
3718 		ehci_modify_qh_status_bit(ehcip, pp, CLEAR_HALT);
3719 
3720 		/* Set pipe state to idle */
3721 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
3722 
3723 		break;
3724 	}
3725 
3726 	/*
3727 	 * Do the callback for the original client
3728 	 * periodic IN request.
3729 	 */
3730 	if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3731 	    ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) ==
3732 	    USB_EP_DIR_IN)) {
3733 
3734 		ehci_do_client_periodic_in_req_callback(
3735 		    ehcip, pp, completion_reason);
3736 	}
3737 }
3738 
3739 
3740 /*
3741  * ehci_wait_for_transfers_completion:
3742  *
3743  * Wait for all completed transfers to be processed and their results
3744  * sent upstream.
3745  */
3746 static void
3747 ehci_wait_for_transfers_completion(
3748 	ehci_state_t		*ehcip,
3749 	ehci_pipe_private_t	*pp)
3750 {
3751 	ehci_trans_wrapper_t	*next_tw = pp->pp_tw_head;
3752 	ehci_qtd_t		*qtd;
3753 
3754 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3755 	    ehcip->ehci_log_hdl,
3756 	    "ehci_wait_for_transfers_completion: pp = 0x%p", (void *)pp);
3757 
3758 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3759 
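	/*
	 * If the host controller is no longer operational, no further
	 * done-list processing will occur, so there is nothing to wait for.
	 */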
3760 	if ((ehci_state_is_operational(ehcip)) != USB_SUCCESS) {
3761 
3762 		return;
3763 	}
3764 
3765 	pp->pp_count_done_qtds = 0;
3766 
3767 	/* Process the transfer wrappers for this pipe */
3768 	while (next_tw) {
3769 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
3770 
3771 		/*
3772 		 * Walk through each QTD for this transfer wrapper.
3773 		 * If a QTD still exists, then it is either on the done
3774 		 * list or on the QH's list.
3775 		 */
3776 		while (qtd) {
3777 			if (!(Get_QTD(qtd->qtd_ctrl) &
3778 			    EHCI_QTD_CTRL_ACTIVE_XACT)) {
3779 				pp->pp_count_done_qtds++;
3780 			}
3781 
3782 			qtd = ehci_qtd_iommu_to_cpu(ehcip,
3783 			    Get_QTD(qtd->qtd_tw_next_qtd));
3784 		}
3785 
3786 		next_tw = next_tw->tw_next;
3787 	}
3788 
3789 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3790 	    "ehci_wait_for_transfers_completion: count_done_qtds = 0x%x",
3791 	    pp->pp_count_done_qtds);
3792 
3793 	if (!pp->pp_count_done_qtds) {
3794 
3795 		return;
3796 	}
3797 
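	/*
	 * Wait up to EHCI_XFER_CMPL_TIMEWAIT seconds for the outstanding
	 * done QTDs to be processed and completion to be signalled from
	 * ehci_check_for_transfers_completion().
	 */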
3798 	(void) cv_reltimedwait(&pp->pp_xfer_cmpl_cv, &ehcip->ehci_int_mutex,
3799 	    drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000), TR_CLOCK_TICK);
3800 
3801 	if (pp->pp_count_done_qtds) {
3802 
3803 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3804 		    "ehci_wait_for_transfers_completion: "
3805 		    "no transfer completion confirmation received");
3806 	}
3807 }
3808 
3809 /*
3810  * ehci_check_for_transfers_completion:
3811  *
3812  * Check whether anybody is waiting for the transfer completion event. If so,
3813  * send this event and also stop initiating any new transfers on this pipe.
3814  */
3815 void
3816 ehci_check_for_transfers_completion(
3817 	ehci_state_t		*ehcip,
3818 	ehci_pipe_private_t	*pp)
3819 {
3820 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3821 	    ehcip->ehci_log_hdl,
3822 	    "ehci_check_for_transfers_completion: pp = 0x%p", (void *)pp);
3823 
3824 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3825 
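	/*
	 * If polling was stopped due to a resource shortage
	 * (pp_error == USB_CR_NO_RESOURCES) and no periodic requests
	 * remain outstanding, complete the client's original periodic
	 * IN request now.
	 */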
3826 	if ((pp->pp_state == EHCI_PIPE_STATE_STOP_POLLING) &&
3827 	    (pp->pp_error == USB_CR_NO_RESOURCES) &&
3828 	    (pp->pp_cur_periodic_req_cnt == 0)) {
3829 
3830 		/* Reset pipe error to zero */
3831 		pp->pp_error = 0;
3832 
3833 		/* Do callback for original request */
3834 		ehci_do_client_periodic_in_req_callback(
3835 		    ehcip, pp, USB_CR_NO_RESOURCES);
3836 	}
3837 
3838 	if (pp->pp_count_done_qtds) {
3839 
3840 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3841 		    "ehci_check_for_transfers_completion: "
3842 		    "count_done_qtds = 0x%x", pp->pp_count_done_qtds);
3843 
3844 		/* Decrement the done qtd count */
3845 		pp->pp_count_done_qtds--;
3846 
3847 		if (!pp->pp_count_done_qtds) {
3848 
3849 			USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3850 			    "ehci_check_for_transfers_completion: "
3851 			    "sent transfer completion event pp = 0x%p",
3852 			    (void *)pp);
3853 
3854 			/* Send the transfer completion signal */
3855 			cv_signal(&pp->pp_xfer_cmpl_cv);
3856 		}
3857 	}
3858 }
3859 
3860 
3861 /*
3862  * ehci_save_data_toggle:
3863  *
3864  * Save the data toggle information.
3865  */
3866 static void
3867 ehci_save_data_toggle(
3868 	ehci_state_t		*ehcip,
3869 	usba_pipe_handle_data_t	*ph)
3870 {
3871 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3872 	usb_ep_descr_t		*eptd = &ph->p_ep;
3873 	uint_t			data_toggle;
3874 	usb_cr_t		error = pp->pp_error;
3875 	ehci_qh_t		*qh = pp->pp_qh;
3876 
3877 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3878 	    ehcip->ehci_log_hdl,
3879 	    "ehci_save_data_toggle: ph = 0x%p", (void *)ph);
3880 
3881 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3882 
3883 	/* Reset the pipe error value */
3884 	pp->pp_error = USB_CR_OK;
3885 
3886 	/* Return immediately if it is a control pipe */
3887 	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3888 	    USB_EP_ATTR_CONTROL) {
3889 
3890 		return;
3891 	}
3892 
3893 	/* Get the data toggle information from the endpoint (QH) */
3894 	data_toggle = (Get_QH(qh->qh_status) &
3895 	    EHCI_QH_STS_DATA_TOGGLE) ? DATA1 : DATA0;
3896 
3897 	/*
3898 	 * If the error is STALL, set the data toggle to DATA0; clearing
3899 	 * the resulting endpoint halt resets the device's toggle to DATA0.
3900 	 */
3901 	if (error == USB_CR_STALL) {
3902 		data_toggle = DATA0;
3903 	}
3904 
3905 	/*
3906 	 * Save the data toggle information
3907 	 * in the usb device structure.
3908 	 */
3909 	mutex_enter(&ph->p_mutex);
3910 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3911 	    data_toggle);
3912 	mutex_exit(&ph->p_mutex);
3913 }
3914 
3915 
3916 /*
3917  * ehci_restore_data_toggle:
3918  *
3919  * Restore the data toggle information.
3920  */
3921 void
3922 ehci_restore_data_toggle(
3923 	ehci_state_t		*ehcip,
3924 	usba_pipe_handle_data_t	*ph)
3925 {
3926 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3927 	usb_ep_descr_t		*eptd = &ph->p_ep;
3928 	uint_t			data_toggle = 0;
3929 
3930 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3931 	    ehcip->ehci_log_hdl,
3932 	    "ehci_restore_data_toggle: ph = 0x%p", (void *)ph);
3933 
3934 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3935 
3936 	/* Return immediately if it is a control pipe */
3937 	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3938 	    USB_EP_ATTR_CONTROL) {
3939 
3940 		return;
3941 	}
3942 
3943 	mutex_enter(&ph->p_mutex);
3944 
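	/*
	 * Fetch the toggle value saved by ehci_save_data_toggle() and
	 * clear the saved copy, since it is about to be restored into
	 * the QH below.
	 */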
3945 	data_toggle = usba_hcdi_get_data_toggle(ph->p_usba_device,
3946 	    ph->p_ep.bEndpointAddress);
3947 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3948 	    0);
3949 
3950 	mutex_exit(&ph->p_mutex);
3951 
3952 	/*
3953 	 * Restore the data toggle bit depending on the
3954 	 * previous data toggle information.
3955 	 */
3956 	if (data_toggle) {
3957 		Set_QH(pp->pp_qh->qh_status,
3958 		    Get_QH(pp->pp_qh->qh_status) | EHCI_QH_STS_DATA_TOGGLE);
3959 	} else {
3960 		Set_QH(pp->pp_qh->qh_status,
3961 		    Get_QH(pp->pp_qh->qh_status) & (~EHCI_QH_STS_DATA_TOGGLE));
3962 	}
3963 }
3964 
3965 
3966 /*
3967  * ehci_handle_outstanding_requests
3968  *
3969  * Deallocate the pre-allocated interrupt IN request structures and
3970  * do the callbacks for all other unfinished requests.
3971  *
3972  * NOTE: This function is also called from POLLED MODE.
3973  */
3974 void
3975 ehci_handle_outstanding_requests(
3976 	ehci_state_t		*ehcip,
3977 	ehci_pipe_private_t	*pp)
3978 {
3979 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3980 	usb_ep_descr_t		*eptd = &ph->p_ep;
3981 	ehci_trans_wrapper_t	*curr_tw;
3982 	ehci_trans_wrapper_t	*next_tw;
3983 	usb_opaque_t		curr_xfer_reqp;
3984 
3985 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3986 	    ehcip->ehci_log_hdl,
3987 	    "ehci_handle_outstanding_requests: pp = 0x%p", (void *)pp);
3988 
3989 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3990 
3991 	/* Deallocate all pre-allocated interrupt requests */
3992 	next_tw = pp->pp_tw_head;
3993 
3994 	while (next_tw) {
3995 		curr_tw = next_tw;
3996 		next_tw = curr_tw->tw_next;
3997 
3998 		curr_xfer_reqp = curr_tw->tw_curr_xfer_reqp;
3999 
4000 		/* Deallocate current interrupt request */
4001 		if (curr_xfer_reqp) {
4002 
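			/*
			 * Periodic IN requests carry driver pre-allocated
			 * resources and are torn down via
			 * ehci_deallocate_intr_in_resource(); any other
			 * outstanding request is returned to the client
			 * with USB_CR_FLUSHED.
			 */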
4003 			if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
4004 			    (curr_tw->tw_direction == EHCI_QTD_CTRL_IN_PID)) {
4005 
4006 				/* Decrement periodic in request count */
4007 				pp->pp_cur_periodic_req_cnt--;
4008 
4009 				ehci_deallocate_intr_in_resource(
4010 				    ehcip, pp, curr_tw);
4011 			} else {
4012 				ehci_hcdi_callback(ph, curr_tw, USB_CR_FLUSHED);
4013 			}
4014 		}
4015 	}
4016 }
4017 
4018 
4019 /*
4020  * ehci_deallocate_intr_in_resource
4021  *
4022  * Deallocate the interrupt request structure for an interrupt IN transfer.
4023  */
4024 void
4025 ehci_deallocate_intr_in_resource(
4026 	ehci_state_t		*ehcip,
4027 	ehci_pipe_private_t	*pp,
4028 	ehci_trans_wrapper_t	*tw)
4029 {
4030 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
4031 	uchar_t			ep_attr = ph->p_ep.bmAttributes;
4032 	usb_opaque_t		curr_xfer_reqp;
4033 
4034 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
4035 	    ehcip->ehci_log_hdl,
4036 	    "ehci_deallocate_intr_in_resource: "
4037 	    "pp = 0x%p tw = 0x%p", (void *)pp, (void *)tw);
4038 
4039 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4040 	ASSERT((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR);
4041 
4042 	curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4043 
4044 	/* Check the current periodic in request pointer */
4045 	if (curr_xfer_reqp) {
4046 
4047 		tw->tw_curr_xfer_reqp = NULL;
4048 
4049 		mutex_enter(&ph->p_mutex);
4050 		ph->p_req_count--;
4051 		mutex_exit(&ph->p_mutex);
4052 
4053 		/* Free pre-allocated interrupt requests */
4054 		usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
4055 
4056 		/* Set periodic in pipe state to idle */
4057 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
4058 	}
4059 }
4060 
4061 
4062 /*
4063  * ehci_do_client_periodic_in_req_callback
4064  *
4065  * Do callback for the original client periodic IN request.
4066  */
4067 void
4068 ehci_do_client_periodic_in_req_callback(
4069 	ehci_state_t		*ehcip,
4070 	ehci_pipe_private_t	*pp,
4071 	usb_cr_t		completion_reason)
4072 {
4073 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
4074 	usb_ep_descr_t		*eptd = &ph->p_ep;
4075 
4076 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
4077 	    ehcip->ehci_log_hdl,
4078 	    "ehci_do_client_periodic_in_req_callback: "
4079 	    "pp = 0x%p cc = 0x%x", (void *)pp, completion_reason);
4080 
4081 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4082 
4083 	/*
4084 	 * For an interrupt or isochronous IN pipe, check whether a callback
4085 	 * is needed for the original client's periodic IN request.
4086 	 */
4087 	if (pp->pp_client_periodic_in_reqp) {
4088 		ASSERT(pp->pp_cur_periodic_req_cnt == 0);
4089 		if (EHCI_ISOC_ENDPOINT(eptd)) {
4090 			ehci_hcdi_isoc_callback(ph, NULL, completion_reason);
4091 		} else {
4092 			ehci_hcdi_callback(ph, NULL, completion_reason);
4093 		}
4094 	}
4095 }
4096 
4097 
4098 /*
4099  * ehci_hcdi_callback()
4100  *
4101  * Convenience wrapper around usba_hcdi_cb() for non-root-hub pipes.
4102  */
4103 void
4104 ehci_hcdi_callback(
4105 	usba_pipe_handle_data_t	*ph,
4106 	ehci_trans_wrapper_t	*tw,
4107 	usb_cr_t		completion_reason)
4108 {
4109 	ehci_state_t		*ehcip = ehci_obtain_state(
4110 	    ph->p_usba_device->usb_root_hub_dip);
4111 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
4112 	usb_opaque_t		curr_xfer_reqp;
4113 	uint_t			pipe_state = 0;
4114 
4115 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
4116 	    "ehci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
4117 	    (void *)ph, (void *)tw, completion_reason);
4118 
4119 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4120 
4121 	/* Set the pipe state as per completion reason */
4122 	switch (completion_reason) {
4123 	case USB_CR_OK:
4124 		pipe_state = pp->pp_state;
4125 		break;
4126 	case USB_CR_NO_RESOURCES:
4127 	case USB_CR_NOT_SUPPORTED:
4128 	case USB_CR_PIPE_RESET:
4129 	case USB_CR_STOPPED_POLLING:
4130 		pipe_state = EHCI_PIPE_STATE_IDLE;
4131 		break;
4132 	case USB_CR_PIPE_CLOSING:
4133 		break;
4134 	default:
4135 		/* Set the pipe state to error */
4136 		pipe_state = EHCI_PIPE_STATE_ERROR;
4137 		pp->pp_error = completion_reason;
4138 		break;
4139 
4141 
4142 	pp->pp_state = pipe_state;
4143 
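	/*
	 * Complete the transfer wrapper's current request if there is one;
	 * otherwise this callback is for the original client periodic IN
	 * request saved in the pipe private structure.
	 */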
4144 	if (tw && tw->tw_curr_xfer_reqp) {
4145 		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4146 		tw->tw_curr_xfer_reqp = NULL;
4147 	} else {
4148 		ASSERT(pp->pp_client_periodic_in_reqp != NULL);
4149 
4150 		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
4151 		pp->pp_client_periodic_in_reqp = NULL;
4152 	}
4153 
4154 	ASSERT(curr_xfer_reqp != NULL);
4155 
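	/*
	 * Drop the interrupt mutex across the USBA callback; the client's
	 * callback routine may re-enter the HCD (for example to queue
	 * another request), which must not deadlock on ehci_int_mutex.
	 */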
4156 	mutex_exit(&ehcip->ehci_int_mutex);
4157 
4158 	usba_hcdi_cb(ph, curr_xfer_reqp, completion_reason);
4159 
4160 	mutex_enter(&ehcip->ehci_int_mutex);
4161 }
4162