xref: /illumos-gate/usr/src/uts/common/io/usb/hcd/ehci/ehci_xfer.c (revision bea83d026ee1bd1b2a2419e1d0232f107a5d7d9b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * EHCI Host Controller Driver (EHCI)
30  *
31  * The EHCI driver is a software driver which interfaces to the Universal
32  * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
33  * the Host Controller is defined by the EHCI Host Controller Interface.
34  *
35  * This module contains the main EHCI driver code which handles all USB
36  * transfers, bandwidth allocations and other general functionalities.
37  */
38 
39 #include <sys/usb/hcd/ehci/ehcid.h>
40 #include <sys/usb/hcd/ehci/ehci_intr.h>
41 #include <sys/usb/hcd/ehci/ehci_util.h>
42 #include <sys/usb/hcd/ehci/ehci_isoch.h>
43 
44 /* Adjustable variables for the size of the pools */
45 extern int ehci_qh_pool_size;
46 extern int ehci_qtd_pool_size;
47 
48 
49 /* Endpoint Descriptor (QH) related functions */
50 ehci_qh_t	*ehci_alloc_qh(
51 				ehci_state_t		*ehcip,
52 				usba_pipe_handle_data_t	*ph,
53 				uint_t			flag);
54 static void	ehci_unpack_endpoint(
55 				ehci_state_t		*ehcip,
56 				usba_pipe_handle_data_t	*ph,
57 				ehci_qh_t		*qh);
58 void		ehci_insert_qh(
59 				ehci_state_t		*ehcip,
60 				usba_pipe_handle_data_t	*ph);
61 static void	ehci_insert_async_qh(
62 				ehci_state_t		*ehcip,
63 				ehci_pipe_private_t	*pp);
64 static void	ehci_insert_intr_qh(
65 				ehci_state_t		*ehcip,
66 				ehci_pipe_private_t	*pp);
67 static void	ehci_modify_qh_status_bit(
68 				ehci_state_t		*ehcip,
69 				ehci_pipe_private_t	*pp,
70 				halt_bit_t		action);
71 static void	ehci_halt_hs_qh(
72 				ehci_state_t		*ehcip,
73 				ehci_pipe_private_t	*pp,
74 				ehci_qh_t		*qh);
75 static void	ehci_halt_fls_ctrl_and_bulk_qh(
76 				ehci_state_t		*ehcip,
77 				ehci_pipe_private_t	*pp,
78 				ehci_qh_t		*qh);
79 static void	ehci_clear_tt_buffer(
80 				ehci_state_t		*ehcip,
81 				usba_pipe_handle_data_t	*ph,
82 				ehci_qh_t		*qh);
83 static void	ehci_halt_fls_intr_qh(
84 				ehci_state_t		*ehcip,
85 				ehci_qh_t		*qh);
86 void		ehci_remove_qh(
87 				ehci_state_t		*ehcip,
88 				ehci_pipe_private_t	*pp,
89 				boolean_t		reclaim);
90 static void	ehci_remove_async_qh(
91 				ehci_state_t		*ehcip,
92 				ehci_pipe_private_t	*pp,
93 				boolean_t		reclaim);
94 static void	ehci_remove_intr_qh(
95 				ehci_state_t		*ehcip,
96 				ehci_pipe_private_t	*pp,
97 				boolean_t		reclaim);
98 static void	ehci_insert_qh_on_reclaim_list(
99 				ehci_state_t		*ehcip,
100 				ehci_pipe_private_t	*pp);
101 void		ehci_deallocate_qh(
102 				ehci_state_t		*ehcip,
103 				ehci_qh_t		*old_qh);
104 uint32_t	ehci_qh_cpu_to_iommu(
105 				ehci_state_t		*ehcip,
106 				ehci_qh_t		*addr);
107 ehci_qh_t	*ehci_qh_iommu_to_cpu(
108 				ehci_state_t		*ehcip,
109 				uintptr_t		addr);
110 
111 /* Transfer Descriptor (QTD) related functions */
112 static int	ehci_initialize_dummy(
113 				ehci_state_t		*ehcip,
114 				ehci_qh_t		*qh);
115 ehci_trans_wrapper_t *ehci_allocate_ctrl_resources(
116 				ehci_state_t		*ehcip,
117 				ehci_pipe_private_t	*pp,
118 				usb_ctrl_req_t		*ctrl_reqp,
119 				usb_flags_t		usb_flags);
120 void		ehci_insert_ctrl_req(
121 				ehci_state_t		*ehcip,
122 				usba_pipe_handle_data_t	*ph,
123 				usb_ctrl_req_t		*ctrl_reqp,
124 				ehci_trans_wrapper_t	*tw,
125 				usb_flags_t		usb_flags);
126 ehci_trans_wrapper_t *ehci_allocate_bulk_resources(
127 				ehci_state_t		*ehcip,
128 				ehci_pipe_private_t	*pp,
129 				usb_bulk_req_t		*bulk_reqp,
130 				usb_flags_t		usb_flags);
131 void		ehci_insert_bulk_req(
132 				ehci_state_t		*ehcip,
133 				usba_pipe_handle_data_t	*ph,
134 				usb_bulk_req_t		*bulk_reqp,
135 				ehci_trans_wrapper_t	*tw,
136 				usb_flags_t		flags);
137 int		ehci_start_periodic_pipe_polling(
138 				ehci_state_t		*ehcip,
139 				usba_pipe_handle_data_t	*ph,
140 				usb_opaque_t		periodic_in_reqp,
141 				usb_flags_t		flags);
142 static int	ehci_start_pipe_polling(
143 				ehci_state_t		*ehcip,
144 				usba_pipe_handle_data_t	*ph,
145 				usb_flags_t		flags);
146 static int	ehci_start_intr_polling(
147 				ehci_state_t		*ehcip,
148 				usba_pipe_handle_data_t	*ph,
149 				usb_flags_t		flags);
150 static void	ehci_set_periodic_pipe_polling(
151 				ehci_state_t		*ehcip,
152 				usba_pipe_handle_data_t	*ph);
153 ehci_trans_wrapper_t *ehci_allocate_intr_resources(
154 				ehci_state_t		*ehcip,
155 				usba_pipe_handle_data_t	*ph,
156 				usb_intr_req_t		*intr_reqp,
157 				usb_flags_t		usb_flags);
158 void		ehci_insert_intr_req(
159 				ehci_state_t		*ehcip,
160 				ehci_pipe_private_t	*pp,
161 				ehci_trans_wrapper_t	*tw,
162 				usb_flags_t		flags);
163 int		ehci_stop_periodic_pipe_polling(
164 				ehci_state_t		*ehcip,
165 				usba_pipe_handle_data_t	*ph,
166 				usb_flags_t		flags);
167 int		ehci_insert_qtd(
168 				ehci_state_t		*ehcip,
169 				uint32_t		qtd_ctrl,
170 				size_t			qtd_dma_offs,
171 				size_t			qtd_length,
172 				uint32_t		qtd_ctrl_phase,
173 				ehci_pipe_private_t	*pp,
174 				ehci_trans_wrapper_t	*tw);
175 static ehci_qtd_t *ehci_allocate_qtd_from_pool(
176 				ehci_state_t		*ehcip);
177 static void	ehci_fill_in_qtd(
178 				ehci_state_t		*ehcip,
179 				ehci_qtd_t		*qtd,
180 				uint32_t		qtd_ctrl,
181 				size_t			qtd_dma_offs,
182 				size_t			qtd_length,
183 				uint32_t		qtd_ctrl_phase,
184 				ehci_pipe_private_t	*pp,
185 				ehci_trans_wrapper_t	*tw);
186 static void	ehci_insert_qtd_on_tw(
187 				ehci_state_t		*ehcip,
188 				ehci_trans_wrapper_t	*tw,
189 				ehci_qtd_t		*qtd);
190 static void	ehci_insert_qtd_into_active_qtd_list(
191 				ehci_state_t		*ehcip,
192 				ehci_qtd_t		*curr_qtd);
193 void		ehci_remove_qtd_from_active_qtd_list(
194 				ehci_state_t		*ehcip,
195 				ehci_qtd_t		*curr_qtd);
196 static void	ehci_traverse_qtds(
197 				ehci_state_t		*ehcip,
198 				usba_pipe_handle_data_t	*ph);
199 void		ehci_deallocate_qtd(
200 				ehci_state_t		*ehcip,
201 				ehci_qtd_t		*old_qtd);
202 uint32_t	ehci_qtd_cpu_to_iommu(
203 				ehci_state_t		*ehcip,
204 				ehci_qtd_t		*addr);
205 ehci_qtd_t	*ehci_qtd_iommu_to_cpu(
206 				ehci_state_t		*ehcip,
207 				uintptr_t		addr);
208 
209 /* Transfer Wrapper (TW) functions */
210 static ehci_trans_wrapper_t  *ehci_create_transfer_wrapper(
211 				ehci_state_t		*ehcip,
212 				ehci_pipe_private_t	*pp,
213 				size_t			length,
214 				uint_t			usb_flags);
215 int		ehci_allocate_tds_for_tw(
216 				ehci_state_t		*ehcip,
217 				ehci_pipe_private_t	*pp,
218 				ehci_trans_wrapper_t	*tw,
219 				size_t			qtd_count);
220 static ehci_trans_wrapper_t  *ehci_allocate_tw_resources(
221 				ehci_state_t		*ehcip,
222 				ehci_pipe_private_t	*pp,
223 				size_t			length,
224 				usb_flags_t		usb_flags,
225 				size_t			td_count);
226 static void	ehci_free_tw_td_resources(
227 				ehci_state_t		*ehcip,
228 				ehci_trans_wrapper_t	*tw);
229 static void	ehci_start_xfer_timer(
230 				ehci_state_t		*ehcip,
231 				ehci_pipe_private_t	*pp,
232 				ehci_trans_wrapper_t	*tw);
233 void		ehci_stop_xfer_timer(
234 				ehci_state_t		*ehcip,
235 				ehci_trans_wrapper_t	*tw,
236 				uint_t			flag);
237 static void	ehci_xfer_timeout_handler(void		*arg);
238 static void	ehci_remove_tw_from_timeout_list(
239 				ehci_state_t		*ehcip,
240 				ehci_trans_wrapper_t	*tw);
241 static void	ehci_start_timer(ehci_state_t		*ehcip,
242 				ehci_pipe_private_t	*pp);
243 void		ehci_deallocate_tw(
244 				ehci_state_t		*ehcip,
245 				ehci_pipe_private_t	*pp,
246 				ehci_trans_wrapper_t	*tw);
247 void		ehci_free_dma_resources(
248 				ehci_state_t		*ehcip,
249 				usba_pipe_handle_data_t	*ph);
250 static void	ehci_free_tw(
251 				ehci_state_t		*ehcip,
252 				ehci_pipe_private_t	*pp,
253 				ehci_trans_wrapper_t	*tw);
254 
255 /* Miscellaneous functions */
256 int		ehci_allocate_intr_in_resource(
257 				ehci_state_t		*ehcip,
258 				ehci_pipe_private_t	*pp,
259 				ehci_trans_wrapper_t	*tw,
260 				usb_flags_t		flags);
261 void		ehci_pipe_cleanup(
262 				ehci_state_t		*ehcip,
263 				usba_pipe_handle_data_t	*ph);
264 static void	ehci_wait_for_transfers_completion(
265 				ehci_state_t		*ehcip,
266 				ehci_pipe_private_t	*pp);
267 void		ehci_check_for_transfers_completion(
268 				ehci_state_t		*ehcip,
269 				ehci_pipe_private_t	*pp);
270 static void	ehci_save_data_toggle(
271 				ehci_state_t		*ehcip,
272 				usba_pipe_handle_data_t	*ph);
273 void		ehci_restore_data_toggle(
274 				ehci_state_t		*ehcip,
275 				usba_pipe_handle_data_t	*ph);
276 void		ehci_handle_outstanding_requests(
277 				ehci_state_t		*ehcip,
278 				ehci_pipe_private_t	*pp);
279 void		ehci_deallocate_intr_in_resource(
280 				ehci_state_t		*ehcip,
281 				ehci_pipe_private_t	*pp,
282 				ehci_trans_wrapper_t	*tw);
283 void		ehci_do_client_periodic_in_req_callback(
284 				ehci_state_t		*ehcip,
285 				ehci_pipe_private_t	*pp,
286 				usb_cr_t		completion_reason);
287 void		ehci_hcdi_callback(
288 				usba_pipe_handle_data_t	*ph,
289 				ehci_trans_wrapper_t	*tw,
290 				usb_cr_t		completion_reason);
291 
292 
293 /*
294  * Endpoint Descriptor (QH) manipulation functions
295  */
296 
297 /*
298  * ehci_alloc_qh:
299  *
300  * Allocate an endpoint descriptor (QH)
301  *
302  * NOTE: This function is also called from POLLED MODE.
303  */
304 ehci_qh_t *
305 ehci_alloc_qh(
306 	ehci_state_t		*ehcip,
307 	usba_pipe_handle_data_t	*ph,
308 	uint_t			flag)
309 {
310 	int			i, state;
311 	ehci_qh_t		*qh;
312 
313 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
314 	    "ehci_alloc_qh: ph = 0x%p flag = 0x%x", (void *)ph, flag);
315 
316 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
317 
318 	/*
319 	 * If this is for an ISOC endpoint, return NULL.
320 	 * Isochronous transfers use ITDs placed directly onto the PFL.
321 	 */
322 	if (ph) {
323 		if (EHCI_ISOC_ENDPOINT((&ph->p_ep))) {
324 
325 			return (NULL);
326 		}
327 	}
328 
329 	/*
330 	 * The first 63 endpoints in the Endpoint Descriptor (QH)
331 	 * buffer pool are reserved for building the interrupt lattice
332 	 * tree. Search for a blank endpoint descriptor in the QH
333 	 * buffer pool.
334 	 */
335 	for (i = EHCI_NUM_STATIC_NODES; i < ehci_qh_pool_size; i ++) {
336 		state = Get_QH(ehcip->ehci_qh_pool_addr[i].qh_state);
337 
338 		if (state == EHCI_QH_FREE) {
339 			break;
340 		}
341 	}
342 
343 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
344 	    "ehci_alloc_qh: Allocated %d", i);
345 
346 	if (i == ehci_qh_pool_size) {
347 		USB_DPRINTF_L2(PRINT_MASK_ALLOC,  ehcip->ehci_log_hdl,
348 		    "ehci_alloc_qh: QH exhausted");
349 
350 		return (NULL);
351 	} else {
352 		qh = &ehcip->ehci_qh_pool_addr[i];
353 		bzero((void *)qh, sizeof (ehci_qh_t));
354 
355 		USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
356 		    "ehci_alloc_qh: Allocated address 0x%p", (void *)qh);
357 
358 		/* Check polled mode flag */
359 		if (flag == EHCI_POLLED_MODE_FLAG) {
360 			Set_QH(qh->qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
361 			Set_QH(qh->qh_ctrl, EHCI_QH_CTRL_ED_INACTIVATE);
362 		}
363 
364 		/* Unpack the endpoint descriptor into a control field */
365 		if (ph) {
366 			if ((ehci_initialize_dummy(ehcip,
367 			    qh)) == USB_NO_RESOURCES) {
368 
369 				Set_QH(qh->qh_state, EHCI_QH_FREE);
370 
371 				return (NULL);
372 			}
373 
374 			ehci_unpack_endpoint(ehcip, ph, qh);
375 
376 			Set_QH(qh->qh_curr_qtd, NULL);
377 			Set_QH(qh->qh_alt_next_qtd,
378 			    EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
379 
380 			/* Change the QH's state to Active */
381 			Set_QH(qh->qh_state, EHCI_QH_ACTIVE);
382 		} else {
383 			Set_QH(qh->qh_status, EHCI_QH_STS_HALTED);
384 
385 			/* Change the QH's state to Static */
386 			Set_QH(qh->qh_state, EHCI_QH_STATIC);
387 		}
388 
389 		ehci_print_qh(ehcip, qh);
390 
391 		return (qh);
392 	}
393 }
394 
395 
396 /*
397  * ehci_unpack_endpoint:
398  *
399  * Unpack the information in the pipe handle and create the first byte
400  * of the Host Controller's (HC) Endpoint Descriptor (QH).
401  */
402 static void
403 ehci_unpack_endpoint(
404 	ehci_state_t		*ehcip,
405 	usba_pipe_handle_data_t	*ph,
406 	ehci_qh_t		*qh)
407 {
408 	usb_ep_descr_t		*endpoint = &ph->p_ep;
409 	uint_t			maxpacketsize, addr, xactions;
410 	uint_t			ctrl = 0, status = 0, split_ctrl = 0;
411 	usb_port_status_t	usb_port_status;
412 	usba_device_t		*usba_device = ph->p_usba_device;
413 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
414 
415 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
416 	    "ehci_unpack_endpoint:");
417 
418 	mutex_enter(&usba_device->usb_mutex);
419 	ctrl = usba_device->usb_addr;
420 	usb_port_status = usba_device->usb_port_status;
421 	mutex_exit(&usba_device->usb_mutex);
422 
423 	addr = endpoint->bEndpointAddress;
424 
425 	/* Assign the endpoint's address */
426 	ctrl |= ((addr & USB_EP_NUM_MASK) << EHCI_QH_CTRL_ED_NUMBER_SHIFT);
427 
428 	/* Assign the speed */
429 	switch (usb_port_status) {
430 	case USBA_LOW_SPEED_DEV:
431 		ctrl |= EHCI_QH_CTRL_ED_LOW_SPEED;
432 		break;
433 	case USBA_FULL_SPEED_DEV:
434 		ctrl |= EHCI_QH_CTRL_ED_FULL_SPEED;
435 		break;
436 	case USBA_HIGH_SPEED_DEV:
437 		ctrl |= EHCI_QH_CTRL_ED_HIGH_SPEED;
438 		break;
439 	}
440 
441 	switch (endpoint->bmAttributes & USB_EP_ATTR_MASK) {
442 	case USB_EP_ATTR_CONTROL:
443 		/* Assign data toggle information */
444 		ctrl |= EHCI_QH_CTRL_DATA_TOGGLE;
445 
446 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
447 			ctrl |= EHCI_QH_CTRL_CONTROL_ED_FLAG;
448 		}
449 		/* FALLTHRU */
450 	case USB_EP_ATTR_BULK:
451 		/* Maximum nak counter */
452 		ctrl |= EHCI_QH_CTRL_MAX_NC;
453 
454 		if (usb_port_status == USBA_HIGH_SPEED_DEV) {
455 			/*
456 			 * Perform ping before executing control
457 			 * and bulk transactions.
458 			 */
459 			status = EHCI_QH_STS_DO_PING;
460 		}
461 		break;
462 	case USB_EP_ATTR_INTR:
463 		/* Set start split mask */
464 		split_ctrl = (pp->pp_smask & EHCI_QH_SPLIT_CTRL_INTR_MASK);
465 
466 		/*
467 		 * Set complete split mask for low/full speed
468 		 * usb devices.
469 		 */
470 		if (usb_port_status != USBA_HIGH_SPEED_DEV) {
471 			split_ctrl |= ((pp->pp_cmask <<
472 			    EHCI_QH_SPLIT_CTRL_COMP_SHIFT) &
473 			    EHCI_QH_SPLIT_CTRL_COMP_MASK);
474 		}
475 		break;
476 	}
477 
478 	/* Get the max transactions per microframe */
479 	xactions = (endpoint->wMaxPacketSize &
480 	    USB_EP_MAX_XACTS_MASK) >>  USB_EP_MAX_XACTS_SHIFT;
481 
482 	switch (xactions) {
483 	case 0:
484 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
485 		break;
486 	case 1:
487 		split_ctrl |= EHCI_QH_SPLIT_CTRL_2_XACTS;
488 		break;
489 	case 2:
490 		split_ctrl |= EHCI_QH_SPLIT_CTRL_3_XACTS;
491 		break;
492 	default:
493 		split_ctrl |= EHCI_QH_SPLIT_CTRL_1_XACTS;
494 		break;
495 	}
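	/*
	 * Illustration (per the USB 2.0 wMaxPacketSize encoding; values are
	 * purely for example): a high-bandwidth endpoint advertising
	 * wMaxPacketSize 0x1400 (1024-byte packets, bits 12..11 == 2) yields
	 * xactions == 2, i.e. three transactions per microframe.
	 */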
496 
497 	/*
498 	 * For low/full speed devices, program high speed hub
499 	 * address and port number.
500 	 */
501 	if (usb_port_status != USBA_HIGH_SPEED_DEV) {
502 		mutex_enter(&usba_device->usb_mutex);
503 		split_ctrl |= ((usba_device->usb_hs_hub_addr
504 		    << EHCI_QH_SPLIT_CTRL_HUB_ADDR_SHIFT) &
505 		    EHCI_QH_SPLIT_CTRL_HUB_ADDR);
506 
507 		split_ctrl |= ((usba_device->usb_hs_hub_port
508 		    << EHCI_QH_SPLIT_CTRL_HUB_PORT_SHIFT) &
509 		    EHCI_QH_SPLIT_CTRL_HUB_PORT);
510 
511 		mutex_exit(&usba_device->usb_mutex);
512 
513 		/* Set start split transaction state */
514 		status = EHCI_QH_STS_DO_START_SPLIT;
515 	}
516 
517 	/* Assign endpoint's maxpacketsize */
518 	maxpacketsize = endpoint->wMaxPacketSize & USB_EP_MAX_PKTSZ_MASK;
519 	maxpacketsize = maxpacketsize << EHCI_QH_CTRL_MAXPKTSZ_SHIFT;
520 	ctrl |= (maxpacketsize & EHCI_QH_CTRL_MAXPKTSZ);
521 
522 	Set_QH(qh->qh_ctrl, ctrl);
523 	Set_QH(qh->qh_split_ctrl, split_ctrl);
524 	Set_QH(qh->qh_status, status);
525 }
526 
527 
528 /*
529  * ehci_insert_qh:
530  *
531  * Add the Endpoint Descriptor (QH) into the Host Controller's
532  * (HC) appropriate endpoint list.
533  */
534 void
535 ehci_insert_qh(
536 	ehci_state_t		*ehcip,
537 	usba_pipe_handle_data_t	*ph)
538 {
539 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
540 
541 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
542 	    "ehci_insert_qh: qh=0x%p", pp->pp_qh);
543 
544 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
545 
546 	switch (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK) {
547 	case USB_EP_ATTR_CONTROL:
548 	case USB_EP_ATTR_BULK:
549 		ehci_insert_async_qh(ehcip, pp);
550 		ehcip->ehci_open_async_count++;
551 		break;
552 	case USB_EP_ATTR_INTR:
553 		ehci_insert_intr_qh(ehcip, pp);
554 		ehcip->ehci_open_periodic_count++;
555 		break;
556 	case USB_EP_ATTR_ISOCH:
557 		/* ISOCH does not use QH, don't do anything but update count */
558 		ehcip->ehci_open_periodic_count++;
559 		break;
560 	}
561 	ehci_toggle_scheduler(ehcip);
562 }
563 
564 
565 /*
566  * ehci_insert_async_qh:
567  *
568  * Insert a control/bulk endpoint into the Host Controller's (HC)
569  * Asynchronous schedule endpoint list.
570  */
571 static void
572 ehci_insert_async_qh(
573 	ehci_state_t		*ehcip,
574 	ehci_pipe_private_t	*pp)
575 {
576 	ehci_qh_t		*qh = pp->pp_qh;
577 	ehci_qh_t		*async_head_qh;
578 	ehci_qh_t		*next_qh;
579 	uintptr_t		qh_addr;
580 
581 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
582 	    "ehci_insert_async_qh:");
583 
584 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
585 
586 	/* Make sure this QH is not already in the list */
587 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
588 
589 	qh_addr = ehci_qh_cpu_to_iommu(ehcip, qh);
590 
591 	/* Obtain a ptr to the head of the Async schedule list */
592 	async_head_qh = ehcip->ehci_head_of_async_sched_list;
593 
594 	if (async_head_qh == NULL) {
595 		/* Set this QH to be the "head" of the circular list */
596 		Set_QH(qh->qh_ctrl,
597 		    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_RECLAIM_HEAD));
598 
599 		/* Set new QH's link and previous pointer to itself */
600 		Set_QH(qh->qh_link_ptr, qh_addr | EHCI_QH_LINK_REF_QH);
601 		Set_QH(qh->qh_prev, qh_addr);
602 
603 		ehcip->ehci_head_of_async_sched_list = qh;
604 
605 		/* Set the head ptr to the new endpoint */
606 		Set_OpReg(ehci_async_list_addr, qh_addr);
607 
608 		/*
609 		 * For some reason this register might get nulled out by
610 		 * the Uli M1575 South Bridge. To work around the hardware
611 		 * problem, check the value after the write and retry if the
612 		 * last write fails.
613 		 *
614 		 * If the ASYNCLISTADDR remains "stuck" after
615 		 * EHCI_MAX_RETRY retries, then the M1575 is broken
616 		 * and is stuck in an inconsistent state and is about
617 		 * to crash the machine with a trn_oor panic when it
618 		 * does a DMA read from 0x0.  It is better to panic
619 		 * now rather than wait for the trn_oor crash; this
620 		 * way Customer Service will have a clean signature
621 		 * that indicts the M1575 chip rather than a
622 		 * mysterious and hard-to-diagnose trn_oor panic.
623 		 */
624 		if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
625 		    (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
626 		    (qh_addr != Get_OpReg(ehci_async_list_addr))) {
627 			int retry = 0;
628 
629 			Set_OpRegRetry(ehci_async_list_addr, qh_addr, retry);
630 			if (retry >= EHCI_MAX_RETRY)
631 				cmn_err(CE_PANIC, "ehci_insert_async_qh:"
632 				    " ASYNCLISTADDR write failed.");
633 
634 			USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
635 			    "ehci_insert_async_qh: ASYNCLISTADDR "
636 			    "write failed, retry=%d", retry);
637 		}
638 	} else {
639 		ASSERT(Get_QH(async_head_qh->qh_ctrl) &
640 		    EHCI_QH_CTRL_RECLAIM_HEAD);
641 
642 		/* Ensure this QH's "H" bit is not set */
643 		Set_QH(qh->qh_ctrl,
644 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_RECLAIM_HEAD));
645 
646 		next_qh = ehci_qh_iommu_to_cpu(ehcip,
647 		    Get_QH(async_head_qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
648 
649 		/* Set new QH's link and previous pointers */
650 		Set_QH(qh->qh_link_ptr,
651 		    Get_QH(async_head_qh->qh_link_ptr) | EHCI_QH_LINK_REF_QH);
652 		Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, async_head_qh));
653 
654 		/* Set next QH's prev pointer */
655 		Set_QH(next_qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, qh));
656 
657 		/* Set the QH head's link pointer to point to the new QH */
658 		Set_QH(async_head_qh->qh_link_ptr,
659 		    qh_addr | EHCI_QH_LINK_REF_QH);
660 	}
661 }
662 
663 
664 /*
665  * ehci_insert_intr_qh:
666  *
667  * Insert an interrupt endpoint into the Host Controller's (HC) interrupt
668  * lattice tree.
669  */
670 static void
671 ehci_insert_intr_qh(
672 	ehci_state_t		*ehcip,
673 	ehci_pipe_private_t	*pp)
674 {
675 	ehci_qh_t		*qh = pp->pp_qh;
676 	ehci_qh_t		*next_lattice_qh, *lattice_qh;
677 	uint_t			hnode;
678 
679 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
680 	    "ehci_insert_intr_qh:");
681 
682 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
683 
684 	/* Make sure this QH is not already in the list */
685 	ASSERT((Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR) == NULL);
686 
687 	/*
688 	 * The appropriate high speed node was found
689 	 * during the opening of the pipe.
690 	 */
691 	hnode = pp->pp_pnode;
692 
693 	/* Find the lattice endpoint */
694 	lattice_qh = &ehcip->ehci_qh_pool_addr[hnode];
695 
696 	/* Find the next lattice endpoint */
697 	next_lattice_qh = ehci_qh_iommu_to_cpu(
698 	    ehcip, (Get_QH(lattice_qh->qh_link_ptr) & EHCI_QH_LINK_PTR));
699 
700 	/* Update the previous pointer */
701 	Set_QH(qh->qh_prev, ehci_qh_cpu_to_iommu(ehcip, lattice_qh));
702 
703 	/* Check next_lattice_qh value */
704 	if (next_lattice_qh) {
705 		/* Update this qh to point to the next one in the lattice */
706 		Set_QH(qh->qh_link_ptr, Get_QH(lattice_qh->qh_link_ptr));
707 
708 		/* Update the previous pointer of qh->qh_link_ptr */
709 		if (Get_QH(next_lattice_qh->qh_state) != EHCI_QH_STATIC) {
710 			Set_QH(next_lattice_qh->qh_prev,
711 			    ehci_qh_cpu_to_iommu(ehcip, qh));
712 		}
713 	} else {
714 		/* Update qh's link pointer to terminate periodic list */
715 		Set_QH(qh->qh_link_ptr,
716 		    (Get_QH(lattice_qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
717 	}
718 
719 	/* Insert this endpoint into the lattice */
720 	Set_QH(lattice_qh->qh_link_ptr,
721 	    (ehci_qh_cpu_to_iommu(ehcip, qh) | EHCI_QH_LINK_REF_QH));
722 }
723 
724 
725 /*
726  * ehci_modify_qh_status_bit:
727  *
728  * Modify the halt bit on the Host Controller (HC) Endpoint Descriptor (QH).
729  *
730  * If several threads try to halt the same pipe, they will need to wait on
731  * a condition variable.  Only one thread is allowed to halt or unhalt the
732  * pipe at a time.
733  *
734  * Usually an unhalt pipe will follow soon after a halt pipe.  It is
735  * assumed that an unhalt pipe will never occur without a preceding halt pipe.
736  */
737 static void
738 ehci_modify_qh_status_bit(
739 	ehci_state_t		*ehcip,
740 	ehci_pipe_private_t	*pp,
741 	halt_bit_t		action)
742 {
743 	ehci_qh_t		*qh = pp->pp_qh;
744 	uint_t			smask, eps, split_intr_qh;
745 	uint_t			status;
746 
747 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
748 	    "ehci_modify_qh_status_bit: action=0x%x qh=0x%p",
749 	    action, qh);
750 
751 	ehci_print_qh(ehcip, qh);
752 
753 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
754 
755 	/*
756 	 * If this pipe is in the middle of halting, don't allow another
757 	 * thread to come in and modify the same pipe.
758 	 */
759 	while (pp->pp_halt_state & EHCI_HALT_STATE_HALTING) {
760 
761 		cv_wait(&pp->pp_halt_cmpl_cv,
762 		    &ehcip->ehci_int_mutex);
763 	}
764 
765 	/* Sync the QH QTD pool to get up to date information */
766 	Sync_QH_QTD_Pool(ehcip);
767 
768 
769 	if (action == CLEAR_HALT) {
770 		/*
771 		 * If the halt bit is to be cleared, just clear it;
772 		 * there shouldn't be any race condition problems.
773 		 * If the host controller reads the bit before the
774 		 * driver has a chance to set the bit, the bit will
775 		 * be reread on the next frame.
776 		 */
777 		Set_QH(qh->qh_ctrl,
778 		    (Get_QH(qh->qh_ctrl) & ~EHCI_QH_CTRL_ED_INACTIVATE));
779 		Set_QH(qh->qh_status,
780 		    Get_QH(qh->qh_status) & ~(EHCI_QH_STS_XACT_STATUS));
781 
782 		goto success;
783 	}
784 
785 	/* Halt the QH, but first check to see if it is already halted */
786 	status = Get_QH(qh->qh_status);
787 	if (!(status & EHCI_QH_STS_HALTED)) {
788 		/* Indicate that this pipe is in the middle of halting. */
789 		pp->pp_halt_state |= EHCI_HALT_STATE_HALTING;
790 
791 		/*
792 		 * Find out if this is a full/low speed interrupt endpoint.
793 		 * A non-zero Cmask indicates that this QH is an interrupt
794 		 * endpoint.  Check the endpoint speed to see if it is either
795 		 * FULL or LOW.
796 		 */
797 		smask = Get_QH(qh->qh_split_ctrl) &
798 		    EHCI_QH_SPLIT_CTRL_INTR_MASK;
799 		eps = Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_SPEED;
800 		split_intr_qh = ((smask != 0) &&
801 		    (eps != EHCI_QH_CTRL_ED_HIGH_SPEED));
802 
803 		if (eps == EHCI_QH_CTRL_ED_HIGH_SPEED) {
804 			ehci_halt_hs_qh(ehcip, pp, qh);
805 		} else {
806 			if (split_intr_qh) {
807 				ehci_halt_fls_intr_qh(ehcip, qh);
808 			} else {
809 				ehci_halt_fls_ctrl_and_bulk_qh(ehcip, pp, qh);
810 			}
811 		}
812 
813 		/* Indicate that this pipe is not in the middle of halting. */
814 		pp->pp_halt_state &= ~EHCI_HALT_STATE_HALTING;
815 	}
816 
817 	/* Sync the QH QTD pool again to get the most up to date information */
818 	Sync_QH_QTD_Pool(ehcip);
819 
820 	ehci_print_qh(ehcip, qh);
821 
822 	status = Get_QH(qh->qh_status);
823 	if (!(status & EHCI_QH_STS_HALTED)) {
824 		USB_DPRINTF_L1(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
825 		    "ehci_modify_qh_status_bit: Failed to halt qh=0x%p", qh);
826 
827 		ehci_print_qh(ehcip, qh);
828 
829 		/* Set host controller soft state to error */
830 		ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
831 
832 		ASSERT(status & EHCI_QH_STS_HALTED);
833 	}
834 
835 success:
836 	/* Wake up threads waiting for this pipe to be halted. */
837 	cv_signal(&pp->pp_halt_cmpl_cv);
838 }
839 
840 
841 /*
842  * ehci_halt_hs_qh:
843  *
844  * Halts all types of HIGH SPEED QHs.
845  */
846 static void
847 ehci_halt_hs_qh(
848 	ehci_state_t		*ehcip,
849 	ehci_pipe_private_t	*pp,
850 	ehci_qh_t		*qh)
851 {
852 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
853 
854 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
855 	    "ehci_halt_hs_qh:");
856 
857 	/* Remove this qh from the HCD's view, but do not reclaim it */
858 	ehci_remove_qh(ehcip, pp, B_FALSE);
859 
860 	/*
861 	 * Wait for at least one SOF, just in case the HC is in the
862 	 * middle of accessing this QH.
863 	 */
864 	(void) ehci_wait_for_sof(ehcip);
865 
866 	/* Sync the QH QTD pool to get up to date information */
867 	Sync_QH_QTD_Pool(ehcip);
868 
869 	/* Modify the status bit and halt this QH. */
870 	Set_QH(qh->qh_status,
871 	    ((Get_QH(qh->qh_status) &
872 	    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
873 
874 	/* Insert this QH back into the HCD's view */
875 	ehci_insert_qh(ehcip, ph);
876 }
877 
878 
879 /*
880  * ehci_halt_fls_ctrl_and_bulk_qh:
881  *
882  * Halts FULL/LOW Ctrl and Bulk QHs only.
883  */
884 static void
885 ehci_halt_fls_ctrl_and_bulk_qh(
886 	ehci_state_t		*ehcip,
887 	ehci_pipe_private_t	*pp,
888 	ehci_qh_t		*qh)
889 {
890 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
891 	uint_t			status, split_status, bytes_left;
892 
893 
894 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
895 	    "ehci_halt_fls_ctrl_and_bulk_qh:");
896 
897 	/* Remove this qh from the HCD's view, but do not reclaim it */
898 	ehci_remove_qh(ehcip, pp, B_FALSE);
899 
900 	/*
901 	 * Wait for at least one SOF, just in case the HC is in the
902 	 * middle of accessing this QH.
903 	 */
904 	(void) ehci_wait_for_sof(ehcip);
905 
906 	/* Sync the QH QTD pool to get up to date information */
907 	Sync_QH_QTD_Pool(ehcip);
908 
909 	/* Modify the status bit and halt this QH. */
910 	Set_QH(qh->qh_status,
911 	    ((Get_QH(qh->qh_status) &
912 	    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
913 
914 	/* Check to see if the QH was in the middle of a transaction */
915 	status = Get_QH(qh->qh_status);
916 	split_status = status & EHCI_QH_STS_SPLIT_XSTATE;
917 	bytes_left = status & EHCI_QH_STS_BYTES_TO_XFER;
918 	if ((split_status == EHCI_QH_STS_DO_COMPLETE_SPLIT) &&
919 	    (bytes_left != 0)) {
920 		/* send ClearTTBuffer to this device's parent 2.0 hub */
921 		ehci_clear_tt_buffer(ehcip, ph, qh);
922 	}
923 
924 	/* Insert this QH back into the HCD's view */
925 	ehci_insert_qh(ehcip, ph);
926 }
927 
928 
929 /*
930  * ehci_clear_tt_buffer
931  *
932  * This function will sent a Clear_TT_Buffer request to the pipe's
933  * parent 2.0 hub.
934  */
935 static void
936 ehci_clear_tt_buffer(
937 	ehci_state_t		*ehcip,
938 	usba_pipe_handle_data_t	*ph,
939 	ehci_qh_t		*qh)
940 {
941 	usba_device_t		*usba_device;
942 	usba_device_t		*hub_usba_device;
943 	usb_pipe_handle_t	hub_def_ph;
944 	usb_ep_descr_t		*eptd;
945 	uchar_t			attributes;
946 	uint16_t		wValue;
947 	usb_ctrl_setup_t	setup;
948 	usb_cr_t		completion_reason;
949 	usb_cb_flags_t		cb_flags;
950 	int			retry;
951 
952 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
953 	    "ehci_clear_tt_buffer: ");
954 
955 	/* Get some information about the current pipe */
956 	usba_device = ph->p_usba_device;
957 	eptd = &ph->p_ep;
958 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
959 
960 	/*
961 	 * Create the wValue for this request (USB spec 11.24.2.3)
962 	 * 3..0		Endpoint Number
963 	 * 10..4	Device Address
964 	 * 12..11	Endpoint Type
965 	 * 14..13	Reserved (must be 0)
966 	 * 15		Direction 1 = IN, 0 = OUT
967 	 */
968 	wValue = 0;
969 	if ((eptd->bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
970 		wValue |= 0x8000;
971 	}
972 	wValue |= attributes << 11;
973 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_DEVICE_ADDRESS) << 4;
974 	wValue |= (Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_ED_HIGH_SPEED) >>
975 	    EHCI_QH_CTRL_ED_NUMBER_SHIFT;
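	/*
	 * Worked illustration of the encoding above (values are purely for
	 * example): an interrupt IN endpoint number 1 on a device at
	 * address 2 would be encoded as
	 *
	 *	wValue = 0x8000 | (3 << 11) | (2 << 4) | 1 = 0x9821
	 *
	 * (direction IN, endpoint type 3 = interrupt, address 2, ep 1).
	 */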
976 
977 	mutex_exit(&ehcip->ehci_int_mutex);
978 
979 	/* Manually fill in the request. */
980 	setup.bmRequestType = EHCI_CLEAR_TT_BUFFER_REQTYPE;
981 	setup.bRequest = EHCI_CLEAR_TT_BUFFER_BREQ;
982 	setup.wValue = wValue;
983 	setup.wIndex = 1;
984 	setup.wLength = 0;
985 	setup.attrs = USB_ATTRS_NONE;
986 
987 	/* Get the usba_device of the parent 2.0 hub. */
988 	mutex_enter(&usba_device->usb_mutex);
989 	hub_usba_device = usba_device->usb_hs_hub_usba_dev;
990 	mutex_exit(&usba_device->usb_mutex);
991 
992 	/* Get the default ctrl pipe for the parent 2.0 hub */
993 	mutex_enter(&hub_usba_device->usb_mutex);
994 	hub_def_ph = (usb_pipe_handle_t)&hub_usba_device->usb_ph_list[0];
995 	mutex_exit(&hub_usba_device->usb_mutex);
996 
997 	for (retry = 0; retry < 3; retry++) {
998 
999 		/* sync send the request to the default pipe */
1000 		if (usb_pipe_ctrl_xfer_wait(
1001 		    hub_def_ph,
1002 		    &setup,
1003 		    NULL,
1004 		    &completion_reason, &cb_flags, 0) == USB_SUCCESS) {
1005 
1006 			break;
1007 		}
1008 
1009 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1010 		    "ehci_clear_tt_buffer: Failed to clear tt buffer,"
1011 		    "retry = %d, cr = %d, cb_flags = 0x%x\n",
1012 		    retry, completion_reason, cb_flags);
1013 	}
1014 
1015 	if (retry >= 3) {
1016 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1017 		dev_info_t *dip = hub_usba_device->usb_dip;
1018 
1019 		/*
1020 		 * Ask the user to hotplug the 2.0 hub, to make sure that
1021 		 * the TT buffers are back in sync since this command has failed.
1022 		 */
1023 		USB_DPRINTF_L0(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1024 		    "Error recovery failure: Please hotplug the 2.0 hub at"
1025 		    "%s", ddi_pathname(dip, path));
1026 
1027 		kmem_free(path, MAXPATHLEN);
1028 	}
1029 
1030 	mutex_enter(&ehcip->ehci_int_mutex);
1031 }
1032 
1033 /*
1034  * ehci_halt_fls_intr_qh:
1035  *
1036  * Halts FULL/LOW speed Intr QHs.
1037  */
1038 static void
1039 ehci_halt_fls_intr_qh(
1040 	ehci_state_t		*ehcip,
1041 	ehci_qh_t		*qh)
1042 {
1043 	usb_frame_number_t	starting_frame;
1044 	usb_frame_number_t	frames_past;
1045 	uint_t			status, i;
1046 
1047 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1048 	    "ehci_halt_fls_intr_qh:");
1049 
1050 	/*
1051 	 * Ask the HC to deactivate this full/low
1052 	 * speed periodic QH.
1053 	 */
1054 	Set_QH(qh->qh_ctrl,
1055 	    (Get_QH(qh->qh_ctrl) | EHCI_QH_CTRL_ED_INACTIVATE));
1056 
1057 	starting_frame = ehci_get_current_frame_number(ehcip);
1058 
1059 	/*
1060 	 * Wait at least EHCI_NUM_INTR_QH_LISTS+2 frames or until
1061 	 * the QH has been halted.
1062 	 */
1063 	Sync_QH_QTD_Pool(ehcip);
1064 	frames_past = 0;
1065 	status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1066 
1067 	while ((frames_past <= (EHCI_NUM_INTR_QH_LISTS + 2)) &&
1068 	    (status != 0)) {
1069 
1070 		(void) ehci_wait_for_sof(ehcip);
1071 
1072 		Sync_QH_QTD_Pool(ehcip);
1073 		status = Get_QH(qh->qh_status) & EHCI_QTD_CTRL_ACTIVE_XACT;
1074 		frames_past = ehci_get_current_frame_number(ehcip) -
1075 		    starting_frame;
1076 	}
1077 
1078 	/* Modify the status bit and halt this QH. */
1079 	Sync_QH_QTD_Pool(ehcip);
1080 
1081 	status = Get_QH(qh->qh_status);
1082 
1083 	for (i = 0; i < EHCI_NUM_INTR_QH_LISTS; i++) {
1084 		Set_QH(qh->qh_status,
1085 		    ((Get_QH(qh->qh_status) &
1086 		    ~(EHCI_QH_STS_ACTIVE)) | EHCI_QH_STS_HALTED));
1087 
1088 		Sync_QH_QTD_Pool(ehcip);
1089 
1090 		(void) ehci_wait_for_sof(ehcip);
1091 		Sync_QH_QTD_Pool(ehcip);
1092 
1093 		if (Get_QH(qh->qh_status) & EHCI_QH_STS_HALTED) {
1094 
1095 			break;
1096 		}
1097 	}
1098 
1099 	Sync_QH_QTD_Pool(ehcip);
1100 
1101 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1102 	    "ehci_halt_fls_intr_qh: qh=0x%p frames past=%d, status=0x%x, 0x%x",
1103 	    qh, ehci_get_current_frame_number(ehcip) - starting_frame,
1104 	    status, Get_QH(qh->qh_status));
1105 }
1106 
1107 
1108 /*
1109  * ehci_remove_qh:
1110  *
1111  * Remove the Endpoint Descriptor (QH) from the Host Controller's appropriate
1112  * endpoint list.
1113  */
1114 void
1115 ehci_remove_qh(
1116 	ehci_state_t		*ehcip,
1117 	ehci_pipe_private_t	*pp,
1118 	boolean_t		reclaim)
1119 {
1120 	uchar_t			attributes;
1121 
1122 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1123 
1124 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1125 	    "ehci_remove_qh: qh=0x%p", pp->pp_qh);
1126 
1127 	attributes = pp->pp_pipe_handle->p_ep.bmAttributes & USB_EP_ATTR_MASK;
1128 
1129 	switch (attributes) {
1130 	case USB_EP_ATTR_CONTROL:
1131 	case USB_EP_ATTR_BULK:
1132 		ehci_remove_async_qh(ehcip, pp, reclaim);
1133 		ehcip->ehci_open_async_count--;
1134 		break;
1135 	case USB_EP_ATTR_INTR:
1136 		ehci_remove_intr_qh(ehcip, pp, reclaim);
1137 		ehcip->ehci_open_periodic_count--;
1138 		break;
1139 	case USB_EP_ATTR_ISOCH:
1140 		/* ISOCH does not use QH, don't do anything but update count */
1141 		ehcip->ehci_open_periodic_count--;
1142 		break;
1143 	}
1144 	ehci_toggle_scheduler(ehcip);
1145 }
1146 
1147 
1148 /*
1149  * ehci_remove_async_qh:
1150  *
1151  * Remove a control/bulk endpoint from the Host Controller's (HC)
1152  * Asynchronous schedule endpoint list.
1153  */
1154 static void
1155 ehci_remove_async_qh(
1156 	ehci_state_t		*ehcip,
1157 	ehci_pipe_private_t	*pp,
1158 	boolean_t		reclaim)
1159 {
1160 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1161 	ehci_qh_t		*prev_qh, *next_qh;
1162 
1163 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1164 	    "ehci_remove_async_qh:");
1165 
1166 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1167 
1168 	prev_qh = ehci_qh_iommu_to_cpu(ehcip,
1169 	    Get_QH(qh->qh_prev) & EHCI_QH_LINK_PTR);
1170 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1171 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1172 
1173 	/* Make sure this QH is in the list */
1174 	ASSERT(prev_qh != NULL);
1175 
1176 	/*
1177 	 * If next QH and current QH are the same, then this is the last
1178 	 * QH on the Asynchronous Schedule list.
1179 	 */
1180 	if (qh == next_qh) {
1181 		ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1182 		/*
1183 		 * Null our pointer to the async sched list, but do not
1184 		 * touch the host controller's list_addr.
1185 		 */
1186 		ehcip->ehci_head_of_async_sched_list = NULL;
1187 		ASSERT(ehcip->ehci_open_async_count == 1);
1188 	} else {
1189 		/* If this QH is the HEAD then find another one to replace it */
1190 		if (ehcip->ehci_head_of_async_sched_list == qh) {
1191 
1192 			ASSERT(Get_QH(qh->qh_ctrl) & EHCI_QH_CTRL_RECLAIM_HEAD);
1193 			ehcip->ehci_head_of_async_sched_list = next_qh;
1194 			Set_QH(next_qh->qh_ctrl,
1195 			    Get_QH(next_qh->qh_ctrl) |
1196 			    EHCI_QH_CTRL_RECLAIM_HEAD);
1197 		}
1198 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1199 		Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1200 	}
1201 
1202 	/* Clear qh_prev to indicate it is no longer in the circular list */
1203 	Set_QH(qh->qh_prev, NULL);
1204 
1205 	if (reclaim) {
1206 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1207 	}
1208 }
1209 
1210 
1211 /*
1212  * ehci_remove_intr_qh:
1213  *
1214  * Set up an interrupt endpoint to be removed from the Host Controller's (HC)
1215  * interrupt lattice tree. The Endpoint Descriptor (QH) will be freed in the
1216  * interrupt handler.
1217  */
1218 static void
1219 ehci_remove_intr_qh(
1220 	ehci_state_t		*ehcip,
1221 	ehci_pipe_private_t	*pp,
1222 	boolean_t		reclaim)
1223 {
1224 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1225 	ehci_qh_t		*prev_qh, *next_qh;
1226 
1227 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1228 	    "ehci_remove_intr_qh:");
1229 
1230 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1231 
1232 	prev_qh = ehci_qh_iommu_to_cpu(ehcip, Get_QH(qh->qh_prev));
1233 	next_qh = ehci_qh_iommu_to_cpu(ehcip,
1234 	    Get_QH(qh->qh_link_ptr) & EHCI_QH_LINK_PTR);
1235 
1236 	/* Make sure this QH is in the list */
1237 	ASSERT(prev_qh != NULL);
1238 
1239 	if (next_qh) {
1240 		/* Update previous qh's link pointer */
1241 		Set_QH(prev_qh->qh_link_ptr, Get_QH(qh->qh_link_ptr));
1242 
1243 		if (Get_QH(next_qh->qh_state) != EHCI_QH_STATIC) {
1244 			/* Set the previous pointer of the next one */
1245 			Set_QH(next_qh->qh_prev, Get_QH(qh->qh_prev));
1246 		}
1247 	} else {
1248 		/* Update previous qh's link pointer */
1249 		Set_QH(prev_qh->qh_link_ptr,
1250 		    (Get_QH(qh->qh_link_ptr) | EHCI_QH_LINK_PTR_VALID));
1251 	}
1252 
1253 	/* Clear qh_prev to indicate it is no longer in the circular list */
1254 	Set_QH(qh->qh_prev, NULL);
1255 
1256 	if (reclaim) {
1257 		ehci_insert_qh_on_reclaim_list(ehcip, pp);
1258 	}
1259 }
1260 
1261 
1262 /*
1263  * ehci_insert_qh_on_reclaim_list:
1264  *
1265  * Insert Endpoint onto the reclaim list
1266  */
1267 static void
1268 ehci_insert_qh_on_reclaim_list(
1269 	ehci_state_t		*ehcip,
1270 	ehci_pipe_private_t	*pp)
1271 {
1272 	ehci_qh_t		*qh = pp->pp_qh; /* qh to be removed */
1273 	ehci_qh_t		*next_qh, *prev_qh;
1274 	usb_frame_number_t	frame_number;
1275 
1276 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1277 
1278 	/*
1279 	 * Read the current USB frame number and add the appropriate number of
1280 	 * USB frames to wait before reclaiming the current endpoint.
1281 	 */
1282 	frame_number =
1283 	    ehci_get_current_frame_number(ehcip) + MAX_SOF_WAIT_COUNT;
1284 
1285 	/* Store 32-bit ID */
1286 	Set_QH(qh->qh_reclaim_frame,
1287 	    ((uint32_t)(EHCI_GET_ID((void *)(uintptr_t)frame_number))));
1288 
1289 	/* Insert the endpoint onto the reclamation list */
1290 	if (ehcip->ehci_reclaim_list) {
1291 		next_qh = ehcip->ehci_reclaim_list;
1292 
1293 		while (next_qh) {
1294 			prev_qh = next_qh;
1295 			next_qh = ehci_qh_iommu_to_cpu(ehcip,
1296 			    Get_QH(next_qh->qh_reclaim_next));
1297 		}
1298 
1299 		Set_QH(prev_qh->qh_reclaim_next,
1300 		    ehci_qh_cpu_to_iommu(ehcip, qh));
1301 	} else {
1302 		ehcip->ehci_reclaim_list = qh;
1303 	}
1304 
1305 	ASSERT(Get_QH(qh->qh_reclaim_next) == NULL);
1306 }
1307 
1308 
1309 /*
1310  * ehci_deallocate_qh:
1311  *
1312  * Deallocate a Host Controller's (HC) Endpoint Descriptor (QH).
1313  *
1314  * NOTE: This function is also called from POLLED MODE.
1315  */
1316 void
1317 ehci_deallocate_qh(
1318 	ehci_state_t	*ehcip,
1319 	ehci_qh_t	*old_qh)
1320 {
1321 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1322 
1323 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1324 	    "ehci_deallocate_qh:");
1325 
1326 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1327 
1328 	first_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1329 	    (Get_QH(old_qh->qh_next_qtd) & EHCI_QH_NEXT_QTD_PTR));
1330 
1331 	if (first_dummy_qtd) {
1332 		ASSERT(Get_QTD(first_dummy_qtd->qtd_state) == EHCI_QTD_DUMMY);
1333 
1334 		second_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
1335 		    Get_QTD(first_dummy_qtd->qtd_next_qtd));
1336 
1337 		if (second_dummy_qtd) {
1338 			ASSERT(Get_QTD(second_dummy_qtd->qtd_state) ==
1339 			    EHCI_QTD_DUMMY);
1340 
1341 			ehci_deallocate_qtd(ehcip, second_dummy_qtd);
1342 		}
1343 
1344 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1345 	}
1346 
1347 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1348 	    "ehci_deallocate_qh: Deallocated 0x%p", (void *)old_qh);
1349 
1350 	Set_QH(old_qh->qh_state, EHCI_QH_FREE);
1351 }
1352 
1353 
1354 /*
1355  * ehci_qh_cpu_to_iommu:
1356  *
1357  * This function converts the given Endpoint Descriptor (QH) CPU address
1358  * to an IO address.
1359  *
1360  * NOTE: This function is also called from POLLED MODE.
1361  */
1362 uint32_t
1363 ehci_qh_cpu_to_iommu(
1364 	ehci_state_t	*ehcip,
1365 	ehci_qh_t	*addr)
1366 {
1367 	uint32_t	qh;
1368 
1369 	qh = (uint32_t)ehcip->ehci_qh_pool_cookie.dmac_address +
1370 	    (uint32_t)((uintptr_t)addr - (uintptr_t)(ehcip->ehci_qh_pool_addr));
1371 
1372 	ASSERT(qh >= ehcip->ehci_qh_pool_cookie.dmac_address);
1373 	ASSERT(qh <= ehcip->ehci_qh_pool_cookie.dmac_address +
1374 	    sizeof (ehci_qh_t) * ehci_qh_pool_size);
1375 
1376 	return (qh);
1377 }
1378 
1379 
1380 /*
1381  * ehci_qh_iommu_to_cpu:
1382  *
1383  * This function converts the given Endpoint Descriptor (QH) IO address
1384  * to a CPU address.
1385  */
1386 ehci_qh_t *
1387 ehci_qh_iommu_to_cpu(
1388 	ehci_state_t	*ehcip,
1389 	uintptr_t	addr)
1390 {
1391 	ehci_qh_t	*qh;
1392 
1393 	if (addr == NULL) {
1394 
1395 		return (NULL);
1396 	}
1397 
1398 	qh = (ehci_qh_t *)((uintptr_t)
1399 	    (addr - ehcip->ehci_qh_pool_cookie.dmac_address) +
1400 	    (uintptr_t)ehcip->ehci_qh_pool_addr);
1401 
1402 	ASSERT(qh >= ehcip->ehci_qh_pool_addr);
1403 	ASSERT((uintptr_t)qh <= (uintptr_t)ehcip->ehci_qh_pool_addr +
1404 	    (uintptr_t)(sizeof (ehci_qh_t) * ehci_qh_pool_size));
1405 
1406 	return (qh);
1407 }
1408 
1409 
1410 /*
1411  * Transfer Descriptor manipulation functions
1412  */
1413 
1414 /*
1415  * ehci_initialize_dummy:
1416  *
1417  * An Endpoint Descriptor (QH) has a dummy Transfer Descriptor (QTD) at the
1418  * end of its QTD list. Initially, both the head and tail pointers of the QH
1419  * point to the dummy QTD.
1420  */
1421 static int
1422 ehci_initialize_dummy(
1423 	ehci_state_t	*ehcip,
1424 	ehci_qh_t	*qh)
1425 {
1426 	ehci_qtd_t	*first_dummy_qtd, *second_dummy_qtd;
1427 
1428 	/* Allocate first dummy QTD */
1429 	first_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1430 
1431 	if (first_dummy_qtd == NULL) {
1432 		return (USB_NO_RESOURCES);
1433 	}
1434 
1435 	/* Allocate second dummy QTD */
1436 	second_dummy_qtd = ehci_allocate_qtd_from_pool(ehcip);
1437 
1438 	if (second_dummy_qtd == NULL) {
1439 		/* Deallocate first dummy QTD */
1440 		ehci_deallocate_qtd(ehcip, first_dummy_qtd);
1441 
1442 		return (USB_NO_RESOURCES);
1443 	}
1444 
1445 	/* The QH's next QTD pointer points to this new dummy QTD */
1446 	Set_QH(qh->qh_next_qtd, ehci_qtd_cpu_to_iommu(ehcip,
1447 	    first_dummy_qtd) & EHCI_QH_NEXT_QTD_PTR);
1448 
1449 	/* Set qh's dummy qtd field */
1450 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, first_dummy_qtd));
1451 
1452 	/* Set first_dummy's next qtd pointer */
1453 	Set_QTD(first_dummy_qtd->qtd_next_qtd,
1454 	    ehci_qtd_cpu_to_iommu(ehcip, second_dummy_qtd));
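	/*
	 * At this point the QH's QTD chain looks like (illustrative sketch):
	 *
	 *	qh_next_qtd  --> first_dummy_qtd --> second_dummy_qtd
	 *	qh_dummy_qtd --> first_dummy_qtd
	 */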
1455 
1456 	return (USB_SUCCESS);
1457 }
1458 
1459 /*
1460  * ehci_allocate_ctrl_resources:
1461  *
1462  * Calculates the number of QTDs necessary for a control transfer, and
1463  * allocates all the necessary resources.
1464  *
1465  * Returns NULL if there are insufficient resources, otherwise the TW.
1466  */
1467 ehci_trans_wrapper_t *
1468 ehci_allocate_ctrl_resources(
1469 	ehci_state_t		*ehcip,
1470 	ehci_pipe_private_t	*pp,
1471 	usb_ctrl_req_t		*ctrl_reqp,
1472 	usb_flags_t		usb_flags)
1473 {
1474 	size_t			qtd_count = 2;
1475 	size_t			ctrl_buf_size;
1476 	ehci_trans_wrapper_t	*tw;
1477 
1478 	/* Add one more td for data phase */
1479 	if (ctrl_reqp->ctrl_wLength) {
1480 		qtd_count += 1;
1481 	}
1482 
1483 	/*
1484 	 * If we have a control data phase, the data buffer starts
1485 	 * on the next 4K page boundary. So the TW buffer is allocated
1486 	 * to be larger than required. The buffer in the range of
1487 	 * [SETUP_SIZE, EHCI_MAX_QTD_BUF_SIZE) is just for padding
1488 	 * and not to be transferred.
1489 	 */
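	/*
	 * Illustrative example (assuming a GET_DESCRIPTOR request with
	 * ctrl_wLength of 18): the TW buffer is laid out as
	 *
	 *	[0, SETUP_SIZE)				8-byte setup packet
	 *	[SETUP_SIZE, EHCI_MAX_QTD_BUF_SIZE)	padding, never transferred
	 *	[EHCI_MAX_QTD_BUF_SIZE, +18)		data phase buffer
	 */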
1490 	if (ctrl_reqp->ctrl_wLength) {
1491 		ctrl_buf_size = EHCI_MAX_QTD_BUF_SIZE +
1492 		    ctrl_reqp->ctrl_wLength;
1493 	} else {
1494 		ctrl_buf_size = SETUP_SIZE;
1495 	}
1496 
1497 	tw = ehci_allocate_tw_resources(ehcip, pp, ctrl_buf_size,
1498 	    usb_flags, qtd_count);
1499 
1500 	return (tw);
1501 }
1502 
1503 /*
1504  * ehci_insert_ctrl_req:
1505  *
1506  * Create a Transfer Descriptor (QTD) and a data buffer for a control endpoint.
1507  */
1508 /* ARGSUSED */
1509 void
1510 ehci_insert_ctrl_req(
1511 	ehci_state_t		*ehcip,
1512 	usba_pipe_handle_data_t	*ph,
1513 	usb_ctrl_req_t		*ctrl_reqp,
1514 	ehci_trans_wrapper_t	*tw,
1515 	usb_flags_t		usb_flags)
1516 {
1517 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1518 	uchar_t			bmRequestType = ctrl_reqp->ctrl_bmRequestType;
1519 	uchar_t			bRequest = ctrl_reqp->ctrl_bRequest;
1520 	uint16_t		wValue = ctrl_reqp->ctrl_wValue;
1521 	uint16_t		wIndex = ctrl_reqp->ctrl_wIndex;
1522 	uint16_t		wLength = ctrl_reqp->ctrl_wLength;
1523 	mblk_t			*data = ctrl_reqp->ctrl_data;
1524 	uint32_t		ctrl = 0;
1525 	uint8_t			setup_packet[8];
1526 
1527 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1528 	    "ehci_insert_ctrl_req:");
1529 
1530 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1531 
1532 	/*
1533 	 * Save current control request pointer and timeout values
1534 	 * in transfer wrapper.
1535 	 */
1536 	tw->tw_curr_xfer_reqp = (usb_opaque_t)ctrl_reqp;
1537 	tw->tw_timeout = ctrl_reqp->ctrl_timeout ?
1538 	    ctrl_reqp->ctrl_timeout : EHCI_DEFAULT_XFER_TIMEOUT;
1539 
1540 	/*
1541 	 * Initialize the callback and any callback data for when
1542 	 * the qtd completes.
1543 	 */
1544 	tw->tw_handle_qtd = ehci_handle_ctrl_qtd;
1545 	tw->tw_handle_callback_value = NULL;
1546 
1547 	/*
1548 	 * swap the setup bytes where necessary since we specified
1549 	 * NEVERSWAP
1550 	 */
1551 	setup_packet[0] = bmRequestType;
1552 	setup_packet[1] = bRequest;
1553 	setup_packet[2] = wValue;
1554 	setup_packet[3] = wValue >> 8;
1555 	setup_packet[4] = wIndex;
1556 	setup_packet[5] = wIndex >> 8;
1557 	setup_packet[6] = wLength;
1558 	setup_packet[7] = wLength >> 8;
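	/*
	 * For instance, a standard GET_DESCRIPTOR(DEVICE) request
	 * (bmRequestType 0x80, bRequest 0x06, wValue 0x0100, wIndex 0,
	 * wLength 18) is stored as the byte sequence
	 * 80 06 00 01 00 00 12 00.
	 */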
1559 
1560 	bcopy(setup_packet, tw->tw_buf, SETUP_SIZE);
1561 
1562 	Sync_IO_Buffer_for_device(tw->tw_dmahandle, SETUP_SIZE);
1563 
1564 	ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_0 | EHCI_QTD_CTRL_SETUP_PID);
1565 
1566 	/*
1567 	 * The QTDs are placed on the QH one at a time.
1568 	 * Once this QTD is placed on the done list, the
1569 	 * data or status phase QTD will be enqueued.
1570 	 */
1571 	(void) ehci_insert_qtd(ehcip, ctrl, 0, SETUP_SIZE,
1572 	    EHCI_CTRL_SETUP_PHASE, pp, tw);
1573 
1574 	USB_DPRINTF_L3(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
1575 	    "ehci_insert_ctrl_req: pp 0x%p", (void *)pp);
1576 
1577 	/*
1578 	 * If this control transfer has a data phase, record the
1579 	 * direction. If the data phase is an OUT transaction,
1580 	 * copy the data into the buffer of the transfer wrapper.
1581 	 */
1582 	if (wLength != 0) {
1583 		/* There is a data stage.  Find the direction */
1584 		if (bmRequestType & USB_DEV_REQ_DEV_TO_HOST) {
1585 			tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
1586 		} else {
1587 			tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
1588 
1589 			/* Copy the data into the message */
1590 			bcopy(data->b_rptr, tw->tw_buf + EHCI_MAX_QTD_BUF_SIZE,
1591 			    wLength);
1592 
1593 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1594 			    wLength + EHCI_MAX_QTD_BUF_SIZE);
1595 		}
1596 
1597 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 | tw->tw_direction);
1598 
1599 		/*
1600 		 * Create the QTD.  If this is an OUT transaction,
1601 		 * the data is already in the buffer of the TW.
1602 		 * The transfer should start from EHCI_MAX_QTD_BUF_SIZE
1603 		 * which is 4K aligned, though the ctrl phase only
1604 		 * transfers a length of SETUP_SIZE. The padding data
1605 		 * in the TW buffer are discarded.
1606 		 */
1607 		(void) ehci_insert_qtd(ehcip, ctrl, EHCI_MAX_QTD_BUF_SIZE,
1608 		    tw->tw_length - EHCI_MAX_QTD_BUF_SIZE,
1609 		    EHCI_CTRL_DATA_PHASE, pp, tw);
1610 
1611 		/*
1612 		 * The direction of the STATUS QTD depends on
1613 		 * the direction of the transfer.
1614 		 */
1615 		if (tw->tw_direction == EHCI_QTD_CTRL_IN_PID) {
1616 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1617 			    EHCI_QTD_CTRL_OUT_PID |
1618 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1619 		} else {
1620 			ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1|
1621 			    EHCI_QTD_CTRL_IN_PID |
1622 			    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1623 		}
1624 	} else {
1625 		/*
1626 		 * There is no data stage, so initiate the
1627 		 * status phase from the host.
1628 		 */
1629 		ctrl = (EHCI_QTD_CTRL_DATA_TOGGLE_1 |
1630 		    EHCI_QTD_CTRL_IN_PID |
1631 		    EHCI_QTD_CTRL_INTR_ON_COMPLETE);
1632 	}
1633 
1634 
1635 	(void) ehci_insert_qtd(ehcip, ctrl, 0, 0,
1636 	    EHCI_CTRL_STATUS_PHASE, pp,  tw);
1637 
1638 	/* Start the timer for this control transfer */
1639 	ehci_start_xfer_timer(ehcip, pp, tw);
1640 }
1641 
1642 
1643 /*
1644  * ehci_allocate_bulk_resources:
1645  *
1646  * Calculates the number of QTDs necessary for a bulk transfer, and
1647  * allocates all the necessary resources.
1648  *
1649  * Returns NULL if there are insufficient resources, otherwise the TW.
1650  */
1651 ehci_trans_wrapper_t *
1652 ehci_allocate_bulk_resources(
1653 	ehci_state_t		*ehcip,
1654 	ehci_pipe_private_t	*pp,
1655 	usb_bulk_req_t		*bulk_reqp,
1656 	usb_flags_t		usb_flags)
1657 {
1658 	size_t			qtd_count = 0;
1659 	ehci_trans_wrapper_t	*tw;
1660 
1661 	/* Check the size of bulk request */
1662 	if (bulk_reqp->bulk_len > EHCI_MAX_BULK_XFER_SIZE) {
1663 
1664 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1665 		    "ehci_allocate_bulk_resources: Bulk request size 0x%x is "
1666 		    "more than 0x%x", bulk_reqp->bulk_len,
1667 		    EHCI_MAX_BULK_XFER_SIZE);
1668 
1669 		return (NULL);
1670 	}
1671 
1672 	/* Calculate the number of QTDs required for this request */
1673 	qtd_count = bulk_reqp->bulk_len / EHCI_MAX_QTD_XFER_SIZE;
1674 	if (bulk_reqp->bulk_len % EHCI_MAX_QTD_XFER_SIZE ||
1675 	    bulk_reqp->bulk_len == 0) {
1676 		qtd_count += 1;
1677 	}
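	/*
	 * The above is a ceiling division. For example, assuming
	 * EHCI_MAX_QTD_XFER_SIZE is 20K (five 4K buffer pages per QTD),
	 * a 45K bulk_len yields qtd_count = 2 + 1 = 3; a zero-length
	 * request still gets one QTD.
	 */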
1678 
1679 	tw = ehci_allocate_tw_resources(ehcip, pp, bulk_reqp->bulk_len,
1680 	    usb_flags, qtd_count);
1681 
1682 	return (tw);
1683 }
1684 
1685 /*
1686  * ehci_insert_bulk_req:
1687  *
1688  * Create a Transfer Descriptor (QTD) and a data buffer for a bulk
1689  * endpoint.
1690  */
1691 /* ARGSUSED */
1692 void
1693 ehci_insert_bulk_req(
1694 	ehci_state_t		*ehcip,
1695 	usba_pipe_handle_data_t	*ph,
1696 	usb_bulk_req_t		*bulk_reqp,
1697 	ehci_trans_wrapper_t	*tw,
1698 	usb_flags_t		flags)
1699 {
1700 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1701 	uint_t			bulk_pkt_size, count;
1702 	size_t			residue = 0, len = 0;
1703 	uint32_t		ctrl = 0;
1704 	int			pipe_dir;
1705 
1706 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1707 	    "ehci_insert_bulk_req: bulk_reqp = 0x%p flags = 0x%x",
1708 	    bulk_reqp, flags);
1709 
1710 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1711 
1712 	/* Get the bulk pipe direction */
1713 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
1714 
1715 	/* Get the required bulk packet size */
1716 	bulk_pkt_size = min(bulk_reqp->bulk_len, EHCI_MAX_QTD_XFER_SIZE);
1717 
1718 	if (bulk_pkt_size) {
1719 		residue = tw->tw_length % bulk_pkt_size;
1720 	}
1721 
1722 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1723 	    "ehci_insert_bulk_req: bulk_pkt_size = %d", bulk_pkt_size);
1724 
1725 	/*
1726 	 * Save current bulk request pointer and timeout values
1727 	 * in transfer wrapper.
1728 	 */
1729 	tw->tw_curr_xfer_reqp = (usb_opaque_t)bulk_reqp;
1730 	tw->tw_timeout = bulk_reqp->bulk_timeout;
1731 
1732 	/*
1733 	 * Initialize the callback and any callback
1734 	 * data required when the qtd completes.
1735 	 */
1736 	tw->tw_handle_qtd = ehci_handle_bulk_qtd;
1737 	tw->tw_handle_callback_value = NULL;
1738 
1739 	tw->tw_direction = (pipe_dir == USB_EP_DIR_OUT) ?
1740 	    EHCI_QTD_CTRL_OUT_PID : EHCI_QTD_CTRL_IN_PID;
1741 
1742 	if (tw->tw_direction == EHCI_QTD_CTRL_OUT_PID) {
1743 
1744 		if (bulk_reqp->bulk_len) {
1745 			ASSERT(bulk_reqp->bulk_data != NULL);
1746 
1747 			bcopy(bulk_reqp->bulk_data->b_rptr, tw->tw_buf,
1748 			    bulk_reqp->bulk_len);
1749 
1750 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
1751 			    bulk_reqp->bulk_len);
1752 		}
1753 	}
1754 
1755 	ctrl = tw->tw_direction;
1756 
1757 	/* Insert all the bulk QTDs */
1758 	for (count = 0; count < tw->tw_num_qtds; count++) {
1759 
1760 		/* Check for last qtd */
1761 		if (count == (tw->tw_num_qtds - 1)) {
1762 
1763 			ctrl |= EHCI_QTD_CTRL_INTR_ON_COMPLETE;
1764 
1765 			/* Check for inserting residue data */
1766 			if (residue) {
1767 				bulk_pkt_size = residue;
1768 			}
1769 		}
1770 
1771 		/* Insert the QTD onto the endpoint */
1772 		(void) ehci_insert_qtd(ehcip, ctrl, len, bulk_pkt_size,
1773 		    0, pp, tw);
1774 
1775 		len = len + bulk_pkt_size;
1776 	}
1777 
1778 	/* Start the timer for this bulk transfer */
1779 	ehci_start_xfer_timer(ehcip, pp, tw);
1780 }
1781 
1782 
1783 /*
1784  * ehci_start_periodic_pipe_polling:
1785  *
1786  * NOTE: This function is also called from POLLED MODE.
1787  */
1788 int
1789 ehci_start_periodic_pipe_polling(
1790 	ehci_state_t		*ehcip,
1791 	usba_pipe_handle_data_t	*ph,
1792 	usb_opaque_t		periodic_in_reqp,
1793 	usb_flags_t		flags)
1794 {
1795 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1796 	usb_ep_descr_t		*eptd = &ph->p_ep;
1797 	int			error = USB_SUCCESS;
1798 
1799 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
1800 	    "ehci_start_periodic_pipe_polling: ep%d",
1801 	    ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK);
1802 
1803 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1804 
1805 	/*
1806 	 * Check and handle start polling on root hub interrupt pipe.
1807 	 */
1808 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
1809 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
1810 	    USB_EP_ATTR_INTR)) {
1811 
1812 		error = ehci_handle_root_hub_pipe_start_intr_polling(ph,
1813 		    (usb_intr_req_t *)periodic_in_reqp, flags);
1814 
1815 		return (error);
1816 	}
1817 
1818 	switch (pp->pp_state) {
1819 	case EHCI_PIPE_STATE_IDLE:
1820 		/* Save the Original client's Periodic IN request */
1821 		pp->pp_client_periodic_in_reqp = periodic_in_reqp;
1822 
1823 		/*
1824 		 * If this pipe is uninitialized or a valid QTD is
1825 		 * not found, then insert a QTD on the interrupt IN
1826 		 * endpoint.
1827 		 */
1828 		error = ehci_start_pipe_polling(ehcip, ph, flags);
1829 
1830 		if (error != USB_SUCCESS) {
1831 			USB_DPRINTF_L2(PRINT_MASK_INTR,
1832 			    ehcip->ehci_log_hdl,
1833 			    "ehci_start_periodic_pipe_polling: "
1834 			    "Start polling failed");
1835 
1836 			pp->pp_client_periodic_in_reqp = NULL;
1837 
1838 			return (error);
1839 		}
1840 
1841 		USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
1842 		    "ehci_start_periodic_pipe_polling: PP = 0x%p", pp);
1843 
1844 #ifdef DEBUG
1845 		switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1846 		case USB_EP_ATTR_INTR:
1847 			ASSERT((pp->pp_tw_head != NULL) &&
1848 			    (pp->pp_tw_tail != NULL));
1849 			break;
1850 		case USB_EP_ATTR_ISOCH:
1851 			ASSERT((pp->pp_itw_head != NULL) &&
1852 			    (pp->pp_itw_tail != NULL));
1853 			break;
1854 		}
1855 #endif
1856 
1857 		break;
1858 	case EHCI_PIPE_STATE_ACTIVE:
1859 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1860 		    ehcip->ehci_log_hdl,
1861 		    "ehci_start_periodic_pipe_polling: "
1862 		    "Polling is already in progress");
1863 
1864 		error = USB_FAILURE;
1865 		break;
1866 	case EHCI_PIPE_STATE_ERROR:
1867 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1868 		    ehcip->ehci_log_hdl,
1869 		    "ehci_start_periodic_pipe_polling: "
1870 		    "Pipe is halted; perform a reset "
1871 		    "before restarting polling");
1872 
1873 		error = USB_FAILURE;
1874 		break;
1875 	default:
1876 		USB_DPRINTF_L2(PRINT_MASK_INTR,
1877 		    ehcip->ehci_log_hdl,
1878 		    "ehci_start_periodic_pipe_polling: "
1879 		    "Undefined state");
1880 
1881 		error = USB_FAILURE;
1882 		break;
1883 	}
1884 
1885 	return (error);
1886 }
1887 
1888 
1889 /*
1890  * ehci_start_pipe_polling:
1891  *
1892  * Insert the number of periodic requests corresponding to polling
1893  * interval as calculated during pipe open.
1894  */
1895 static int
1896 ehci_start_pipe_polling(
1897 	ehci_state_t		*ehcip,
1898 	usba_pipe_handle_data_t	*ph,
1899 	usb_flags_t		flags)
1900 {
1901 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1902 	usb_ep_descr_t		*eptd = &ph->p_ep;
1903 	int			error = USB_FAILURE;
1904 
1905 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1906 	    "ehci_start_pipe_polling:");
1907 
1908 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1909 
1910 	/*
1911 	 * For a start polling request, pp_max_periodic_req_cnt will be zero;
1912 	 * for a restart polling request, it will be non-zero.
1913 	 *
1914 	 * In the start polling case, find the number of requests required
1915 	 * for an Interrupt IN endpoint corresponding to the endpoint's
1916 	 * polling interval. For Isochronous IN endpoints, the count is
1917 	 * always fixed since their polling interval is one ms.
1918 	 */
1919 	if (pp->pp_max_periodic_req_cnt == 0) {
1920 
1921 		ehci_set_periodic_pipe_polling(ehcip, ph);
1922 	}
1923 
1924 	ASSERT(pp->pp_max_periodic_req_cnt != 0);
1925 
1926 	switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
1927 	case USB_EP_ATTR_INTR:
1928 		error = ehci_start_intr_polling(ehcip, ph, flags);
1929 		break;
1930 	case USB_EP_ATTR_ISOCH:
1931 		error = ehci_start_isoc_polling(ehcip, ph, flags);
1932 		break;
1933 	}
1934 
1935 	return (error);
1936 }
1937 
1938 static int
1939 ehci_start_intr_polling(
1940 	ehci_state_t		*ehcip,
1941 	usba_pipe_handle_data_t	*ph,
1942 	usb_flags_t		flags)
1943 {
1944 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
1945 	ehci_trans_wrapper_t	*tw_list, *tw;
1946 	int			i, total_tws;
1947 	int			error = USB_SUCCESS;
1948 
1949 	/* Allocate all the necessary resources for the IN transfer */
1950 	tw_list = NULL;
1951 	total_tws = pp->pp_max_periodic_req_cnt - pp->pp_cur_periodic_req_cnt;
1952 	for (i = 0; i < total_tws; i += 1) {
1953 		tw = ehci_allocate_intr_resources(ehcip, ph, NULL, flags);
1954 		if (tw == NULL) {
1955 			error = USB_NO_RESOURCES;
1956 			/* There are not enough resources, deallocate the TWs */
1957 			tw = tw_list;
1958 			while (tw != NULL) {
1959 				tw_list = tw->tw_next;
1960 				ehci_deallocate_intr_in_resource(
1961 				    ehcip, pp, tw);
1962 				ehci_deallocate_tw(ehcip, pp, tw);
1963 				tw = tw_list;
1964 			}
1965 
1966 			return (error);
1967 		} else {
1968 			if (tw_list == NULL) {
1969 				tw_list = tw;
1970 			}
1971 		}
1972 	}
1973 
1974 	while (pp->pp_cur_periodic_req_cnt < pp->pp_max_periodic_req_cnt) {
1975 
1976 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
1977 		    "ehci_start_intr_polling: max = %d curr = %d tw = %p:",
1978 		    pp->pp_max_periodic_req_cnt, pp->pp_cur_periodic_req_cnt,
1979 		    tw_list);
1980 
1981 		tw = tw_list;
1982 		tw_list = tw->tw_next;
1983 
1984 		ehci_insert_intr_req(ehcip, pp, tw, flags);
1985 
1986 		pp->pp_cur_periodic_req_cnt++;
1987 	}
1988 
1989 	return (error);
1990 }
1991 
1992 
1993 /*
1994  * ehci_set_periodic_pipe_polling:
1995  *
1996  * Calculate the number of periodic requests needed, corresponding to the
1997  * interrupt IN endpoint's polling interval. The table below gives the
1998  * number of periodic requests needed for an interrupt IN endpoint
1999  * according to its polling interval.
2000  *
2001  * Polling interval		Number of periodic requests
2002  *
2003  * 1ms				4
2004  * 2ms				2
2005  * 4ms to 32ms			1
2006  */
2007 static void
2008 ehci_set_periodic_pipe_polling(
2009 	ehci_state_t		*ehcip,
2010 	usba_pipe_handle_data_t	*ph)
2011 {
2012 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2013 	usb_ep_descr_t		*endpoint = &ph->p_ep;
2014 	uchar_t			ep_attr = endpoint->bmAttributes;
2015 	uint_t			interval;
2016 
2017 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2018 	    "ehci_set_periodic_pipe_polling:");
2019 
2020 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2021 
2022 	pp->pp_cur_periodic_req_cnt = 0;
2023 
2024 	/*
2025 	 * Check whether the USB_ATTRS_ONE_XFER attribute is set on an
2026 	 * interrupt IN request and, if so, limit pp_max_periodic_req_cnt.
2027 	 */
2028 	if (((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) &&
2029 	    (pp->pp_client_periodic_in_reqp)) {
2030 		usb_intr_req_t *intr_reqp = (usb_intr_req_t *)
2031 		    pp->pp_client_periodic_in_reqp;
2032 
2033 		if (intr_reqp->intr_attributes &
2034 		    USB_ATTRS_ONE_XFER) {
2035 
2036 			pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2037 
2038 			return;
2039 		}
2040 	}
2041 
2042 	mutex_enter(&ph->p_usba_device->usb_mutex);
2043 
2044 	/*
2045 	 * The ehci_adjust_polling_interval function will not fail
2046 	 * at this instance since bandwidth allocation is already
2047 	 * done. Here we are getting only the periodic interval.
2048 	 */
2049 	interval = ehci_adjust_polling_interval(ehcip, endpoint,
2050 	    ph->p_usba_device->usb_port_status);
2051 
2052 	mutex_exit(&ph->p_usba_device->usb_mutex);
2053 
2054 	switch (interval) {
2055 	case EHCI_INTR_1MS_POLL:
2056 		pp->pp_max_periodic_req_cnt = EHCI_INTR_1MS_REQS;
2057 		break;
2058 	case EHCI_INTR_2MS_POLL:
2059 		pp->pp_max_periodic_req_cnt = EHCI_INTR_2MS_REQS;
2060 		break;
2061 	default:
2062 		pp->pp_max_periodic_req_cnt = EHCI_INTR_XMS_REQS;
2063 		break;
2064 	}
2065 
2066 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2067 	    "ehci_set_periodic_pipe_polling: Max periodic requests = %d",
2068 	    pp->pp_max_periodic_req_cnt);
2069 }
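
/*
 * For example, per the table above, an interrupt IN endpoint polled every
 * 2ms ends up with pp_max_periodic_req_cnt = EHCI_INTR_2MS_REQS (two
 * requests), so ehci_start_intr_polling() keeps two interrupt IN requests
 * outstanding on the endpoint; a USB_ATTRS_ONE_XFER request is limited to
 * EHCI_INTR_XMS_REQS (a single request) instead.
 */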
2070 
2071 /*
2072  * ehci_allocate_intr_resources:
2073  *
2074  * Calculates the number of QTDs necessary for an interrupt transfer, and
2075  * allocates all the necessary resources.
2076  *
2077  * Returns NULL if resources are insufficient, otherwise the TW.
2078  */
2079 ehci_trans_wrapper_t *
2080 ehci_allocate_intr_resources(
2081 	ehci_state_t		*ehcip,
2082 	usba_pipe_handle_data_t	*ph,
2083 	usb_intr_req_t		*intr_reqp,
2084 	usb_flags_t		flags)
2085 {
2086 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2087 	int			pipe_dir;
2088 	size_t			qtd_count = 1;
2089 	size_t			tw_length;
2090 	ehci_trans_wrapper_t	*tw;
2091 
2092 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2093 	    "ehci_allocate_intr_resources:");
2094 
2095 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2096 
2097 	pipe_dir = ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK;
2098 
2099 	/* Get the length of interrupt transfer & alloc data */
2100 	if (intr_reqp) {
2101 		tw_length = intr_reqp->intr_len;
2102 	} else {
2103 		ASSERT(pipe_dir == USB_EP_DIR_IN);
2104 		tw_length = (pp->pp_client_periodic_in_reqp) ?
2105 		    (((usb_intr_req_t *)pp->
2106 		    pp_client_periodic_in_reqp)->intr_len) :
2107 		    ph->p_ep.wMaxPacketSize;
2108 	}
2109 
2110 	/* Check the size of interrupt request */
2111 	if (tw_length > EHCI_MAX_QTD_XFER_SIZE) {
2112 
2113 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2114 		    "ehci_allocate_intr_resources: Intr request size 0x%lx is "
2115 		    "more than 0x%x", tw_length, EHCI_MAX_QTD_XFER_SIZE);
2116 
2117 		return (NULL);
2118 	}
2119 
2120 	if ((tw = ehci_allocate_tw_resources(ehcip, pp, tw_length, flags,
2121 	    qtd_count)) == NULL) {
2122 
2123 		return (NULL);
2124 	}
2125 
2126 	if (pipe_dir == USB_EP_DIR_IN) {
2127 		if (ehci_allocate_intr_in_resource(ehcip, pp, tw, flags) !=
2128 		    USB_SUCCESS) {
2129 			ehci_deallocate_tw(ehcip, pp, tw);

			/* The TW has been freed; don't touch it again */
			return (NULL);
2130 		}
2131 		tw->tw_direction = EHCI_QTD_CTRL_IN_PID;
2132 	} else {
2133 		if (tw_length) {
2134 			ASSERT(intr_reqp->intr_data != NULL);
2135 
2136 			/* Copy the data into the buffer */
2137 			bcopy(intr_reqp->intr_data->b_rptr, tw->tw_buf,
2138 			    intr_reqp->intr_len);
2139 
2140 			Sync_IO_Buffer_for_device(tw->tw_dmahandle,
2141 			    intr_reqp->intr_len);
2142 		}
2143 
2144 		tw->tw_curr_xfer_reqp = (usb_opaque_t)intr_reqp;
2145 		tw->tw_direction = EHCI_QTD_CTRL_OUT_PID;
2146 	}
2147 
2148 	if (intr_reqp) {
2149 		tw->tw_timeout = intr_reqp->intr_timeout;
2150 	}
2151 
2152 	/*
2153 	 * Initialize the callback and any callback
2154 	 * data required when the qtd completes.
2155 	 */
2156 	tw->tw_handle_qtd = ehci_handle_intr_qtd;
2157 	tw->tw_handle_callback_value = NULL;
2158 
2159 	return (tw);
2160 }
2161 
2162 
2163 /*
2164  * ehci_insert_intr_req:
2165  *
2166  * Insert an Interrupt request into the Host Controller's periodic list.
2167  */
2168 /* ARGSUSED */
2169 void
2170 ehci_insert_intr_req(
2171 	ehci_state_t		*ehcip,
2172 	ehci_pipe_private_t	*pp,
2173 	ehci_trans_wrapper_t	*tw,
2174 	usb_flags_t		flags)
2175 {
2176 	uint_t			ctrl = 0;
2177 
2178 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2179 
2180 	ASSERT(tw->tw_curr_xfer_reqp != NULL);
2181 
2182 	ctrl = (tw->tw_direction | EHCI_QTD_CTRL_INTR_ON_COMPLETE);
2183 
2184 	/* Insert another interrupt QTD */
2185 	(void) ehci_insert_qtd(ehcip, ctrl, 0, tw->tw_length, 0, pp, tw);
2186 
2187 	/* Start the timer for this Interrupt transfer */
2188 	ehci_start_xfer_timer(ehcip, pp, tw);
2189 }
2190 
2191 
2192 /*
2193  * ehci_stop_periodic_pipe_polling:
2194  */
2195 /* ARGSUSED */
2196 int
2197 ehci_stop_periodic_pipe_polling(
2198 	ehci_state_t		*ehcip,
2199 	usba_pipe_handle_data_t	*ph,
2200 	usb_flags_t		flags)
2201 {
2202 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2203 	usb_ep_descr_t		*eptd = &ph->p_ep;
2204 
2205 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2206 	    "ehci_stop_periodic_pipe_polling: Flags = 0x%x", flags);
2207 
2208 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2209 
2210 	/*
2211 	 * Check and handle stop polling on root hub interrupt pipe.
2212 	 */
2213 	if ((ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) &&
2214 	    ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
2215 	    USB_EP_ATTR_INTR)) {
2216 
2217 		ehci_handle_root_hub_pipe_stop_intr_polling(ph, flags);
2218 
2219 		return (USB_SUCCESS);
2220 	}
2221 
2222 	if (pp->pp_state != EHCI_PIPE_STATE_ACTIVE) {
2223 
2224 		USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
2225 		    "ehci_stop_periodic_pipe_polling: "
2226 		    "Polling already stopped");
2227 
2228 		return (USB_SUCCESS);
2229 	}
2230 
2231 	/* Set pipe state to pipe stop polling */
2232 	pp->pp_state = EHCI_PIPE_STATE_STOP_POLLING;
2233 
2234 	ehci_pipe_cleanup(ehcip, ph);
2235 
2236 	return (USB_SUCCESS);
2237 }
2238 
2239 
2240 /*
2241  * ehci_insert_qtd:
2242  *
2243  * Insert a Transfer Descriptor (QTD) on an Endpoint Descriptor (QH).
2244  * Always returns USB_SUCCESS for now.	Once Isoch has been implemented,
2245  * it may return USB_FAILURE.
2246  */
2247 int
2248 ehci_insert_qtd(
2249 	ehci_state_t		*ehcip,
2250 	uint32_t		qtd_ctrl,
2251 	size_t			qtd_dma_offs,
2252 	size_t			qtd_length,
2253 	uint32_t		qtd_ctrl_phase,
2254 	ehci_pipe_private_t	*pp,
2255 	ehci_trans_wrapper_t	*tw)
2256 {
2257 	ehci_qtd_t		*curr_dummy_qtd, *next_dummy_qtd;
2258 	ehci_qtd_t		*new_dummy_qtd;
2259 	ehci_qh_t		*qh = pp->pp_qh;
2260 	int			error = USB_SUCCESS;
2261 
2262 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2263 
2264 	/* Allocate new dummy QTD */
2265 	new_dummy_qtd = tw->tw_qtd_free_list;
2266 
2267 	ASSERT(new_dummy_qtd != NULL);
2268 	tw->tw_qtd_free_list = ehci_qtd_iommu_to_cpu(ehcip,
2269 	    Get_QTD(new_dummy_qtd->qtd_tw_next_qtd));
2270 	Set_QTD(new_dummy_qtd->qtd_tw_next_qtd, NULL);
2271 
2272 	/* Get the current and next dummy QTDs */
2273 	curr_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2274 	    Get_QH(qh->qh_dummy_qtd));
2275 	next_dummy_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2276 	    Get_QTD(curr_dummy_qtd->qtd_next_qtd));
2277 
2278 	/* Update QH's dummy qtd field */
2279 	Set_QH(qh->qh_dummy_qtd, ehci_qtd_cpu_to_iommu(ehcip, next_dummy_qtd));
2280 
2281 	/* Update next dummy's next qtd pointer */
2282 	Set_QTD(next_dummy_qtd->qtd_next_qtd,
2283 	    ehci_qtd_cpu_to_iommu(ehcip, new_dummy_qtd));
2284 
2285 	/*
2286 	 * Fill in the current dummy qtd and
2287 	 * add the new dummy to the end.
2288 	 */
2289 	ehci_fill_in_qtd(ehcip, curr_dummy_qtd, qtd_ctrl,
2290 	    qtd_dma_offs, qtd_length, qtd_ctrl_phase, pp, tw);
2291 
2292 	/* Insert this qtd onto the tw */
2293 	ehci_insert_qtd_on_tw(ehcip, tw, curr_dummy_qtd);
2294 
2295 	/*
2296 	 * Insert this qtd onto active qtd list.
2297 	 * Don't insert polled mode qtd here.
2298 	 */
2299 	if (pp->pp_flag != EHCI_POLLED_MODE_FLAG) {
2300 		/* Insert this qtd onto active qtd list */
2301 		ehci_insert_qtd_into_active_qtd_list(ehcip, curr_dummy_qtd);
2302 	}
2303 
2304 	/* Print qh and qtd */
2305 	ehci_print_qh(ehcip, qh);
2306 	ehci_print_qtd(ehcip, curr_dummy_qtd);
2307 
2308 	return (error);
2309 }
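
/*
 * In short, the dummy QTD swap performed above works as follows: the QH
 * always terminates in a halted dummy QTD that the host controller ignores.
 * To queue new work, the current dummy is filled in with the real transfer,
 * the dummy following it becomes the QH's new dummy, and a fresh dummy taken
 * from the TW's free list is linked on the end.  The QH's qTD chain is thus
 * never left half-built while the host controller may be walking it.
 */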
2310 
2311 
2312 /*
2313  * ehci_allocate_qtd_from_pool:
2314  *
2315  * Allocate a Transfer Descriptor (QTD) from the QTD buffer pool.
2316  */
2317 static ehci_qtd_t *
2318 ehci_allocate_qtd_from_pool(ehci_state_t	*ehcip)
2319 {
2320 	int		i, ctrl;
2321 	ehci_qtd_t	*qtd;
2322 
2323 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2324 
2325 	/*
2326 	 * Search for a blank Transfer Descriptor (QTD)
2327 	 * in the QTD buffer pool.
2328 	 */
2329 	for (i = 0; i < ehci_qtd_pool_size; i ++) {
2330 		ctrl = Get_QTD(ehcip->ehci_qtd_pool_addr[i].qtd_state);
2331 		if (ctrl == EHCI_QTD_FREE) {
2332 			break;
2333 		}
2334 	}
2335 
2336 	if (i >= ehci_qtd_pool_size) {
2337 		USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2338 		    "ehci_allocate_qtd_from_pool: QTD exhausted");
2339 
2340 		return (NULL);
2341 	}
2342 
2343 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2344 	    "ehci_allocate_qtd_from_pool: Allocated %d", i);
2345 
2346 	/* Create a new dummy for the end of the QTD list */
2347 	qtd = &ehcip->ehci_qtd_pool_addr[i];
2348 
2349 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2350 	    "ehci_allocate_qtd_from_pool: qtd 0x%p", (void *)qtd);
2351 
2352 	/* Mark the newly allocated QTD as a dummy */
2353 	Set_QTD(qtd->qtd_state, EHCI_QTD_DUMMY);
2354 
2355 	/* Mark the status of this new QTD to halted state */
2356 	Set_QTD(qtd->qtd_ctrl, EHCI_QTD_CTRL_HALTED_XACT);
2357 
2358 	/* Disable dummy QTD's next and alternate next pointers */
2359 	Set_QTD(qtd->qtd_next_qtd, EHCI_QTD_NEXT_QTD_PTR_VALID);
2360 	Set_QTD(qtd->qtd_alt_next_qtd, EHCI_QTD_ALT_NEXT_QTD_PTR_VALID);
2361 
2362 	return (qtd);
2363 }
2364 
2365 
2366 /*
2367  * ehci_fill_in_qtd:
2368  *
2369  * Fill in the fields of a Transfer Descriptor (QTD).
2370  * The "Buffer Pointer" fields of a QTD are retrieved from the TW
2371  * it is associated with.
2372  *
2373  * Note:
2374  * qtd_dma_offs - the starting offset into the TW buffer, where the QTD
2375  *                should transfer from. It should be 4K aligned. And when
2376  *                a TW has more than one QTD, the QTDs must be filled in
2377  *                increasing order.
2378  * qtd_length - the total bytes to transfer.
2379  */
2380 /*ARGSUSED*/
2381 static void
2382 ehci_fill_in_qtd(
2383 	ehci_state_t		*ehcip,
2384 	ehci_qtd_t		*qtd,
2385 	uint32_t		qtd_ctrl,
2386 	size_t			qtd_dma_offs,
2387 	size_t			qtd_length,
2388 	uint32_t		qtd_ctrl_phase,
2389 	ehci_pipe_private_t	*pp,
2390 	ehci_trans_wrapper_t	*tw)
2391 {
2392 	uint32_t		buf_addr;
2393 	size_t			buf_len = qtd_length;
2394 	uint32_t		ctrl = qtd_ctrl;
2395 	uint_t			i = 0;
2396 	int			rem_len;
2397 
2398 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2399 	    "ehci_fill_in_qtd: qtd 0x%p ctrl 0x%x bufoffs 0x%lx "
2400 	    "len 0x%lx", qtd, qtd_ctrl, qtd_dma_offs, qtd_length);
2401 
2402 	/* Assert that the qtd to be filled in is a dummy */
2403 	ASSERT(Get_QTD(qtd->qtd_state) == EHCI_QTD_DUMMY);
2404 
2405 	/* Change QTD's state Active */
2406 	Set_QTD(qtd->qtd_state, EHCI_QTD_ACTIVE);
2407 
2408 	/* Set the total length data transfer */
2409 	ctrl |= (((qtd_length << EHCI_QTD_CTRL_BYTES_TO_XFER_SHIFT)
2410 	    & EHCI_QTD_CTRL_BYTES_TO_XFER) | EHCI_QTD_CTRL_MAX_ERR_COUNTS);
2411 
2412 	/*
2413 	 * QTDs must be filled in increasing DMA offset order.
2414 	 * tw_dma_offs is initialized to be 0 at TW creation and
2415 	 * is only increased in this function.
2416 	 */
2417 	ASSERT(buf_len == 0 || qtd_dma_offs >= tw->tw_dma_offs);
2418 
2419 	/*
2420 	 * Save the starting dma buffer offset used and
2421 	 * length of data that will be transfered in
2422 	 * the current QTD.
2423 	 */
2424 	Set_QTD(qtd->qtd_xfer_offs, qtd_dma_offs);
2425 	Set_QTD(qtd->qtd_xfer_len, buf_len);
2426 
2427 	while (buf_len) {
2428 		/*
2429 		 * Advance to the next DMA cookie until finding the cookie
2430 		 * that qtd_dma_offs falls in.
2431 		 * It is very likely this loop will never repeat more than
2432 		 * once. It is here just to accommodate the case qtd_dma_offs
2433 		 * is increased by multiple cookies during two consecutive
2434 		 * calls into this function. In that case, the interim DMA
2435 		 * buffer is allowed to be skipped.
2436 		 */
2437 		while ((tw->tw_dma_offs + tw->tw_cookie.dmac_size) <=
2438 		    qtd_dma_offs) {
2439 			/*
2440 			 * tw_dma_offs always points to the starting offset
2441 			 * of a cookie
2442 			 */
2443 			tw->tw_dma_offs += tw->tw_cookie.dmac_size;
2444 			ddi_dma_nextcookie(tw->tw_dmahandle, &tw->tw_cookie);
2445 			tw->tw_cookie_idx++;
2446 			ASSERT(tw->tw_cookie_idx < tw->tw_ncookies);
2447 		}
2448 
2449 		/*
2450 		 * Count the remaining buffer length to be filled into
2451 		 * the QTD for the current DMA cookie
2452 		 */
2453 		rem_len = (tw->tw_dma_offs + tw->tw_cookie.dmac_size) -
2454 		    qtd_dma_offs;
2455 
2456 		/* Update the beginning of the buffer */
2457 		buf_addr = (qtd_dma_offs - tw->tw_dma_offs) +
2458 		    tw->tw_cookie.dmac_address;
2459 		ASSERT((buf_addr % EHCI_4K_ALIGN) == 0);
2460 		Set_QTD(qtd->qtd_buf[i], buf_addr);
2461 
2462 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2463 		    "ehci_fill_in_qtd: dmac_addr 0x%p dmac_size "
2464 		    "0x%lx idx %d", buf_addr, tw->tw_cookie.dmac_size,
2465 		    tw->tw_cookie_idx);
2466 
2467 		if (buf_len <= EHCI_MAX_QTD_BUF_SIZE) {
2468 			ASSERT(buf_len <= rem_len);
2469 			break;
2470 		} else {
2471 			ASSERT(rem_len >= EHCI_MAX_QTD_BUF_SIZE);
2472 			buf_len -= EHCI_MAX_QTD_BUF_SIZE;
2473 			qtd_dma_offs += EHCI_MAX_QTD_BUF_SIZE;
2474 		}
2475 
2476 		i++;
2477 	}
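
	/*
	 * Illustrative walk-through of the loop above (assuming
	 * EHCI_MAX_QTD_BUF_SIZE is the 4K EHCI page size): a 0x1800 byte
	 * QTD starting at qtd_dma_offs 0 within one large cookie puts the
	 * cookie's dmac_address into qtd_buf[0] and the following 4K page
	 * into qtd_buf[1]; the remaining 0x800 bytes fit in that second
	 * page, so the loop exits after filling two buffer pointers.
	 */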
2478 
2479 	/*
2480 	 * Set up the alternate next qTD pointer if appropriate.  The alternate
2481 	 * qtd currently points to a QTD that is not yet linked, but will
2482 	 * be in the very near future.	If a short_xfer occurs in this
2483 	 * situation, the HC will automatically skip this QH.	Eventually
2484 	 * everything will be placed and the alternate_qtd will be a valid QTD.
2485 	 * For more information on alternate qtds look at section 3.5.2 in the
2486 	 * EHCI spec.
2487 	 */
2488 	if (tw->tw_alt_qtd != NULL) {
2489 		Set_QTD(qtd->qtd_alt_next_qtd,
2490 		    (ehci_qtd_cpu_to_iommu(ehcip, tw->tw_alt_qtd) &
2491 		    EHCI_QTD_ALT_NEXT_QTD_PTR));
2492 	}
2493 
2494 	/*
2495 	 * For control, bulk and interrupt QTD, now
2496 	 * enable current QTD by setting active bit.
2497 	 */
2498 	Set_QTD(qtd->qtd_ctrl, (ctrl | EHCI_QTD_CTRL_ACTIVE_XACT));
2499 
2500 	/*
2501 	 * For Control Xfer, qtd_ctrl_phase is a valid filed.
2502 	 * For a Control Xfer, qtd_ctrl_phase is a valid field.
2503 	if (qtd_ctrl_phase) {
2504 		Set_QTD(qtd->qtd_ctrl_phase, qtd_ctrl_phase);
2505 	}
2506 
2507 	/* Set the transfer wrapper */
2508 	ASSERT(tw != NULL);
2509 	ASSERT(tw->tw_id != NULL);
2510 
2511 	Set_QTD(qtd->qtd_trans_wrapper, (uint32_t)tw->tw_id);
2512 }
2513 
2514 
2515 /*
2516  * ehci_insert_qtd_on_tw:
2517  *
2518  * The transfer wrapper keeps a list of all Transfer Descriptors (QTD) that
2519  * are allocated for this transfer. Insert a QTD onto this list. The list
2520  * of QTDs does not include the dummy QTD that is at the end of the list of
2521  * QTDs for the endpoint.
2522  */
2523 static void
2524 ehci_insert_qtd_on_tw(
2525 	ehci_state_t		*ehcip,
2526 	ehci_trans_wrapper_t	*tw,
2527 	ehci_qtd_t		*qtd)
2528 {
2529 	/*
2530 	 * Set the next pointer to NULL because
2531 	 * this is the last QTD on list.
2532 	 */
2533 	Set_QTD(qtd->qtd_tw_next_qtd, NULL);
2534 
2535 	if (tw->tw_qtd_head == NULL) {
2536 		ASSERT(tw->tw_qtd_tail == NULL);
2537 		tw->tw_qtd_head = qtd;
2538 		tw->tw_qtd_tail = qtd;
2539 	} else {
2540 		ehci_qtd_t *dummy = (ehci_qtd_t *)tw->tw_qtd_tail;
2541 
2542 		ASSERT(dummy != NULL);
2543 		ASSERT(dummy != qtd);
2544 		ASSERT(Get_QTD(qtd->qtd_state) != EHCI_QTD_DUMMY);
2545 
2546 		/* Add the qtd to the end of the list */
2547 		Set_QTD(dummy->qtd_tw_next_qtd,
2548 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2549 
2550 		tw->tw_qtd_tail = qtd;
2551 
2552 		ASSERT(Get_QTD(qtd->qtd_tw_next_qtd) == NULL);
2553 	}
2554 }
2555 
2556 
2557 /*
2558  * ehci_insert_qtd_into_active_qtd_list:
2559  *
2560  * Insert current QTD into active QTD list.
2561  */
2562 static void
2563 ehci_insert_qtd_into_active_qtd_list(
2564 	ehci_state_t		*ehcip,
2565 	ehci_qtd_t		*qtd)
2566 {
2567 	ehci_qtd_t		*curr_qtd, *next_qtd;
2568 
2569 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2570 
2571 	curr_qtd = ehcip->ehci_active_qtd_list;
2572 
2573 	/* Insert this QTD into QTD Active List */
2574 	if (curr_qtd) {
2575 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2576 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2577 
2578 		while (next_qtd) {
2579 			curr_qtd = next_qtd;
2580 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2581 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2582 		}
2583 
2584 		Set_QTD(qtd->qtd_active_qtd_prev,
2585 		    ehci_qtd_cpu_to_iommu(ehcip, curr_qtd));
2586 
2587 		Set_QTD(curr_qtd->qtd_active_qtd_next,
2588 		    ehci_qtd_cpu_to_iommu(ehcip, qtd));
2589 	} else {
2590 		ehcip->ehci_active_qtd_list = qtd;
2591 		Set_QTD(qtd->qtd_active_qtd_next, NULL);
2592 		Set_QTD(qtd->qtd_active_qtd_prev, NULL);
2593 	}
2594 }
2595 
2596 
2597 /*
2598  * ehci_remove_qtd_from_active_qtd_list:
2599  *
2600  * Remove current QTD from the active QTD list.
2601  *
2602  * NOTE: This function is also called from POLLED MODE.
2603  */
2604 void
2605 ehci_remove_qtd_from_active_qtd_list(
2606 	ehci_state_t		*ehcip,
2607 	ehci_qtd_t		*qtd)
2608 {
2609 	ehci_qtd_t		*curr_qtd, *prev_qtd, *next_qtd;
2610 
2611 	ASSERT(qtd != NULL);
2612 
2613 	curr_qtd = ehcip->ehci_active_qtd_list;
2614 
2615 	while ((curr_qtd) && (curr_qtd != qtd)) {
2616 		curr_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2617 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2618 	}
2619 
2620 	if ((curr_qtd) && (curr_qtd == qtd)) {
2621 		prev_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2622 		    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2623 		next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2624 		    Get_QTD(curr_qtd->qtd_active_qtd_next));
2625 
2626 		if (prev_qtd) {
2627 			Set_QTD(prev_qtd->qtd_active_qtd_next,
2628 			    Get_QTD(curr_qtd->qtd_active_qtd_next));
2629 		} else {
2630 			ehcip->ehci_active_qtd_list = next_qtd;
2631 		}
2632 
2633 		if (next_qtd) {
2634 			Set_QTD(next_qtd->qtd_active_qtd_prev,
2635 			    Get_QTD(curr_qtd->qtd_active_qtd_prev));
2636 		}
2637 	} else {
2638 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2639 		    "ehci_remove_qtd_from_active_qtd_list: "
2640 		    "Unable to find QTD in active_qtd_list");
2641 	}
2642 }
2643 
2644 
2645 /*
2646  * ehci_traverse_qtds:
2647  *
2648  * Traverse the list of QTDs for given pipe using transfer wrapper.  Since
2649  * the endpoint is marked as Halted, the Host Controller (HC) is no longer
2650  * accessing these QTDs. Remove all the QTDs that are attached to endpoint.
2651  */
2652 static void
2653 ehci_traverse_qtds(
2654 	ehci_state_t		*ehcip,
2655 	usba_pipe_handle_data_t	*ph)
2656 {
2657 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
2658 	ehci_trans_wrapper_t	*next_tw;
2659 	ehci_qtd_t		*qtd;
2660 	ehci_qtd_t		*next_qtd;
2661 
2662 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2663 
2664 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2665 	    "ehci_traverse_qtds:");
2666 
2667 	/* Process the transfer wrappers for this pipe */
2668 	next_tw = pp->pp_tw_head;
2669 
2670 	while (next_tw) {
2671 		/* Stop the transfer timer */
2672 		ehci_stop_xfer_timer(ehcip, next_tw, EHCI_REMOVE_XFER_ALWAYS);
2673 
2674 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
2675 
2676 		/* Walk through each QTD for this transfer wrapper */
2677 		while (qtd) {
2678 			/* Remove this QTD from active QTD list */
2679 			ehci_remove_qtd_from_active_qtd_list(ehcip, qtd);
2680 
2681 			next_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2682 			    Get_QTD(qtd->qtd_tw_next_qtd));
2683 
2684 			/* Deallocate this QTD */
2685 			ehci_deallocate_qtd(ehcip, qtd);
2686 
2687 			qtd = next_qtd;
2688 		}
2689 
2690 		next_tw = next_tw->tw_next;
2691 	}
2692 
2693 	/* Clear current qtd pointer */
2694 	Set_QH(pp->pp_qh->qh_curr_qtd, (uint32_t)0x00000000);
2695 
2696 	/* Update the next qtd pointer in the QH */
2697 	Set_QH(pp->pp_qh->qh_next_qtd, Get_QH(pp->pp_qh->qh_dummy_qtd));
2698 }
2699 
2700 
2701 /*
2702  * ehci_deallocate_qtd:
2703  *
2704  * Deallocate a Host Controller's (HC) Transfer Descriptor (QTD).
2705  *
2706  * NOTE: This function is also called from POLLED MODE.
2707  */
2708 void
2709 ehci_deallocate_qtd(
2710 	ehci_state_t		*ehcip,
2711 	ehci_qtd_t		*old_qtd)
2712 {
2713 	ehci_trans_wrapper_t	*tw = NULL;
2714 
2715 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2716 	    "ehci_deallocate_qtd: old_qtd = 0x%p", (void *)old_qtd);
2717 
2718 	/*
2719 	 * Obtain the transaction wrapper; tw will be
2720 	 * NULL for the dummy QTDs.
2721 	 */
2722 	if (Get_QTD(old_qtd->qtd_state) != EHCI_QTD_DUMMY) {
2723 		tw = (ehci_trans_wrapper_t *)
2724 		    EHCI_LOOKUP_ID((uint32_t)
2725 		    Get_QTD(old_qtd->qtd_trans_wrapper));
2726 
2727 		ASSERT(tw != NULL);
2728 	}
2729 
2730 	/*
2731 	 * If QTD's transfer wrapper is NULL, don't access its TW.
2732 	 * Just free the QTD.
2733 	 */
2734 	if (tw) {
2735 		ehci_qtd_t	*qtd, *next_qtd;
2736 
2737 		qtd = tw->tw_qtd_head;
2738 
2739 		if (old_qtd != qtd) {
2740 			next_qtd = ehci_qtd_iommu_to_cpu(
2741 			    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2742 
2743 			while (next_qtd != old_qtd) {
2744 				qtd = next_qtd;
2745 				next_qtd = ehci_qtd_iommu_to_cpu(
2746 				    ehcip, Get_QTD(qtd->qtd_tw_next_qtd));
2747 			}
2748 
2749 			Set_QTD(qtd->qtd_tw_next_qtd, old_qtd->qtd_tw_next_qtd);
2750 
2751 			if (qtd->qtd_tw_next_qtd == NULL) {
2752 				tw->tw_qtd_tail = qtd;
2753 			}
2754 		} else {
2755 			tw->tw_qtd_head = ehci_qtd_iommu_to_cpu(
2756 			    ehcip, Get_QTD(old_qtd->qtd_tw_next_qtd));
2757 
2758 			if (tw->tw_qtd_head == NULL) {
2759 				tw->tw_qtd_tail = NULL;
2760 			}
2761 		}
2762 	}
2763 
2764 	bzero((void *)old_qtd, sizeof (ehci_qtd_t));
2765 	Set_QTD(old_qtd->qtd_state, EHCI_QTD_FREE);
2766 
2767 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2768 	    "Dealloc_qtd: qtd 0x%p", (void *)old_qtd);
2769 }
2770 
2771 
2772 /*
2773  * ehci_qtd_cpu_to_iommu:
2774  *
2775  * This function converts the given Transfer Descriptor (QTD) CPU address
2776  * to an IO address.
2777  *
2778  * NOTE: This function is also called from POLLED MODE.
2779  */
2780 uint32_t
2781 ehci_qtd_cpu_to_iommu(
2782 	ehci_state_t	*ehcip,
2783 	ehci_qtd_t	*addr)
2784 {
2785 	uint32_t	td;
2786 
2787 	td  = (uint32_t)ehcip->ehci_qtd_pool_cookie.dmac_address +
2788 	    (uint32_t)((uintptr_t)addr -
2789 	    (uintptr_t)(ehcip->ehci_qtd_pool_addr));
2790 
2791 	ASSERT((ehcip->ehci_qtd_pool_cookie.dmac_address +
2792 	    (uint32_t) (sizeof (ehci_qtd_t) *
2793 	    (addr - ehcip->ehci_qtd_pool_addr))) ==
2794 	    (ehcip->ehci_qtd_pool_cookie.dmac_address +
2795 	    (uint32_t)((uintptr_t)addr - (uintptr_t)
2796 	    (ehcip->ehci_qtd_pool_addr))));
2797 
2798 	ASSERT(td >= ehcip->ehci_qtd_pool_cookie.dmac_address);
2799 	ASSERT(td <= ehcip->ehci_qtd_pool_cookie.dmac_address +
2800 	    sizeof (ehci_qtd_t) * ehci_qtd_pool_size);
2801 
2802 	return (td);
2803 }
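
/*
 * For example, the QTD at index n in the pool maps to the IO address
 * ehci_qtd_pool_cookie.dmac_address + n * sizeof (ehci_qtd_t), which is
 * exactly what the pointer arithmetic above computes; the companion
 * function below performs the inverse translation.
 */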
2804 
2805 
2806 /*
2807  * ehci_qtd_iommu_to_cpu:
2808  *
2809  * This function converts the given Transfer Descriptor (QTD) IO address
2810  * to a CPU address.
2811  *
2812  * NOTE: This function is also called from POLLED MODE.
2813  */
2814 ehci_qtd_t *
2815 ehci_qtd_iommu_to_cpu(
2816 	ehci_state_t	*ehcip,
2817 	uintptr_t	addr)
2818 {
2819 	ehci_qtd_t	*qtd;
2820 
2821 	if (addr == NULL) {
2822 
2823 		return (NULL);
2824 	}
2825 
2826 	qtd = (ehci_qtd_t *)((uintptr_t)
2827 	    (addr - ehcip->ehci_qtd_pool_cookie.dmac_address) +
2828 	    (uintptr_t)ehcip->ehci_qtd_pool_addr);
2829 
2830 	ASSERT(qtd >= ehcip->ehci_qtd_pool_addr);
2831 	ASSERT((uintptr_t)qtd <= (uintptr_t)ehcip->ehci_qtd_pool_addr +
2832 	    (uintptr_t)(sizeof (ehci_qtd_t) * ehci_qtd_pool_size));
2833 
2834 	return (qtd);
2835 }
2836 
2837 /*
2838  * ehci_allocate_tds_for_tw:
2839  *
2840  * Allocate n Transfer Descriptors (QTD) from the QTD buffer pool and place
2841  * them into the TW.  Also chooses the correct alternate qtd when required.
2842  * It is used for hardware short transfer support.  For more information on
2843  * alternate qtds look at section 3.5.2 in the EHCI spec.
2844  * Here is how each alternate qtd is used:
2845  *
2846  * Bulk: used fully.
2847  * Intr: xfers only require 1 QTD, so alternate qtds are never used.
2848  * Ctrl: Should not use alternate QTD
2849  * Isoch: Doesn't support short_xfer nor does it use QTD
2850  *
2851  * Returns USB_NO_RESOURCES if it was not able to allocate all the requested
2852  * QTDs, otherwise USB_SUCCESS.
2853  */
2854 int
2855 ehci_allocate_tds_for_tw(
2856 	ehci_state_t		*ehcip,
2857 	ehci_pipe_private_t	*pp,
2858 	ehci_trans_wrapper_t	*tw,
2859 	size_t			qtd_count)
2860 {
2861 	usb_ep_descr_t		*eptd = &pp->pp_pipe_handle->p_ep;
2862 	uchar_t			attributes;
2863 	ehci_qtd_t		*qtd;
2864 	uint32_t		qtd_addr;
2865 	int			i;
2866 	int			error = USB_SUCCESS;
2867 
2868 	attributes = eptd->bmAttributes & USB_EP_ATTR_MASK;
2869 
2870 	for (i = 0; i < qtd_count; i += 1) {
2871 		qtd = ehci_allocate_qtd_from_pool(ehcip);
2872 		if (qtd == NULL) {
2873 			error = USB_NO_RESOURCES;
2874 			USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2875 			    "ehci_allocate_qtds_for_tw: "
2876 			    "ehci_allocate_tds_for_tw: "
2877 			    qtd_count);
2878 			break;
2879 		}
2880 		if (i > 0) {
2881 			qtd_addr = ehci_qtd_cpu_to_iommu(ehcip,
2882 			    tw->tw_qtd_free_list);
2883 			Set_QTD(qtd->qtd_tw_next_qtd, qtd_addr);
2884 		}
2885 		tw->tw_qtd_free_list = qtd;
2886 
2887 		/*
2888 		 * Save the second one as a pointer to the new dummy 1.
2889 		 * It is used later for the alt_qtd_ptr.  Xfers with only
2890 		 * one qtd do not need alt_qtd_ptr.
2891 		 * The QTDs are allocated and pushed onto a stack, which is
2892 		 * why the second qtd allocated will turn out to be the
2893 		 * new dummy 1.
2894 		 */
2895 		if ((i == 1) && (attributes == USB_EP_ATTR_BULK)) {
2896 			tw->tw_alt_qtd = qtd;
2897 		}
2898 	}
2899 
2900 	return (error);
2901 }
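
/*
 * To illustrate the stacking noted above: with qtd_count = 3, QTDs A, B and
 * C are allocated in that order and each is pushed onto the head of
 * tw_qtd_free_list, leaving the list as C -> B -> A.  The second QTD
 * allocated (B, i == 1) is the one remembered as tw_alt_qtd for bulk pipes,
 * since it will become the "new dummy 1" once the QTDs are consumed by
 * ehci_insert_qtd().
 */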
2902 
2903 /*
2904  * ehci_allocate_tw_resources:
2905  *
2906  * Allocate a Transaction Wrapper (TW) and n Transfer Descriptors (QTD)
2907  * from the QTD buffer pool and place them into the TW.  It is an
2908  * all-or-nothing transaction.
2909  *
2910  * Returns NULL if resources are insufficient, otherwise the TW.
2911  */
2912 static ehci_trans_wrapper_t *
2913 ehci_allocate_tw_resources(
2914 	ehci_state_t		*ehcip,
2915 	ehci_pipe_private_t	*pp,
2916 	size_t			tw_length,
2917 	usb_flags_t		usb_flags,
2918 	size_t			qtd_count)
2919 {
2920 	ehci_trans_wrapper_t	*tw;
2921 
2922 	tw = ehci_create_transfer_wrapper(ehcip, pp, tw_length, usb_flags);
2923 
2924 	if (tw == NULL) {
2925 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2926 		    "ehci_allocate_tw_resources: Unable to allocate TW");
2927 	} else {
2928 		if (ehci_allocate_tds_for_tw(ehcip, pp, tw, qtd_count) ==
2929 		    USB_SUCCESS) {
2930 			tw->tw_num_qtds = qtd_count;
2931 		} else {
2932 			ehci_deallocate_tw(ehcip, pp, tw);
2933 			tw = NULL;
2934 		}
2935 	}
2936 
2937 	return (tw);
2938 }
2939 
2940 
2941 /*
2942  * ehci_free_tw_td_resources:
2943  *
2944  * Free all the QTD resources allocated for a Transaction Wrapper (TW).
2945  * Does not free the TW itself.
2948  */
2949 static void
2950 ehci_free_tw_td_resources(
2951 	ehci_state_t		*ehcip,
2952 	ehci_trans_wrapper_t	*tw)
2953 {
2954 	ehci_qtd_t		*qtd = NULL;
2955 	ehci_qtd_t		*temp_qtd = NULL;
2956 
2957 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
2958 	    "ehci_free_tw_td_resources: tw = 0x%p", tw);
2959 
2960 	qtd = tw->tw_qtd_free_list;
2961 	while (qtd != NULL) {
2962 		/* Save the pointer to the next qtd before destroying it */
2963 		temp_qtd = ehci_qtd_iommu_to_cpu(ehcip,
2964 		    Get_QTD(qtd->qtd_tw_next_qtd));
2965 		ehci_deallocate_qtd(ehcip, qtd);
2966 		qtd = temp_qtd;
2967 	}
2968 	tw->tw_qtd_free_list = NULL;
2969 }
2970 
2971 /*
2972  * Transfer Wrapper functions
2973  *
2974  * ehci_create_transfer_wrapper:
2975  *
2976  * Create a Transaction Wrapper (TW) and this involves the allocating of DMA
2977  * resources.
2978  */
2979 static ehci_trans_wrapper_t *
2980 ehci_create_transfer_wrapper(
2981 	ehci_state_t		*ehcip,
2982 	ehci_pipe_private_t	*pp,
2983 	size_t			length,
2984 	uint_t			usb_flags)
2985 {
2986 	ddi_device_acc_attr_t	dev_attr;
2987 	ddi_dma_attr_t		dma_attr;
2988 	int			result;
2989 	size_t			real_length;
2990 	ehci_trans_wrapper_t	*tw;
2991 	int			kmem_flag;
2992 	int			(*dmamem_wait)(caddr_t);
2993 
2994 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
2995 	    "ehci_create_transfer_wrapper: length = 0x%lx flags = 0x%x",
2996 	    length, usb_flags);
2997 
2998 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2999 
3000 	/* SLEEP flag should not be used in interrupt context */
3001 	if (servicing_interrupt()) {
3002 		kmem_flag = KM_NOSLEEP;
3003 		dmamem_wait = DDI_DMA_DONTWAIT;
3004 	} else {
3005 		kmem_flag = KM_SLEEP;
3006 		dmamem_wait = DDI_DMA_SLEEP;
3007 	}
3008 
3009 	/* Allocate space for the transfer wrapper */
3010 	tw = kmem_zalloc(sizeof (ehci_trans_wrapper_t), kmem_flag);
3011 
3012 	if (tw == NULL) {
3013 		USB_DPRINTF_L2(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3014 		    "ehci_create_transfer_wrapper: kmem_zalloc failed");
3015 
3016 		return (NULL);
3017 	}
3018 
3019 	/* zero-length packet doesn't need to allocate dma memory */
3020 	if (length == 0) {
3021 
3022 		goto dmadone;
3023 	}
3024 
3025 	/* allow sg lists for transfer wrapper dma memory */
3026 	bcopy(&ehcip->ehci_dma_attr, &dma_attr, sizeof (ddi_dma_attr_t));
3027 	dma_attr.dma_attr_sgllen = EHCI_DMA_ATTR_TW_SGLLEN;
3028 	dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
3029 
3030 	/* Allocate the DMA handle */
3031 	result = ddi_dma_alloc_handle(ehcip->ehci_dip,
3032 	    &dma_attr, dmamem_wait, 0, &tw->tw_dmahandle);
3033 
3034 	if (result != DDI_SUCCESS) {
3035 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3036 		    "ehci_create_transfer_wrapper: Alloc handle failed");
3037 
3038 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3039 
3040 		return (NULL);
3041 	}
3042 
3043 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
3044 
3045 	/* no need for swapping the raw data */
3046 	dev_attr.devacc_attr_endian_flags  = DDI_NEVERSWAP_ACC;
3047 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
3048 
3049 	/* Allocate the memory */
3050 	result = ddi_dma_mem_alloc(tw->tw_dmahandle, length,
3051 	    &dev_attr, DDI_DMA_CONSISTENT, dmamem_wait, NULL,
3052 	    (caddr_t *)&tw->tw_buf, &real_length, &tw->tw_accesshandle);
3053 
3054 	if (result != DDI_SUCCESS) {
3055 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3056 		    "ehci_create_transfer_wrapper: dma_mem_alloc fail");
3057 
3058 		ddi_dma_free_handle(&tw->tw_dmahandle);
3059 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3060 
3061 		return (NULL);
3062 	}
3063 
3064 	ASSERT(real_length >= length);
3065 
3066 	/* Bind the handle */
3067 	result = ddi_dma_addr_bind_handle(tw->tw_dmahandle, NULL,
3068 	    (caddr_t)tw->tw_buf, real_length, DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
3069 	    dmamem_wait, NULL, &tw->tw_cookie, &tw->tw_ncookies);
3070 
3071 	if (result != DDI_DMA_MAPPED) {
3072 		ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
3073 
3074 		ddi_dma_mem_free(&tw->tw_accesshandle);
3075 		ddi_dma_free_handle(&tw->tw_dmahandle);
3076 		kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3077 
3078 		return (NULL);
3079 	}
3080 
3081 	tw->tw_cookie_idx = 0;
3082 	tw->tw_dma_offs = 0;
3083 
3084 dmadone:
3085 	/*
3086 	 * Only allow one wrapper to be added at a time. Insert the
3087 	 * new transaction wrapper into the list for this pipe.
3088 	 */
3089 	if (pp->pp_tw_head == NULL) {
3090 		pp->pp_tw_head = tw;
3091 		pp->pp_tw_tail = tw;
3092 	} else {
3093 		pp->pp_tw_tail->tw_next = tw;
3094 		pp->pp_tw_tail = tw;
3095 	}
3096 
3097 	/* Store the transfer length */
3098 	tw->tw_length = length;
3099 
3100 	/* Store a back pointer to the pipe private structure */
3101 	tw->tw_pipe_private = pp;
3102 
3103 	/* Store the transfer type - synchronous or asynchronous */
3104 	tw->tw_flags = usb_flags;
3105 
3106 	/* Get and Store 32bit ID */
3107 	tw->tw_id = EHCI_GET_ID((void *)tw);
3108 
3109 	ASSERT(tw->tw_id != NULL);
3110 
3111 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3112 	    "ehci_create_transfer_wrapper: tw = 0x%p, ncookies = %u",
3113 	    tw, tw->tw_ncookies);
3114 
3115 	return (tw);
3116 }
3117 
3118 
3119 /*
3120  * ehci_start_xfer_timer:
3121  *
3122  * Start the timer for the control, bulk and for one time interrupt
3123  * transfers.
3124  */
3125 /* ARGSUSED */
3126 static void
3127 ehci_start_xfer_timer(
3128 	ehci_state_t		*ehcip,
3129 	ehci_pipe_private_t	*pp,
3130 	ehci_trans_wrapper_t	*tw)
3131 {
3132 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3133 	    "ehci_start_xfer_timer: tw = 0x%p", tw);
3134 
3135 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3136 
3137 	/*
3138 	 * The timeout handling is done only for control, bulk and for
3139 	 * one-time interrupt transfers.
3140 	 *
3141 	 * NOTE: If the timeout is zero, assume an infinite timeout and
3142 	 * don't insert this transfer on the timeout list.
3143 	 */
3144 	if (tw->tw_timeout) {
3145 		/*
3146 		 * Add this transfer wrapper to the head of the pipe's
3147 		 * tw timeout list.
3148 		 */
3149 		if (pp->pp_timeout_list) {
3150 			tw->tw_timeout_next = pp->pp_timeout_list;
3151 		}
3152 
3153 		pp->pp_timeout_list = tw;
3154 		ehci_start_timer(ehcip, pp);
3155 	}
3156 }
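
/*
 * Timeout bookkeeping sketch: ehci_start_timer() arms a one second per-pipe
 * timer (drv_usectohz(1000000)), and each run of ehci_xfer_timeout_handler()
 * decrements tw_timeout on every wrapper on the pipe's timeout list, so the
 * timeout values saved in tw_timeout are counted down in one second ticks.
 */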
3157 
3158 
3159 /*
3160  * ehci_stop_xfer_timer:
3161  *
3162  * Stop the timer for the control, bulk and one-time interrupt
3163  * transfers.
3164  */
3165 void
3166 ehci_stop_xfer_timer(
3167 	ehci_state_t		*ehcip,
3168 	ehci_trans_wrapper_t	*tw,
3169 	uint_t			flag)
3170 {
3171 	ehci_pipe_private_t	*pp;
3172 	timeout_id_t		timer_id;
3173 
3174 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3175 	    "ehci_stop_xfer_timer: tw = 0x%p", tw);
3176 
3177 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3178 
3179 	/* Obtain the pipe private structure */
3180 	pp = tw->tw_pipe_private;
3181 
3182 	/* check if the timeout tw list is empty */
3183 	if (pp->pp_timeout_list == NULL) {
3184 
3185 		return;
3186 	}
3187 
3188 	switch (flag) {
3189 	case EHCI_REMOVE_XFER_IFLAST:
3190 		if (tw->tw_qtd_head != tw->tw_qtd_tail) {
3191 			break;
3192 		}
3193 
3194 		/* FALLTHRU */
3195 	case EHCI_REMOVE_XFER_ALWAYS:
3196 		ehci_remove_tw_from_timeout_list(ehcip, tw);
3197 
3198 		if ((pp->pp_timeout_list == NULL) &&
3199 		    (pp->pp_timer_id)) {
3200 
3201 			timer_id = pp->pp_timer_id;
3202 
3203 			/* Reset the timer id to zero */
3204 			pp->pp_timer_id = 0;
3205 
3206 			mutex_exit(&ehcip->ehci_int_mutex);
3207 
3208 			(void) untimeout(timer_id);
3209 
3210 			mutex_enter(&ehcip->ehci_int_mutex);
3211 		}
3212 		break;
3213 	default:
3214 		break;
3215 	}
3216 }
3217 
3218 
3219 /*
3220  * ehci_xfer_timeout_handler:
3221  *
3222  * Control or bulk transfer timeout handler.
3223  */
3224 static void
3225 ehci_xfer_timeout_handler(void *arg)
3226 {
3227 	usba_pipe_handle_data_t	*ph = (usba_pipe_handle_data_t *)arg;
3228 	ehci_state_t		*ehcip = ehci_obtain_state(
3229 	    ph->p_usba_device->usb_root_hub_dip);
3230 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3231 	ehci_trans_wrapper_t	*tw, *next;
3232 	ehci_trans_wrapper_t	*expire_xfer_list = NULL;
3233 	ehci_qtd_t		*qtd;
3234 
3235 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3236 	    "ehci_xfer_timeout_handler: ehcip = 0x%p, ph = 0x%p", ehcip, ph);
3237 
3238 	mutex_enter(&ehcip->ehci_int_mutex);
3239 
3240 	/*
3241 	 * Check whether the timeout handler is still valid.
3242 	 */
3243 	if (pp->pp_timer_id != 0) {
3244 
3245 		/* Reset the timer id to zero */
3246 		pp->pp_timer_id = 0;
3247 	} else {
3248 		mutex_exit(&ehcip->ehci_int_mutex);
3249 
3250 		return;
3251 	}
3252 
3253 	/* Get the transfer timeout list head */
3254 	tw = pp->pp_timeout_list;
3255 
3256 	while (tw) {
3257 
3258 		/* Get the transfer on the timeout list */
3259 		next = tw->tw_timeout_next;
3260 
3261 		tw->tw_timeout--;
3262 
3263 		if (tw->tw_timeout <= 0) {
3264 
3265 			/* remove the tw from the timeout list */
3266 			ehci_remove_tw_from_timeout_list(ehcip, tw);
3267 
3268 			/* remove QTDs from active QTD list */
3269 			qtd = tw->tw_qtd_head;
3270 			while (qtd) {
3271 				ehci_remove_qtd_from_active_qtd_list(
3272 				    ehcip, qtd);
3273 
3274 				/* Get the next QTD from the wrapper */
3275 				qtd = ehci_qtd_iommu_to_cpu(ehcip,
3276 				    Get_QTD(qtd->qtd_tw_next_qtd));
3277 			}
3278 
3279 			/*
3280 			 * Preserve the order in which the
3281 			 * requests were started.
3282 			 */
3283 			tw->tw_timeout_next = expire_xfer_list;
3284 			expire_xfer_list = tw;
3285 		}
3286 
3287 		tw = next;
3288 	}
3289 
3290 	/*
3291 	 * The timer should be started before the callbacks.
3292 	 * There is always a chance that ehci interrupts come
3293 	 * in when we release the mutex while calling the tw back.
3294 	 * To keep an accurate timeout it should be restarted
3295 	 * as soon as possible.
3296 	 */
3297 	ehci_start_timer(ehcip, pp);
3298 
3299 	/* Get the expired transfer timeout list head */
3300 	tw = expire_xfer_list;
3301 
3302 	while (tw) {
3303 
3304 		/* Get the next tw on the expired transfer timeout list */
3305 		next = tw->tw_timeout_next;
3306 
3307 		/*
3308 		 * The error handling routine will release the mutex when
3309 		 * calling back to USBA. But this will not cause any race.
3310 		 * We do the callback and are relying on ehci_pipe_cleanup()
3311 		 * to halt the queue head and clean up since we should not
3312 		 * block in timeout context.
3313 		 */
3314 		ehci_handle_error(ehcip, tw->tw_qtd_head, USB_CR_TIMEOUT);
3315 
3316 		tw = next;
3317 	}
3318 	mutex_exit(&ehcip->ehci_int_mutex);
3319 }
3320 
3321 
3322 /*
3323  * ehci_remove_tw_from_timeout_list:
3324  *
3325  * Remove Control or bulk transfer from the timeout list.
3326  */
3327 static void
3328 ehci_remove_tw_from_timeout_list(
3329 	ehci_state_t		*ehcip,
3330 	ehci_trans_wrapper_t	*tw)
3331 {
3332 	ehci_pipe_private_t	*pp;
3333 	ehci_trans_wrapper_t	*prev, *next;
3334 
3335 	USB_DPRINTF_L3(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3336 	    "ehci_remove_tw_from_timeout_list: tw = 0x%p", tw);
3337 
3338 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3339 
3340 	/* Obtain the pipe private structure */
3341 	pp = tw->tw_pipe_private;
3342 
3343 	if (pp->pp_timeout_list) {
3344 		if (pp->pp_timeout_list == tw) {
3345 			pp->pp_timeout_list = tw->tw_timeout_next;
3346 
3347 			tw->tw_timeout_next = NULL;
3348 		} else {
3349 			prev = pp->pp_timeout_list;
3350 			next = prev->tw_timeout_next;
3351 
3352 			while (next && (next != tw)) {
3353 				prev = next;
3354 				next = next->tw_timeout_next;
3355 			}
3356 
3357 			if (next == tw) {
3358 				prev->tw_timeout_next =
3359 				    next->tw_timeout_next;
3360 				tw->tw_timeout_next = NULL;
3361 			}
3362 		}
3363 	}
3364 }
3365 
3366 
3367 /*
3368  * ehci_start_timer:
3369  *
3370  * Start the pipe's timer
3371  */
3372 static void
3373 ehci_start_timer(
3374 	ehci_state_t		*ehcip,
3375 	ehci_pipe_private_t	*pp)
3376 {
3377 	USB_DPRINTF_L4(PRINT_MASK_LISTS,  ehcip->ehci_log_hdl,
3378 	    "ehci_start_timer: ehcip = 0x%p, pp = 0x%p", ehcip, pp);
3379 
3380 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3381 
3382 	/*
3383 	 * Start the pipe's timer only if the timer is not currently
3384 	 * running and there are transfers on the timeout
3385 	 * list. This timer is per pipe.
3386 	 */
3387 	if ((!pp->pp_timer_id) && (pp->pp_timeout_list)) {
3388 		pp->pp_timer_id = timeout(ehci_xfer_timeout_handler,
3389 		    (void *)(pp->pp_pipe_handle), drv_usectohz(1000000));
3390 	}
3391 }
3392 
3393 /*
3394  * ehci_deallocate_tw:
3395  *
3396  * Deallocate a Transaction Wrapper (TW); this involves freeing its
3397  * DMA resources.
3398  */
3399 void
3400 ehci_deallocate_tw(
3401 	ehci_state_t		*ehcip,
3402 	ehci_pipe_private_t	*pp,
3403 	ehci_trans_wrapper_t	*tw)
3404 {
3405 	ehci_trans_wrapper_t	*prev, *next;
3406 
3407 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3408 	    "ehci_deallocate_tw: tw = 0x%p", tw);
3409 
3410 	/*
3411 	 * The transfer wrapper can only be removed if it has no Host
3412 	 * Controller (HC) Transfer Descriptors (QTD) associated with
3413 	 * it; otherwise just return.
3414 	 */
3415 	if (tw->tw_qtd_head) {
3416 		ASSERT(tw->tw_qtd_tail != NULL);
3417 
3418 		return;
3419 	}
3420 
3421 	ASSERT(tw->tw_qtd_tail == NULL);
3422 
3423 	/* Make sure we return all the unused qtd's to the pool as well */
3424 	ehci_free_tw_td_resources(ehcip, tw);
3425 
3426 	/*
3427 	 * If pp->pp_tw_head and pp->pp_tw_tail are pointing to
3428 	 * given TW then set the head and  tail  equal to NULL.
3429 	 * Otherwise search for this TW in the linked TW's list
3430 	 * and then remove this TW from the list.
3431 	 */
3432 	if (pp->pp_tw_head == tw) {
3433 		if (pp->pp_tw_tail == tw) {
3434 			pp->pp_tw_head = NULL;
3435 			pp->pp_tw_tail = NULL;
3436 		} else {
3437 			pp->pp_tw_head = tw->tw_next;
3438 		}
3439 	} else {
3440 		prev = pp->pp_tw_head;
3441 		next = prev->tw_next;
3442 
3443 		while (next && (next != tw)) {
3444 			prev = next;
3445 			next = next->tw_next;
3446 		}
3447 
3448 		if (next == tw) {
3449 			prev->tw_next = next->tw_next;
3450 
3451 			if (pp->pp_tw_tail == tw) {
3452 				pp->pp_tw_tail = prev;
3453 			}
3454 		}
3455 	}
3456 
3457 	/*
3458 	 * Make sure that, this TW has been removed
3459 	 * from the timeout list.
3460 	 */
3461 	ehci_remove_tw_from_timeout_list(ehcip, tw);
3462 
3463 	/* Deallocate this TW */
3464 	ehci_free_tw(ehcip, pp, tw);
3465 }
3466 
3467 
3468 /*
3469  * ehci_free_dma_resources:
3470  *
3471  * Free dma resources of a Transfer Wrapper (TW) and also free the TW.
3472  *
3473  * NOTE: This function is also called from POLLED MODE.
3474  */
3475 void
3476 ehci_free_dma_resources(
3477 	ehci_state_t		*ehcip,
3478 	usba_pipe_handle_data_t	*ph)
3479 {
3480 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3481 	ehci_trans_wrapper_t	*head_tw = pp->pp_tw_head;
3482 	ehci_trans_wrapper_t	*next_tw, *tw;
3483 
3484 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3485 	    "ehci_free_dma_resources: ph = 0x%p", (void *)ph);
3486 
3487 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3488 
3489 	/* Process the Transfer Wrappers */
3490 	next_tw = head_tw;
3491 	while (next_tw) {
3492 		tw = next_tw;
3493 		next_tw = tw->tw_next;
3494 
3495 		USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3496 		    "ehci_free_dma_resources: Free TW = 0x%p", (void *)tw);
3497 
3498 		ehci_free_tw(ehcip, pp, tw);
3499 	}
3500 
3501 	/* Adjust the head and tail pointers */
3502 	pp->pp_tw_head = NULL;
3503 	pp->pp_tw_tail = NULL;
3504 }
3505 
3506 
3507 /*
3508  * ehci_free_tw:
3509  *
3510  * Free the Transfer Wrapper (TW).
3511  */
3512 /*ARGSUSED*/
3513 static void
3514 ehci_free_tw(
3515 	ehci_state_t		*ehcip,
3516 	ehci_pipe_private_t	*pp,
3517 	ehci_trans_wrapper_t	*tw)
3518 {
3519 	int	rval;
3520 
3521 	USB_DPRINTF_L4(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
3522 	    "ehci_free_tw: tw = 0x%p", tw);
3523 
3524 	ASSERT(tw != NULL);
3525 	ASSERT(tw->tw_id != NULL);
3526 
3527 	/* Free 32bit ID */
3528 	EHCI_FREE_ID((uint32_t)tw->tw_id);
3529 
3530 	if (tw->tw_dmahandle != NULL) {
3531 		rval = ddi_dma_unbind_handle(tw->tw_dmahandle);
3532 		ASSERT(rval == DDI_SUCCESS);
3533 
3534 		ddi_dma_mem_free(&tw->tw_accesshandle);
3535 		ddi_dma_free_handle(&tw->tw_dmahandle);
3536 	}
3537 
3538 	/* Free transfer wrapper */
3539 	kmem_free(tw, sizeof (ehci_trans_wrapper_t));
3540 }
3541 
3542 
3543 /*
3544  * Miscellaneous functions
3545  */
3546 
3547 /*
3548  * ehci_allocate_intr_in_resource
3549  *
3550  * Allocate interrupt request structure for the interrupt IN transfer.
3551  */
3552 /*ARGSUSED*/
3553 int
3554 ehci_allocate_intr_in_resource(
3555 	ehci_state_t		*ehcip,
3556 	ehci_pipe_private_t	*pp,
3557 	ehci_trans_wrapper_t	*tw,
3558 	usb_flags_t		flags)
3559 {
3560 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3561 	usb_intr_req_t		*curr_intr_reqp;
3562 	usb_opaque_t		client_periodic_in_reqp;
3563 	size_t			length = 0;
3564 
3565 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3566 	    "ehci_allocate_intr_in_resource: "
3567 	    "pp = 0x%p tw = 0x%p flags = 0x%x", pp, tw, flags);
3568 
3569 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3570 	ASSERT(tw->tw_curr_xfer_reqp == NULL);
3571 
3572 	/* Get the client periodic in request pointer */
3573 	client_periodic_in_reqp = pp->pp_client_periodic_in_reqp;
3574 
3575 	/*
3576 	 * If a client periodic IN request was saved when polling started,
3577 	 * allocate a corresponding usb periodic IN request for the
3578 	 * current periodic polling request and copy the information
3579 	 * from the saved periodic request structure.
3580 	 */
3581 	if (client_periodic_in_reqp) {
3582 
3583 		/* Get the interrupt transfer length */
3584 		length = ((usb_intr_req_t *)
3585 		    client_periodic_in_reqp)->intr_len;
3586 
3587 		curr_intr_reqp = usba_hcdi_dup_intr_req(ph->p_dip,
3588 		    (usb_intr_req_t *)client_periodic_in_reqp, length, flags);
3589 	} else {
3590 		curr_intr_reqp = usb_alloc_intr_req(ph->p_dip, length, flags);
3591 	}
3592 
3593 	if (curr_intr_reqp == NULL) {
3594 
3595 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3596 		    "ehci_allocate_intr_in_resource: Interrupt "
3597 		    "request structure allocation failed");
3598 
3599 		return (USB_NO_RESOURCES);
3600 	}
3601 
3602 	/* For polled mode */
3603 	if (client_periodic_in_reqp == NULL) {
3604 		curr_intr_reqp->intr_attributes = USB_ATTRS_SHORT_XFER_OK;
3605 		curr_intr_reqp->intr_len = ph->p_ep.wMaxPacketSize;
3606 	} else {
3607 		/* Check and save the timeout value */
3608 		tw->tw_timeout = (curr_intr_reqp->intr_attributes &
3609 		    USB_ATTRS_ONE_XFER) ? curr_intr_reqp->intr_timeout: 0;
3610 	}
3611 
3612 	tw->tw_curr_xfer_reqp = (usb_opaque_t)curr_intr_reqp;
3613 	tw->tw_length = curr_intr_reqp->intr_len;
3614 
3615 	mutex_enter(&ph->p_mutex);
3616 	ph->p_req_count++;
3617 	mutex_exit(&ph->p_mutex);
3618 
3619 	pp->pp_state = EHCI_PIPE_STATE_ACTIVE;
3620 
3621 	return (USB_SUCCESS);
3622 }
3623 
3624 /*
3625  * ehci_pipe_cleanup
3626  *
3627  * Cleanup ehci pipe.
3628  */
3629 void
3630 ehci_pipe_cleanup(
3631 	ehci_state_t		*ehcip,
3632 	usba_pipe_handle_data_t	*ph)
3633 {
3634 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3635 	uint_t			pipe_state = pp->pp_state;
3636 	usb_cr_t		completion_reason;
3637 	usb_ep_descr_t		*eptd = &ph->p_ep;
3638 
3639 	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3640 	    "ehci_pipe_cleanup: ph = 0x%p", ph);
3641 
3642 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3643 
3644 	if (EHCI_ISOC_ENDPOINT(eptd)) {
3645 		ehci_isoc_pipe_cleanup(ehcip, ph);
3646 
3647 		return;
3648 	}
3649 
3650 	ASSERT(!servicing_interrupt());
3651 
3652 	/*
3653 	 * Set the QH's status to the Halt condition.
3654 	 * If another thread is already halting, this function will
3655 	 * automatically wait. If a pipe close happens at this time
3656 	 * we will be in lots of trouble.
3657 	 * If we are in an interrupt thread, don't halt, because it may
3658 	 * do a wait_for_sof.
3659 	 */
3660 	ehci_modify_qh_status_bit(ehcip, pp, SET_HALT);
3661 
3662 	/*
3663 	 * Wait for all completed transfers to be processed and
3664 	 * their results to be sent upstream.
3665 	 */
3666 	ehci_wait_for_transfers_completion(ehcip, pp);
3667 
3668 	/* Save the data toggle information */
3669 	ehci_save_data_toggle(ehcip, ph);
3670 
3671 	/*
3672 	 * Traverse the list of QTDs for this pipe using transfer
3673 	 * wrapper. Process these QTDs depending on their status,
3674 	 * and stop this pipe's timer.
3675 	 */
3676 	ehci_traverse_qtds(ehcip, ph);
3677 
3678 	/* Make sure the timer is not running */
3679 	ASSERT(pp->pp_timer_id == 0);
3680 
3681 	/* Do callbacks for all unfinished requests */
3682 	ehci_handle_outstanding_requests(ehcip, pp);
3683 
3684 	/* Free DMA resources */
3685 	ehci_free_dma_resources(ehcip, ph);
3686 
3687 	switch (pipe_state) {
3688 	case EHCI_PIPE_STATE_CLOSE:
3689 		completion_reason = USB_CR_PIPE_CLOSING;
3690 		break;
3691 	case EHCI_PIPE_STATE_RESET:
3692 	case EHCI_PIPE_STATE_STOP_POLLING:
3693 		/* Set completion reason */
3694 		completion_reason = (pipe_state ==
3695 		    EHCI_PIPE_STATE_RESET) ?
3696 		    USB_CR_PIPE_RESET: USB_CR_STOPPED_POLLING;
3697 
3698 		/* Restore the data toggle information */
3699 		ehci_restore_data_toggle(ehcip, ph);
3700 
3701 		/*
3702 		 * Clear the halt bit to restart all the
3703 		 * transactions on this pipe.
3704 		 */
3705 		ehci_modify_qh_status_bit(ehcip, pp, CLEAR_HALT);
3706 
3707 		/* Set pipe state to idle */
3708 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
3709 
3710 		break;
3711 	}
3712 
3713 	/*
3714 	 * Do the callback for the original client
3715 	 * periodic IN request.
3716 	 */
3717 	if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3718 	    ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) ==
3719 	    USB_EP_DIR_IN)) {
3720 
3721 		ehci_do_client_periodic_in_req_callback(
3722 		    ehcip, pp, completion_reason);
3723 	}
3724 }
3725 
3726 
3727 /*
3728  * ehci_wait_for_transfers_completion:
3729  *
3730  * Wait for all completed transfers to be processed and their results
3731  * to be sent upstream.
3732  */
3733 static void
3734 ehci_wait_for_transfers_completion(
3735 	ehci_state_t		*ehcip,
3736 	ehci_pipe_private_t	*pp)
3737 {
3738 	ehci_trans_wrapper_t	*next_tw = pp->pp_tw_head;
3739 	clock_t			xfer_cmpl_time_wait;
3740 	ehci_qtd_t		*qtd;
3741 
3742 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3743 	    ehcip->ehci_log_hdl,
3744 	    "ehci_wait_for_transfers_completion: pp = 0x%p", pp);
3745 
3746 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3747 
3748 	if ((ehci_state_is_operational(ehcip)) != USB_SUCCESS) {
3749 
3750 		return;
3751 	}
3752 
3753 	pp->pp_count_done_qtds = 0;
3754 
3755 	/* Process the transfer wrappers for this pipe */
3756 	while (next_tw) {
3757 		qtd = (ehci_qtd_t *)next_tw->tw_qtd_head;
3758 
3759 		/*
3760 		 * Walk through each QTD for this transfer wrapper.
3761 		 * If a QTD still exists, it is either on the done
3762 		 * list or on the QH's list.
3763 		 */
3764 		while (qtd) {
3765 			if (!(Get_QTD(qtd->qtd_ctrl) &
3766 			    EHCI_QTD_CTRL_ACTIVE_XACT)) {
3767 				pp->pp_count_done_qtds++;
3768 			}
3769 
3770 			qtd = ehci_qtd_iommu_to_cpu(ehcip,
3771 			    Get_QTD(qtd->qtd_tw_next_qtd));
3772 		}
3773 
3774 		next_tw = next_tw->tw_next;
3775 	}
3776 
3777 	USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3778 	    "ehci_wait_for_transfers_completion: count_done_qtds = 0x%x",
3779 	    pp->pp_count_done_qtds);
3780 
3781 	if (!pp->pp_count_done_qtds) {
3782 
3783 		return;
3784 	}
3785 
3786 	/* Get the number of clock ticks to wait */
3787 	xfer_cmpl_time_wait = drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000);
3788 
3789 	(void) cv_timedwait(&pp->pp_xfer_cmpl_cv,
3790 	    &ehcip->ehci_int_mutex,
3791 	    ddi_get_lbolt() + xfer_cmpl_time_wait);
3792 
3793 	if (pp->pp_count_done_qtds) {
3794 
3795 		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3796 		    "ehci_wait_for_transfers_completion: "
3797 		    "No transfer completion confirmation received");
3798 	}
3799 }
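
/*
 * Minimal sketch (not compiled) of the bounded-wait idiom used above: a
 * caller that has counted outstanding work waits on a condition variable
 * with a timeout instead of blocking forever.  Unlike the single
 * cv_timedwait() above, this sketch re-checks the predicate in a loop,
 * which also guards against spurious wakeups.  The helper name is
 * hypothetical; cv_timedwait(), drv_usectohz() and ddi_get_lbolt() are
 * the standard DDI routines already used by this file.
 */
#if 0
static void
ehci_bounded_wait_sketch(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp,
	uint_t			timeout_secs)
{
	clock_t			deadline;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	/* cv_timedwait() takes an absolute lbolt deadline */
	deadline = ddi_get_lbolt() + drv_usectohz(timeout_secs * 1000000);

	while (pp->pp_count_done_qtds != 0) {
		if (cv_timedwait(&pp->pp_xfer_cmpl_cv,
		    &ehcip->ehci_int_mutex, deadline) == -1) {

			break;	/* timed out before the count drained */
		}
	}
}
#endif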
3800 
3801 /*
3802  * ehci_check_for_transfers_completion:
3803  *
3804  * Check whether anybody is waiting for a transfer-completion event. If so,
3805  * send this event and also stop initiating any new transfers on this pipe.
3806  */
3807 void
3808 ehci_check_for_transfers_completion(
3809 	ehci_state_t		*ehcip,
3810 	ehci_pipe_private_t	*pp)
3811 {
3812 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3813 	    ehcip->ehci_log_hdl,
3814 	    "ehci_check_for_transfers_completion: pp = 0x%p", pp);
3815 
3816 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3817 
3818 	if ((pp->pp_state == EHCI_PIPE_STATE_STOP_POLLING) &&
3819 	    (pp->pp_error == USB_CR_NO_RESOURCES) &&
3820 	    (pp->pp_cur_periodic_req_cnt == 0)) {
3821 
3822 		/* Reset pipe error to zero */
3823 		pp->pp_error = 0;
3824 
3825 		/* Do callback for original request */
3826 		ehci_do_client_periodic_in_req_callback(
3827 		    ehcip, pp, USB_CR_NO_RESOURCES);
3828 	}
3829 
3830 	if (pp->pp_count_done_qtds) {
3831 
3832 		USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3833 		    "ehci_check_for_transfers_completion: "
3834 		    "count_done_qtds = 0x%x", pp->pp_count_done_qtds);
3835 
3836 		/* Decrement the done qtd count */
3837 		pp->pp_count_done_qtds--;
3838 
3839 		if (!pp->pp_count_done_qtds) {
3840 
3841 			USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3842 			    "ehci_check_for_transfers_completion: "
3843 			    "Sent transfer completion event pp = 0x%p", pp);
3844 
3845 			/* Send the transfer completion signal */
3846 			cv_signal(&pp->pp_xfer_cmpl_cv);
3847 		}
3848 	}
3849 }
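
/*
 * Sketch (not compiled) of the matching signal side of the handshake:
 * the completion path decrements the shared counter under the same
 * mutex and signals the waiter only when the last unit of work drains.
 * This pairs with the cv_timedwait() in
 * ehci_wait_for_transfers_completion() above; the helper name is
 * hypothetical.
 */
#if 0
static void
ehci_completion_signal_sketch(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	if ((pp->pp_count_done_qtds != 0) &&
	    (--pp->pp_count_done_qtds == 0)) {
		cv_signal(&pp->pp_xfer_cmpl_cv);
	}
}
#endif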
3850 
3851 
3852 /*
3853  * ehci_save_data_toggle:
3854  *
3855  * Save the data toggle information.
3856  */
3857 static void
3858 ehci_save_data_toggle(
3859 	ehci_state_t		*ehcip,
3860 	usba_pipe_handle_data_t	*ph)
3861 {
3862 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3863 	usb_ep_descr_t		*eptd = &ph->p_ep;
3864 	uint_t			data_toggle;
3865 	usb_cr_t		error = pp->pp_error;
3866 	ehci_qh_t		*qh = pp->pp_qh;
3867 
3868 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3869 	    ehcip->ehci_log_hdl,
3870 	    "ehci_save_data_toggle: ph = 0x%p", ph);
3871 
3872 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3873 
3874 	/* Reset the pipe error value */
3875 	pp->pp_error = USB_CR_OK;
3876 
3877 	/* Return immediately if it is a control pipe */
3878 	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3879 	    USB_EP_ATTR_CONTROL) {
3880 
3881 		return;
3882 	}
3883 
3884 	/* Get the data toggle information from the endpoint (QH) */
3885 	data_toggle = (Get_QH(qh->qh_status) &
3886 	    EHCI_QH_STS_DATA_TOGGLE) ? DATA1 : DATA0;
3887 
3888 	/*
3889 	 * If the error is a STALL, reset the
3890 	 * data toggle to DATA0.
3891 	 */
3892 	if (error == USB_CR_STALL) {
3893 		data_toggle = DATA0;
3894 	}
3895 
3896 	/*
3897 	 * Save the data toggle information
3898 	 * in the usb device structure.
3899 	 */
3900 	mutex_enter(&ph->p_mutex);
3901 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3902 	    data_toggle);
3903 	mutex_exit(&ph->p_mutex);
3904 }
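
/*
 * Sketch (not compiled): choosing the toggle value to save.  Under the
 * USB 2.0 data toggle rules a halted (stalled) endpoint restarts at
 * DATA0 once the halt is cleared, which is why a STALL forces DATA0
 * regardless of what the QH currently reports.  The helper name is
 * hypothetical.
 */
#if 0
static uint_t
ehci_toggle_to_save_sketch(
	ehci_qh_t		*qh,
	usb_cr_t		error)
{
	if (error == USB_CR_STALL) {

		return (DATA0);
	}

	return ((Get_QH(qh->qh_status) & EHCI_QH_STS_DATA_TOGGLE) ?
	    DATA1 : DATA0);
}
#endif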
3905 
3906 
3907 /*
3908  * ehci_restore_data_toggle:
3909  *
3910  * Restore the data toggle information.
3911  */
3912 void
3913 ehci_restore_data_toggle(
3914 	ehci_state_t		*ehcip,
3915 	usba_pipe_handle_data_t	*ph)
3916 {
3917 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
3918 	usb_ep_descr_t		*eptd = &ph->p_ep;
3919 	uint_t			data_toggle = 0;
3920 
3921 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3922 	    ehcip->ehci_log_hdl,
3923 	    "ehci_restore_data_toggle: ph = 0x%p", ph);
3924 
3925 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3926 
3927 	/* Return immediately if it is a control pipe */
3928 	if ((eptd->bmAttributes & USB_EP_ATTR_MASK) ==
3929 	    USB_EP_ATTR_CONTROL) {
3930 
3931 		return;
3932 	}
3933 
3934 	mutex_enter(&ph->p_mutex);
3935 
3936 	data_toggle = usba_hcdi_get_data_toggle(ph->p_usba_device,
3937 	    ph->p_ep.bEndpointAddress);
3938 	usba_hcdi_set_data_toggle(ph->p_usba_device, ph->p_ep.bEndpointAddress,
3939 	    0);
3940 
3941 	mutex_exit(&ph->p_mutex);
3942 
3943 	/*
3944 	 * Restore the QH's data toggle bit based on the
3945 	 * previously saved value.
3946 	 */
3947 	if (data_toggle) {
3948 		Set_QH(pp->pp_qh->qh_status,
3949 		    Get_QH(pp->pp_qh->qh_status) | EHCI_QH_STS_DATA_TOGGLE);
3950 	} else {
3951 		Set_QH(pp->pp_qh->qh_status,
3952 		    Get_QH(pp->pp_qh->qh_status) & (~EHCI_QH_STS_DATA_TOGGLE));
3953 	}
3954 }
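
/*
 * Sketch (not compiled): the restore above is the usual "set or clear
 * one bit" read-modify-write; only EHCI_QH_STS_DATA_TOGGLE changes and
 * the rest of the QH status word is preserved.  The helper name is
 * hypothetical.
 */
#if 0
static void
ehci_qh_set_toggle_sketch(
	ehci_qh_t		*qh,
	uint_t			data_toggle)
{
	uint_t			status = Get_QH(qh->qh_status);

	if (data_toggle) {
		status |= EHCI_QH_STS_DATA_TOGGLE;
	} else {
		status &= ~EHCI_QH_STS_DATA_TOGGLE;
	}

	Set_QH(qh->qh_status, status);
}
#endif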
3955 
3956 
3957 /*
3958  * ehci_handle_outstanding_requests
3959  *
3960  * Deallocate the interrupt request structures for interrupt IN transfers
3961  * and do the callbacks for all unfinished requests.
3962  *
3963  * NOTE: This function is also called from POLLED MODE.
3964  */
3965 void
3966 ehci_handle_outstanding_requests(
3967 	ehci_state_t		*ehcip,
3968 	ehci_pipe_private_t	*pp)
3969 {
3970 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
3971 	usb_ep_descr_t		*eptd = &ph->p_ep;
3972 	ehci_trans_wrapper_t	*curr_tw;
3973 	ehci_trans_wrapper_t	*next_tw;
3974 	usb_opaque_t		curr_xfer_reqp;
3975 
3976 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
3977 	    ehcip->ehci_log_hdl,
3978 	    "ehci_handle_outstanding_requests: pp = 0x%p", pp);
3979 
3980 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3981 
3982 	/* Deallocate all pre-allocated interrupt requests */
3983 	next_tw = pp->pp_tw_head;
3984 
3985 	while (next_tw) {
3986 		curr_tw = next_tw;
3987 		next_tw = curr_tw->tw_next;
3988 
3989 		curr_xfer_reqp = curr_tw->tw_curr_xfer_reqp;
3990 
3991 		/* Deallocate current interrupt request */
3992 		if (curr_xfer_reqp) {
3993 
3994 			if ((EHCI_PERIODIC_ENDPOINT(eptd)) &&
3995 			    (curr_tw->tw_direction == EHCI_QTD_CTRL_IN_PID)) {
3996 
3997 				/* Decrement periodic in request count */
3998 				pp->pp_cur_periodic_req_cnt--;
3999 
4000 				ehci_deallocate_intr_in_resource(
4001 				    ehcip, pp, curr_tw);
4002 			} else {
4003 				ehci_hcdi_callback(ph, curr_tw, USB_CR_FLUSHED);
4004 			}
4005 		}
4006 	}
4007 }
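
/*
 * Sketch (not compiled): the loop above captures the next pointer
 * before handling the current transfer wrapper, the usual idiom for
 * walking a singly linked list whose current element may be consumed
 * or unlinked by the handling step.  The helper name and the handling
 * step are hypothetical.
 */
#if 0
static void
ehci_safe_list_walk_sketch(ehci_trans_wrapper_t *head)
{
	ehci_trans_wrapper_t	*curr_tw;
	ehci_trans_wrapper_t	*next_tw;

	next_tw = head;

	while (next_tw) {
		curr_tw = next_tw;
		next_tw = curr_tw->tw_next;	/* capture before handling */

		/* ... handle curr_tw; it may not be usable afterwards ... */
	}
}
#endif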
4008 
4009 
4010 /*
4011  * ehci_deallocate_intr_in_resource
4012  *
4013  * Deallocate interrupt request structure for the interrupt IN transfer.
4014  */
4015 void
4016 ehci_deallocate_intr_in_resource(
4017 	ehci_state_t		*ehcip,
4018 	ehci_pipe_private_t	*pp,
4019 	ehci_trans_wrapper_t	*tw)
4020 {
4021 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
4022 	uchar_t			ep_attr = ph->p_ep.bmAttributes;
4023 	usb_opaque_t		curr_xfer_reqp;
4024 
4025 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
4026 	    ehcip->ehci_log_hdl,
4027 	    "ehci_deallocate_intr_in_resource: "
4028 	    "pp = 0x%p tw = 0x%p", pp, tw);
4029 
4030 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4031 	ASSERT((ep_attr & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR);
4032 
4033 	curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4034 
4035 	/* Check the current periodic in request pointer */
4036 	if (curr_xfer_reqp) {
4037 
4038 		tw->tw_curr_xfer_reqp = NULL;
4039 
4040 		mutex_enter(&ph->p_mutex);
4041 		ph->p_req_count--;
4042 		mutex_exit(&ph->p_mutex);
4043 
4044 		/* Free pre-allocated interrupt requests */
4045 		usb_free_intr_req((usb_intr_req_t *)curr_xfer_reqp);
4046 
4047 		/* Set periodic in pipe state to idle */
4048 		pp->pp_state = EHCI_PIPE_STATE_IDLE;
4049 	}
4050 }
4051 
4052 
4053 /*
4054  * ehci_do_client_periodic_in_req_callback
4055  *
4056  * Do callback for the original client periodic IN request.
4057  */
4058 void
4059 ehci_do_client_periodic_in_req_callback(
4060 	ehci_state_t		*ehcip,
4061 	ehci_pipe_private_t	*pp,
4062 	usb_cr_t		completion_reason)
4063 {
4064 	usba_pipe_handle_data_t	*ph = pp->pp_pipe_handle;
4065 	usb_ep_descr_t		*eptd = &ph->p_ep;
4066 
4067 	USB_DPRINTF_L4(PRINT_MASK_LISTS,
4068 	    ehcip->ehci_log_hdl,
4069 	    "ehci_do_client_periodic_in_req_callback: "
4070 	    "pp = 0x%p cc = 0x%x", pp, completion_reason);
4071 
4072 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4073 
4074 	/*
4075 	 * For interrupt/isochronous IN pipes, check whether we need to do
4076 	 * a callback for the original client's periodic IN request.
4077 	 */
4078 	if (pp->pp_client_periodic_in_reqp) {
4079 		ASSERT(pp->pp_cur_periodic_req_cnt == 0);
4080 		if (EHCI_ISOC_ENDPOINT(eptd)) {
4081 			ehci_hcdi_isoc_callback(ph, NULL, completion_reason);
4082 		} else {
4083 			ehci_hcdi_callback(ph, NULL, completion_reason);
4084 		}
4085 	}
4086 }
4087 
4088 
4089 /*
4090  * ehci_hcdi_callback()
4091  *
4092  * Convenience wrapper around usba_hcdi_cb(), used for all but the root hub.
4093  */
4094 void
4095 ehci_hcdi_callback(
4096 	usba_pipe_handle_data_t	*ph,
4097 	ehci_trans_wrapper_t	*tw,
4098 	usb_cr_t		completion_reason)
4099 {
4100 	ehci_state_t		*ehcip = ehci_obtain_state(
4101 	    ph->p_usba_device->usb_root_hub_dip);
4102 	ehci_pipe_private_t	*pp = (ehci_pipe_private_t *)ph->p_hcd_private;
4103 	usb_opaque_t		curr_xfer_reqp;
4104 	uint_t			pipe_state = 0;
4105 
4106 	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
4107 	    "ehci_hcdi_callback: ph = 0x%p, tw = 0x%p, cr = 0x%x",
4108 	    ph, tw, completion_reason);
4109 
4110 	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
4111 
4112 	/* Set the pipe state as per completion reason */
4113 	switch (completion_reason) {
4114 	case USB_CR_OK:
4115 		pipe_state = pp->pp_state;
4116 		break;
4117 	case USB_CR_NO_RESOURCES:
4118 	case USB_CR_NOT_SUPPORTED:
4119 	case USB_CR_PIPE_RESET:
4120 	case USB_CR_STOPPED_POLLING:
4121 		pipe_state = EHCI_PIPE_STATE_IDLE;
4122 		break;
4123 	case USB_CR_PIPE_CLOSING:
4124 		break;
4125 	default:
4126 		/* Set the pipe state to error */
4127 		pipe_state = EHCI_PIPE_STATE_ERROR;
4128 		pp->pp_error = completion_reason;
4129 		break;
4130 
4131 	}
4132 
4133 	pp->pp_state = pipe_state;
4134 
4135 	if (tw && tw->tw_curr_xfer_reqp) {
4136 		curr_xfer_reqp = tw->tw_curr_xfer_reqp;
4137 		tw->tw_curr_xfer_reqp = NULL;
4138 	} else {
4139 		ASSERT(pp->pp_client_periodic_in_reqp != NULL);
4140 
4141 		curr_xfer_reqp = pp->pp_client_periodic_in_reqp;
4142 		pp->pp_client_periodic_in_reqp = NULL;
4143 	}
4144 
4145 	ASSERT(curr_xfer_reqp != NULL);
4146 
4147 	mutex_exit(&ehcip->ehci_int_mutex);
4148 
4149 	usba_hcdi_cb(ph, curr_xfer_reqp, completion_reason);
4150 
4151 	mutex_enter(&ehcip->ehci_int_mutex);
4152 }
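
/*
 * Sketch (not compiled) of the lock handoff in ehci_hcdi_callback()
 * above: the request is detached from the shared structures while the
 * interrupt mutex is still held, then the mutex is dropped around the
 * upcall so a client that submits a new transfer from its callback does
 * not re-enter the HCD with the mutex held.  The helper name is
 * hypothetical.
 */
#if 0
static void
ehci_callback_unlocked_sketch(
	ehci_state_t		*ehcip,
	usba_pipe_handle_data_t	*ph,
	usb_opaque_t		reqp,
	usb_cr_t		cr)
{
	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	mutex_exit(&ehcip->ehci_int_mutex);

	/* Client callback runs without the HCD interrupt mutex held */
	usba_hcdi_cb(ph, reqp, cr);

	mutex_enter(&ehcip->ehci_int_mutex);
}
#endif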
4153